/*
 * Two Levels Segregate Fit memory allocator (TLSF)
 * Version 2.3.2
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License
 * Version 2.1
 *
 * This is a kernel port of the TLSF allocator.
 * Original code can be found at: http://rtportal.upv.es/rtmalloc/
 * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
 * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
 *  /allocators/tlsf-kmod r229 dated Aug 27, 2008)
 * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
 */

#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/pfn.h>
#include <asm/time.h>

#define MAX_POOL_NAME_LEN       16

/* Some IMPORTANT TLSF parameters */
#define MEM_ALIGN       (sizeof(void *) * 2)
#define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))

#define MAX_FLI         (30)
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)

#define FLI_OFFSET      (6)
/* The TLSF structure only manages blocks bigger than 128 bytes */
#define SMALL_BLOCK     (128)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE  (sizeof(struct free_ptr))
#define BHDR_OVERHEAD   (sizeof(struct bhdr) - MIN_BLOCK_SIZE)

#define PTR_MASK        (sizeof(void *) - 1)
#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
                                ((char *)(addr) + (r)))
#define ROUNDUP_SIZE(r)         (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
#define ROUNDDOWN_SIZE(r)       ((r) & MEM_ALIGN_MASK)
#define ROUNDUP_PAGE(r)         (((r) + PAGE_SIZE - 1) & PAGE_MASK)

#define BLOCK_STATE     (0x1)
#define PREV_STATE      (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK      (0x1)
#define USED_BLOCK      (0x0)

/* bit 1 of the block size */
#define PREV_FREE       (0x2)
#define PREV_USED       (0x0)
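
/*
 * Note: block sizes are always rounded to MEM_ALIGN, so the low bits of the
 * size field are free to carry the FREE_BLOCK/PREV_FREE flags;
 * BLOCK_SIZE_MASK strips them to recover the real size.
 */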

static spinlock_t pool_list_lock;
static struct list_head pool_list_head;

struct free_ptr {
    struct bhdr *prev;
    struct bhdr *next;
};

struct bhdr {
    /* All blocks in a region are linked in order of physical address */
    struct bhdr *prev_hdr;
    /*
     * The size is stored in bytes
     *  bit 0: block is free, if set
     *  bit 1: previous block is free, if set
     */
    u32 size;
    /* Free blocks in individual freelists are linked */
    union {
        struct free_ptr free_ptr;
        u8 buffer[sizeof(struct free_ptr)];
    } ptr;
};
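
/*
 * The payload handed out to callers starts at ptr.buffer.  For free blocks
 * the same space holds the free-list links, so the per-block overhead of an
 * allocated block is only the prev_hdr and size fields (plus padding),
 * i.e. BHDR_OVERHEAD bytes.
 */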

struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long init_size;
    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    void *init_region;
    char name[MAX_POOL_NAME_LEN];
};
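
/*
 * Free blocks are indexed at two levels: bit fl of fl_bitmap is set when any
 * list in row fl is non-empty, and bit sl of sl_bitmap[fl] is set when the
 * list headed by matrix[fl][sl] is non-empty.  A fitting list can therefore
 * be located with two bit scans, independent of pool size.
 */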

/*
 * Helping functions
 */

/**
 * Returns indexes (fl, sl) of the list used to serve a request of size r.
 * Also rounds the requested size (r) up to the start of the next list, so
 * that any block found in the chosen list is large enough.
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (flsl(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = flsl(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
         *fl = *sl = 0;
         */
        *r &= ~t;
    }
}
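
/*
 * Worked example (illustrative): for a request of r = 1000 bytes,
 * flsl(1000) = 10, so t = (1 << 4) - 1 = 15; the request is rounded up to
 * 1008 and mapped to fl = 9 - FLI_OFFSET = 3, sl = 31, i.e. the list
 * covering sizes [1008, 1024).
 */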

/**
 * Returns indexes (fl, sl) of the list where a free block of size r
 * should be inserted; unlike MAPPING_SEARCH, the size is not rounded.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = flsl(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}
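
/*
 * A block being inserted must go into the list whose range contains its
 * exact size, whereas a search rounds the request up so that the first
 * block taken from the chosen list is guaranteed to fit.
 */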

/**
 * Returns the first block from a list that holds blocks larger than or
 * equal to the one pointed to by the indexes (fl, sl)
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0u << *sl);
    struct bhdr *b = NULL;

    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0u << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }

    return b;
}
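
/*
 * The search above first looks for a non-empty list at the same first level
 * with a second-level index >= sl; failing that, it takes the smallest
 * non-empty list of the lowest non-empty first level above fl.  (fl, sl)
 * are updated to the list actually used, so the caller can extract the
 * block from it.
 */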

/**
 * Remove first free block(b) from free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
                                     int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Removes block(b) from free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
                                 int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev =
            b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next =
            b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Insert block(b) in free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
{
    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}

/**
 * A region is a virtually contiguous chunk of memory; a pool is a
 * collection of such regions
 */
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block: allows us to know when we're in the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
    pool->num_regions++;
}
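
/*
 * A fresh region therefore consists of one large free block followed by a
 * zero-sized, permanently "used" sentinel block.  xmem_pool_free() uses the
 * sentinel (next block of size 0) together with prev_hdr == NULL to detect
 * that a region has become entirely free and can be returned via put_mem.
 */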

/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long init_size,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < init_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    init_size = ROUNDUP_PAGE(init_size);
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->init_size = init_size;
    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    /* always obtain init_region lazily now to ensure it is get_mem'd
     * in the same "context" as all other regions */

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}
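
/*
 * For an in-tree example of creating a pool, see tlsf_init() below, which
 * builds the page-backed pool used by xmalloc().
 */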

unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;
    total = ROUNDUP_SIZE(sizeof(*pool))
        + pool->init_size
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}

void xmem_pool_destroy(struct xmem_pool *pool)
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* User is destroying without ever allocating from this pool */
    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
    {
        ASSERT(!pool->init_region);
        pool->used_size -= BHDR_OVERHEAD;
    }

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). "
               "%lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}

void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( pool->init_region == NULL )
    {
        if ( (region = pool->get_mem(pool->init_size)) == NULL )
            goto out;
        ADD_REGION(region, pool->init_size, pool);
        pool->init_region = region;
    }

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching for a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->init_size +
                                pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    /* Should the block be split? */
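    /*
     * Split only if the remainder can hold a complete block header plus the
     * minimum payload; otherwise hand out the whole block and leave the
     * excess as internal fragmentation.
     */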
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

    /* Failed alloc */
 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}

void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
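    /*
     * Coalesce with the physically following block if it is free, then with
     * the preceding block if PREV_FREE is set.  If the merged block spans
     * the whole region (no previous header and a zero-sized sentinel next),
     * the region is handed back to the pool's put_mem callback instead of
     * being reinserted into the free lists.
     */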
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    if ( tmp_b->size & FREE_BLOCK )
    {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
    if ( b->size & PREV_FREE )
    {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}

int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}

/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;
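
/*
 * xmalloc() is backed by a single global TLSF pool that grows and shrinks
 * one xenheap page at a time through the two callbacks below; requests of
 * a page or more bypass the pool and go straight to the page allocator.
 */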

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}

static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
{
    unsigned int i, order;
    void *res, *p;

    order = get_order_from_bytes(max(align, size));

    res = alloc_xenheap_pages(order, 0);
    if ( res == NULL )
        return NULL;

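    /*
     * Trim the allocation down to PFN_UP(size) pages: walk the unused tail
     * and free it back in naturally aligned power-of-two chunks, so only
     * the pages actually covered by the request stay allocated.
     */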
    for ( p = res + PAGE_ALIGN(size), i = 0; i < order; ++i )
        if ( (unsigned long)p & (PAGE_SIZE << i) )
        {
            free_xenheap_pages(p, i);
            p += PAGE_SIZE << i;
        }

    PFN_ORDER(virt_to_page(res)) = PFN_UP(size);
    /* Check that there was no truncation: */
    ASSERT(PFN_ORDER(virt_to_page(res)) == PFN_UP(size));

    return res;
}

static void tlsf_init(void)
{
    INIT_LIST_HEAD(&pool_list_head);
    spin_lock_init(&pool_list_lock);
    xenpool = xmem_pool_create(
        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
        PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}

/*
 * xmalloc()
 */

#ifndef ZERO_BLOCK_PTR
/* Return value for zero-size allocation, distinguished from NULL. */
#define ZERO_BLOCK_PTR ((void *)-1L)
#endif

void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;
    u32 pad;

    ASSERT(!in_irq());

    if ( !size )
        return ZERO_BLOCK_PTR;

    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
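    /*
     * The pool only guarantees MEM_ALIGN alignment; reserve worst-case
     * padding so an address with the requested alignment can be carved
     * out of the block below.
     */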
    size += align - MEM_ALIGN;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        return xmalloc_whole_pages(size - align + MEM_ALIGN, align);

    /* Add alignment padding. */
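    /*
     * If the pool pointer is not sufficiently aligned, advance it to the
     * next aligned address and record the shift in a fake block header just
     * before the new pointer, with bit 0 set.  A real header written by
     * xmem_pool_alloc() has bit 0 (FREE_BLOCK) clear, so xfree() can tell
     * the two apart and step back to the original block.
     */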
    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        char *q = (char *)p + pad;
        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
        ASSERT(q > (char *)p);
        b->size = pad | 1;
        p = q;
    }

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}

void *_xzalloc(unsigned long size, unsigned long align)
{
    void *p = _xmalloc(size, align);

    return p ? memset(p, 0, size) : p;
}

void xfree(void *p)
{
    struct bhdr *b;

    if ( p == NULL || p == ZERO_BLOCK_PTR )
        return;

    ASSERT(!in_irq());

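    /*
     * A page-aligned pointer cannot have come from the TLSF pool (pool
     * allocations always carry a header immediately before the payload),
     * so it must be a whole-page allocation from xmalloc_whole_pages().
     * PFN_ORDER of the first page holds the page count recorded at
     * allocation time; free those pages back in power-of-two chunks.
     */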
    if ( !((unsigned long)p & (PAGE_SIZE - 1)) )
    {
        unsigned long size = PFN_ORDER(virt_to_page(p));
        unsigned int i, order = get_order_from_pages(size);

        BUG_ON((unsigned long)p & ((PAGE_SIZE << order) - 1));
        PFN_ORDER(virt_to_page(p)) = 0;
        for ( i = 0; ; ++i )
        {
            if ( !(size & (1 << i)) )
                continue;
            size -= 1 << i;
            free_xenheap_pages(p + (size << PAGE_SHIFT), i);
            if ( i + 1 >= order )
                return;
        }
    }

    /* Strip alignment padding. */
    b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
    if ( b->size & 1 )
    {
        p = (char *)p - (b->size & ~1u);
        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
        ASSERT(!(b->size & 1));
    }

    xmem_pool_free(p, xenpool);
}