// -*- C++ -*- Allocate exception objects.
// Copyright (C) 2001-2020 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// This is derived from the C++ ABI for IA-64.  Places where we diverge
// from it for cross-architecture compatibility are noted with "@@@".

#include <bits/c++config.h>
#include <cstdlib>
#if _GLIBCXX_HOSTED
#include <cstring>
#endif
#include <climits>
#include <exception>
#include "unwind-cxx.h"
#include <ext/concurrence.h>
#include <new>

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
using std::memset;
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *memset (void *, int, std::size_t);
#endif

using namespace __cxxabiv1;

// ??? How to control these parameters.

// Guess from the size of basic types how large a buffer is reasonable.
// Note that the basic C++ exception header has 13 pointers and 2 ints,
// so on a system with PSImode pointers we're talking about 56 bytes
// just for overhead.
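// (A plausible reading of that figure: with 4-byte pointers and 2-byte
// ints, 13 * 4 + 2 * 2 = 56 bytes of header precede the thrown object.)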

#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE	128
# define EMERGENCY_OBJ_COUNT	16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE	512
# define EMERGENCY_OBJ_COUNT	32
#else
# define EMERGENCY_OBJ_SIZE	1024
# define EMERGENCY_OBJ_COUNT	64
#endif

#ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT	4
#endif
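// (Without thread support only a handful of exceptions can be live at
// once -- nested unwinding within the single thread -- so a much
// smaller reserve presumably suffices.)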

namespace __gnu_cxx
{
  void __freeres();
}

namespace
{
  // A fixed-size arena serving a variable-size object allocator.
  class pool
    {
    public:
      pool();

      _GLIBCXX_NODISCARD void *allocate (std::size_t);
      void free (void *);

      bool in_pool (void *);

    private:
      struct free_entry {
	std::size_t size;
	free_entry *next;
      };
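      // The header of a block handed out to a user.  data is a
      // flexible array member; the bare "aligned" attribute requests
      // the largest alignment ever used for a scalar type on the
      // target, so the storage that follows is aligned for any
      // exception object.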
      struct allocated_entry {
	std::size_t size;
	char data[] __attribute__((aligned));
      };

      // A single mutex controlling emergency allocations.
      __gnu_cxx::__mutex emergency_mutex;

      // The free-list
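      // (kept sorted by address so pool::free can coalesce
      // neighbouring blocks)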
      free_entry *first_free_entry;
      // The arena itself - we need to keep track of these only
      // to implement in_pool.
      char *arena;
      std::size_t arena_size;

      friend void __gnu_cxx::__freeres();
    };

  pool::pool()
    {
      // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE
      // environment variable to make this tunable.
      arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
		    + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
      arena = (char *)malloc (arena_size);
      if (!arena)
	{
	  // If the allocation failed go without an emergency pool.
	  arena_size = 0;
	  first_free_entry = NULL;
	  return;
	}

      // Populate the free-list with a single entry covering the whole arena.
      first_free_entry = reinterpret_cast <free_entry *> (arena);
      new (first_free_entry) free_entry;
      first_free_entry->size = arena_size;
      first_free_entry->next = NULL;
    }

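  // First-fit allocation from the free list.  Returns NULL when no
  // free block is large enough; the callers below fall back to
  // std::terminate in that case.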
  void *pool::allocate (std::size_t size)
    {
      __gnu_cxx::__scoped_lock sentry(emergency_mutex);
      // We need an additional size_t member plus the padding to
      // ensure proper alignment of data.
      size += offsetof (allocated_entry, data);
      // And we need to at least hand out objects of the size of
      // a freelist entry.
      if (size < sizeof (free_entry))
	size = sizeof (free_entry);
      // And we need to align objects we hand out to the maximum
      // alignment required on the target (this really aligns the
      // tail which will become a new freelist entry).
      size = ((size + __alignof__ (allocated_entry::data) - 1)
	      & ~(__alignof__ (allocated_entry::data) - 1));
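      // (E.g. with 16-byte alignment, a request that has grown to 40
      // bytes by this point rounds up to 48.)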
      // Search for an entry of proper size on the freelist.
      free_entry **e;
      for (e = &first_free_entry;
	   *e && (*e)->size < size;
	   e = &(*e)->next)
	;
      if (!*e)
	return NULL;
      allocated_entry *x;
      if ((*e)->size - size >= sizeof (free_entry))
	{
	  // Split the block if it is too large.
	  free_entry *f = reinterpret_cast <free_entry *>
	      (reinterpret_cast <char *> (*e) + size);
	  std::size_t sz = (*e)->size;
	  free_entry *next = (*e)->next;
	  new (f) free_entry;
	  f->next = next;
	  f->size = sz - size;
	  x = reinterpret_cast <allocated_entry *> (*e);
	  new (x) allocated_entry;
	  x->size = size;
	  *e = f;
	}
      else
	{
	  // Exact size match, or remainder too small for a free entry.
	  std::size_t sz = (*e)->size;
	  free_entry *next = (*e)->next;
	  x = reinterpret_cast <allocated_entry *> (*e);
	  new (x) allocated_entry;
	  x->size = sz;
	  *e = next;
	}
      return &x->data;
    }

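  // Return a block to the free list, merging it with an adjacent free
  // block below and/or above it when possible, so the list stays
  // address-sorted and coalesced.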
  void pool::free (void *data)
    {
      __gnu_cxx::__scoped_lock sentry(emergency_mutex);
      allocated_entry *e = reinterpret_cast <allocated_entry *>
	(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
      std::size_t sz = e->size;
      if (!first_free_entry
	  || (reinterpret_cast <char *> (e) + sz
	      < reinterpret_cast <char *> (first_free_entry)))
	{
	  // If the free list is empty, or the entry is before the
	  // first element and cannot be merged with it, add it as
	  // the first free entry.
	  free_entry *f = reinterpret_cast <free_entry *> (e);
	  new (f) free_entry;
	  f->size = sz;
	  f->next = first_free_entry;
	  first_free_entry = f;
	}
      else if (reinterpret_cast <char *> (e) + sz
	       == reinterpret_cast <char *> (first_free_entry))
	{
	  // The first free entry starts right after us; merge with it.
	  free_entry *f = reinterpret_cast <free_entry *> (e);
	  new (f) free_entry;
	  f->size = sz + first_free_entry->size;
	  f->next = first_free_entry->next;
	  first_free_entry = f;
	}
      else
	{
	  // Else search for the last free entry that starts below us,
	  // so we can merge at its end.
	  free_entry **fe;
	  for (fe = &first_free_entry;
	       (*fe)->next
	       && (reinterpret_cast <char *> ((*fe)->next)
		   < reinterpret_cast <char *> (e) + sz);
	       fe = &(*fe)->next)
	    ;
	  // If we can merge the next block into us do so and continue
	  // with the cases below.
	  if (reinterpret_cast <char *> (e) + sz
	      == reinterpret_cast <char *> ((*fe)->next))
	    {
	      sz += (*fe)->next->size;
	      (*fe)->next = (*fe)->next->next;
	    }
	  if (reinterpret_cast <char *> (*fe) + (*fe)->size
	      == reinterpret_cast <char *> (e))
	    // Merge with the freelist entry ending right before us.
	    (*fe)->size += sz;
	  else
	    {
	      // Else put the new entry after it, which keeps the
	      // freelist sorted by address.
	      free_entry *f = reinterpret_cast <free_entry *> (e);
	      new (f) free_entry;
	      f->size = sz;
	      f->next = (*fe)->next;
	      (*fe)->next = f;
	    }
	}
    }

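  // Does ptr point into the emergency arena?  The __cxa_free_*
  // routines below use this to pick between pool::free and ::free.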
  bool pool::in_pool (void *ptr)
    {
      char *p = reinterpret_cast <char *> (ptr);
      return (p > arena
	      && p < arena + arena_size);
    }
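  // (Strict inequalities suffice: pointers tested here always point
  // past an allocated_entry header, hence strictly inside the arena.)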

  pool emergency_pool;
}

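// Release the emergency arena.  Presumably intended for shutdown-time
// resource-freeing hooks (in the spirit of glibc's __libc_freeres) so
// that leak checkers do not report the arena; the ABI does not
// mandate this hook.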
namespace __gnu_cxx
{
  void
  __freeres()
  {
    if (emergency_pool.arena)
      {
	::free(emergency_pool.arena);
	emergency_pool.arena = 0;
      }
  }
}

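// Memory layout produced by __cxa_allocate_exception:
//
//   +----------------------------+--------------------+
//   | __cxa_refcounted_exception | thrown object ...  |
//   +----------------------------+--------------------+
//   ^ malloc/pool block          ^ returned pointer
//
// The header is zero-initialized; THROWN_SIZE bytes follow it for the
// object itself.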
extern "C" void *
__cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
{
  void *ret;

  thrown_size += sizeof (__cxa_refcounted_exception);
  ret = malloc (thrown_size);

  if (!ret)
    ret = emergency_pool.allocate (thrown_size);

  if (!ret)
    std::terminate ();

  memset (ret, 0, sizeof (__cxa_refcounted_exception));

  return (void *)((char *)ret + sizeof (__cxa_refcounted_exception));
}


extern "C" void
__cxxabiv1::__cxa_free_exception(void *vptr) _GLIBCXX_NOTHROW
{
  char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
  if (emergency_pool.in_pool (ptr))
    emergency_pool.free (ptr);
  else
    free (ptr);
}


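// A dependent exception has no thrown object of its own; it references
// a primary exception (this is what lets std::rethrow_exception avoid
// copying), so exactly one header is allocated and zeroed.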
extern "C" __cxa_dependent_exception*
__cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
{
  __cxa_dependent_exception *ret;

  ret = static_cast<__cxa_dependent_exception*>
    (malloc (sizeof (__cxa_dependent_exception)));

  if (!ret)
    ret = static_cast <__cxa_dependent_exception*>
      (emergency_pool.allocate (sizeof (__cxa_dependent_exception)));

  if (!ret)
    std::terminate ();

  memset (ret, 0, sizeof (__cxa_dependent_exception));

  return ret;
}


extern "C" void
__cxxabiv1::__cxa_free_dependent_exception
  (__cxa_dependent_exception *vptr) _GLIBCXX_NOTHROW
{
  if (emergency_pool.in_pool (vptr))
    emergency_pool.free (vptr);
  else
    free (vptr);
}