#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
struct arena_bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};

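/*
 * Illustrative sketch (hypothetical helper, not part of this header): since
 * regions are laid out contiguously from the start of a slab, region i of a
 * slab served by this bin's size class sits at a fixed offset:
 *
 *   static inline void *
 *   slab_region_at_sketch(const arena_bin_info_t *bin_info, void *slab_base,
 *       size_t i) {
 *       assert(i < bin_info->nregs);
 *       return (void *)((uintptr_t)slab_base + i * bin_info->reg_size);
 *   }
 */
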
struct arena_decay_s {
	/*
	 * Approximate time in seconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	ssize_t			time;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter, which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During
	 * epoch advancement we use the delta between arena->decay.nunpurged
	 * and arena->ndirty to determine how many dirty pages, if any, were
	 * generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];
};

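/*
 * Illustrative sketch (hypothetical helper name; assumes nstime_*() and
 * prng_range_u64() helpers): an epoch's deadline is the epoch start plus one
 * interval plus a uniform jitter in [0..interval) drawn from jitter_state:
 *
 *   static void
 *   decay_deadline_sketch(arena_decay_t *decay) {
 *       nstime_t jitter;
 *
 *       nstime_copy(&decay->deadline, &decay->epoch);
 *       nstime_add(&decay->deadline, &decay->interval);
 *       nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
 *           nstime_ns(&decay->interval)));
 *       nstime_add(&decay->deadline, &jitter);
 *   }
 */
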
struct arena_bin_s {
	/* All operations on arena_bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t		*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to ensure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t		slabs_nonfull;

	/* Ring sentinel used to track full slabs. */
	extent_t		slabs_full;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};

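/*
 * Illustrative sketch (hypothetical helper; assumes helpers along the lines
 * of extent_ring_insert() and extent_heap_remove_first()): when slabcur
 * fills, it is retired to the slabs_full ring and the oldest non-full slab,
 * if any, takes its place.  The caller must hold bin->lock, and the result is
 * NULL when no non-full slab is available:
 *
 *   static extent_t *
 *   bin_replace_slabcur_sketch(arena_bin_t *bin) {
 *       extent_ring_insert(&bin->slabs_full, bin->slabcur);
 *       bin->slabcur = extent_heap_remove_first(&bin->slabs_nonfull);
 *       return bin->slabcur;
 *   }
 */
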
struct arena_s {
	/*
	 * Number of threads currently assigned to this arena, synchronized via
	 * atomic operations.  Each thread has two distinct assignments, one for
	 * application-serving allocation, and the other for internal metadata
	 * allocation.  Internal metadata must not be allocated from arenas
	 * explicitly created via the arenas.create mallctl, because the
	 * arena.<i>.reset mallctl indiscriminately discards all allocations for
	 * the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 */
	unsigned		nthreads[2];

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Extent-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
	size_t			offset_state;

	/* Extent serial number generator state. */
	size_t			extent_sn_next;

	dss_prec_t		dss_prec;

	/* True if a thread is currently executing arena_purge_to_limit(). */
	bool			purging;

	/* Number of pages in active extents. */
	size_t			nactive;

	/*
	 * Current count of pages within unused extents that are potentially
	 * dirty, and for which pages_purge_*() has not been called.  By
	 * tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;

	/* Decay-based purging state. */
	arena_decay_t		decay;

	/* Extant large allocations. */
	ql_head(extent_t)	large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Heaps of extents that were previously allocated.  These are used when
	 * allocating extents, in an attempt to re-use address space.
	 */
	extent_heap_t		extents_cached[NPSIZES+1];
	extent_heap_t		extents_retained[NPSIZES+1];
	/*
	 * Ring sentinel used to track unused dirty memory.  Dirty memory is
	 * managed as an LRU of cached extents.
	 */
	extent_t		extents_dirty;
	/* Protects extents_{cached,retained,dirty}. */
	malloc_mutex_t		extents_mtx;

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if !config_munmap).  This limits
	 * the number of disjoint virtual memory ranges so that extent merging
	 * can be effective even if multiple arenas' extent allocation requests
	 * are highly interleaved.
	 */
	pszind_t		extent_grow_next;

	/* Cache of extent structures that were allocated via base_alloc(). */
	ql_head(extent_t)	extent_cache;
	malloc_mutex_t		extent_cache_mtx;

	/* bins is used to store heaps of free regions. */
	arena_bin_t		bins[NBINS];

	/* Base allocator, from which arena metadata are allocated. */
	base_t			*base;
};

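/*
 * Illustrative sketch (hypothetical helper, not part of this header): the two
 * nthreads slots are selected by whether an assignment is for internal
 * metadata allocation, so a bool maps directly onto the index:
 *
 *   static inline unsigned
 *   arena_nthreads_sketch(arena_t *arena, bool internal) {
 *       return arena->nthreads[internal ? 1 : 0];
 *   }
 */
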
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */