#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void		**vec;
	unsigned	len;
};
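
/*
 * Illustrative sketch, not quoted from jemalloc: callers typically provide
 * the backing storage that vec points at, and the unwinder fills in up to
 * len entries.
 *
 *	void *frames[PROF_BT_MAX];
 *	prof_bt_t bt = {frames, 0};	// len grows as frames are recorded
 */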

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t	*bt;
	unsigned	max;
} prof_unwind_data_t;
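
/*
 * Hedged sketch (not jemalloc's actual callback; assumes <unwind.h>) of how a
 * _Unwind_Backtrace() callback might consume this structure:
 *
 *	static _Unwind_Reason_Code
 *	unwind_callback(struct _Unwind_Context *context, void *arg) {
 *		prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 *		void *ip = (void *)_Unwind_GetIP(context);
 *
 *		if (ip == NULL) {
 *			return _URC_END_OF_STACK;
 *		}
 *		data->bt->vec[data->bt->len++] = ip;
 *		if (data->bt->len == data->max) {
 *			return _URC_END_OF_STACK;
 *		}
 *		return _URC_NO_REASON;
 *	}
 */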
#endif

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t	curobjs;
	uint64_t	curbytes;
	uint64_t	accumobjs;
	uint64_t	accumbytes;
};
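
/*
 * The cur* counters track currently live sampled objects/bytes and can fall
 * back to zero; the accum* counters only ever grow.  Hedged sketch of the
 * updates a sampled allocation of usize bytes and its matching deallocation
 * would make (illustrative, not quoted from the implementation):
 *
 *	cnts.curobjs++;		cnts.curbytes += usize;
 *	cnts.accumobjs++;	cnts.accumbytes += usize;
 *	...
 *	cnts.curobjs--;		cnts.curbytes -= usize;
 */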

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t		*tdata;

	/*
	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
	 * defunct during teardown.
	 */
	uint64_t		thr_uid;
	uint64_t		thr_discrim;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t		cnts;

	/* Associated global context. */
	prof_gctx_t		*gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs.  There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t		tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t)	tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool			prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t	state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t		dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t		*lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned		nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t	tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t)	dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Associated backtrace. */
	prof_bt_t		bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void			*vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
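
/*
 * Because bt.vec points into the trailing vec[1] member above, a gctx and the
 * backtrace it describes can live in a single allocation.  Hedged sizing
 * sketch (gctx_size is an assumed local, not an identifier from this file):
 *
 *	size_t gctx_size = offsetof(prof_gctx_t, vec) +
 *	    bt->len * sizeof(void *);
 *	// ... allocate gctx_size bytes, copy the frames into gctx->vec, and
 *	// point gctx->bt.vec at gctx->vec.
 */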

struct prof_tdata_s {
	malloc_mutex_t		*lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t		thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t		thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char			*thread_name;

	bool			attached;
	bool			expired;

	rb_node(prof_tdata_t)	tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t		tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects.  Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t			bt2tctx;

	/* Sampling state. */
	uint64_t		prng_state;
	uint64_t		bytes_until_sample;

	/* State used to avoid dumping while operating on prof internals. */
	bool			enq;
	bool			enq_idump;
	bool			enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped.  New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool			dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool			active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void			*vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
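
/*
 * Hedged sketch of how the sampling state above (prng_state,
 * bytes_until_sample) is typically consumed; illustrative only, not quoted
 * from the implementation:
 *
 *	if (usize >= tdata->bytes_until_sample) {
 *		// Sample this allocation, then draw a fresh threshold from
 *		// prng_state (distributed around the configured average
 *		// sample interval).
 *	} else {
 *		tdata->bytes_until_sample -= usize;
 *	}
 */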

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */