#ifndef JEMALLOC_INTERNAL_STATS_STRUCTS_H
#define JEMALLOC_INTERNAL_STATS_STRUCTS_H

struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin (see the sketch following this struct).
	 */
	uint64_t	nrequests;
};
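/*
 * Illustrative sketch, not jemalloc's actual fast path: a tcache
 * allocation of this bin's size class only bumps the thread-local
 * request counter.  The function name is hypothetical; the
 * tcache_bin_stats_t typedef is assumed to be in scope from the
 * companion types header, as the lstats declaration below suggests.
 */
static inline void
tcache_bin_stats_example_alloc(tcache_bin_stats_t *tstats) {
	tstats->nrequests++;	/* One request of this bin's size. */
}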

struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc (see the fill/flush sketch following
	 * this struct).
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of slabs created for this bin's size class. */
	uint64_t	nslabs;

	/*
	 * Total number of slabs reused by extracting them from the slabs heap
	 * for this bin's size class.
	 */
	uint64_t	reslabs;

	/* Current number of slabs in this bin. */
	size_t		curslabs;
};
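/*
 * Illustrative sketch (hypothetical function names, simplified flow): a
 * tcache fill moves nregs regions out of the bin, so nmalloc grows by
 * nregs but nfills only by one; a flush is the mirror image, and is
 * also where the tcache's local nrequests counter is periodically
 * merged into the bin's.
 */
static inline void
malloc_bin_stats_example_fill(malloc_bin_stats_t *bstats, uint64_t nregs) {
	bstats->nfills++;
	bstats->nmalloc += nregs;	/* Many regions, one fill. */
}

static inline void
malloc_bin_stats_example_flush(malloc_bin_stats_t *bstats,
    tcache_bin_stats_t *tstats, uint64_t nregs) {
	bstats->nflushes++;
	bstats->ndalloc += nregs;
	/* Periodic merge of tcache-served requests into the bin. */
	bstats->nrequests += tstats->nrequests;
	tstats->nrequests = 0;
}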

struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/* Current number of allocations of this size class. */
	size_t		curlextents;
};
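/*
 * Illustrative sketch (hypothetical name): a large allocation served
 * directly by the arena bumps the per-size-class counters and the
 * count of live extents of that class.
 */
static inline void
malloc_large_stats_example_alloc(malloc_large_stats_t *lstats) {
	lstats->nmalloc++;	/* Served directly by the arena. */
	lstats->nrequests++;	/* No tcache involved, so count it now. */
	lstats->curlextents++;	/* One more live extent of this class. */
}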

struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/*
	 * Number of bytes currently retained as a side effect of munmap()
	 * being disabled/bypassed.  Retained bytes are technically mapped
	 * (though always decommitted or purged), but they are excluded from
	 * the mapped statistic above (see the sketch following this struct).
	 */
	size_t		retained;

	/*
	 * Total number of purge sweeps, total number of madvise() calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control (illustrated below).
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;

	size_t		base;
	size_t		internal; /* Protected via atomic_*_zu(). */
	size_t		resident;

	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;

	/* Number of bytes cached in tcaches associated with this arena. */
	size_t		tcache_bytes;

	/* One element for each large size class. */
	malloc_large_stats_t	lstats[NSIZES - NBINS];
};
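/*
 * Illustrative sketch (hypothetical; real extent retention is more
 * involved, and the arena_stats_t typedef is assumed from the companion
 * types header): with munmap() disabled or bypassed, pages released by
 * the arena stay mapped by the OS but move from the mapped statistic to
 * retained, keeping the two counters disjoint.
 */
static inline void
arena_stats_example_retain(arena_stats_t *astats, size_t size) {
	astats->mapped -= size;
	astats->retained += size;
}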
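/*
 * Illustrative sketch (hypothetical name): one decay-driven purge sweep
 * may issue several madvise() calls, one per contiguous dirty run,
 * which is why npurge, nmadvise, and purged advance at different rates.
 */
static inline void
arena_stats_example_purge(arena_stats_t *astats, uint64_t nranges,
    uint64_t npages) {
	astats->npurge++;		/* One sweep... */
	astats->nmadvise += nranges;	/* ...several madvise() calls... */
	astats->purged += npages;	/* ...covering npages pages. */
}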

#endif /* JEMALLOC_INTERNAL_STATS_STRUCTS_H */