// SPDX-License-Identifier: GPL-2.0
/*
 * Shows data access monitoring results in simple metrics.
 */

#define pr_fmt(fmt) "damon-stat: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_stat."

static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = param_get_bool,
};

static bool enabled __read_mostly = IS_ENABLED(
	CONFIG_DAMON_STAT_ENABLED_DEFAULT);
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled, "Enable or disable DAMON_STAT");
31
32 static unsigned long estimated_memory_bandwidth __read_mostly;
33 module_param(estimated_memory_bandwidth, ulong, 0400);
34 MODULE_PARM_DESC(estimated_memory_bandwidth,
35 "Estimated memory bandwidth usage in bytes per second");
36
37 static unsigned long memory_idle_ms_percentiles[101] __read_mostly = {0,};
38 module_param_array(memory_idle_ms_percentiles, ulong, NULL, 0400);
39 MODULE_PARM_DESC(memory_idle_ms_percentiles,
40 "Memory idle time percentiles in milliseconds");
41
42 static struct damon_ctx *damon_stat_context;
43
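/*
 * Sum up the size of each region multiplied by its access frequency
 * (nr_accesses) to get the amount of memory estimated to be accessed within
 * one aggregation interval, and convert that to bytes per second.
 */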
static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long access_bytes = 0;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t)
			access_bytes += (r->ar.end - r->ar.start) *
				r->nr_accesses;
	}
	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
		MSEC_PER_SEC / c->attrs.aggr_interval;
}

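/*
 * Return how long a region has been left unaccessed, in aggregation
 * intervals.  A region that was accessed within the last aggregation
 * interval has an idle time of zero; otherwise the idle time is its age
 * plus one.
 */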
static unsigned int damon_stat_idletime(const struct damon_region *r)
{
	if (r->nr_accesses)
		return 0;
	return r->age + 1;
}

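/* sort() comparison callback: order regions by ascending idle time */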
static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *ra = *(const struct damon_region **)a;
	const struct damon_region *rb = *(const struct damon_region **)b;

	return damon_stat_idletime(ra) - damon_stat_idletime(rb);
}

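/*
 * Allocate and fill an array of pointers to all monitoring result regions of
 * @c, sorted by ascending idle time, and set the total size of the regions.
 * The caller is responsible for kfree()-ing the array.  Return 0 on success,
 * or a negative error code on allocation failure.
 */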
static int damon_stat_sort_regions(struct damon_ctx *c,
		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
		unsigned long *total_sz_ptr)
{
	struct damon_target *t;
	struct damon_region *r;
	struct damon_region **region_pointers;
	unsigned int nr_regions = 0;
	unsigned long total_sz = 0;

	damon_for_each_target(t, c) {
		/* there is only one target */
		region_pointers = kmalloc_array(damon_nr_regions(t),
				sizeof(*region_pointers), GFP_KERNEL);
		if (!region_pointers)
			return -ENOMEM;
		damon_for_each_region(r, t) {
			region_pointers[nr_regions++] = r;
			total_sz += r->ar.end - r->ar.start;
		}
	}
	sort(region_pointers, nr_regions, sizeof(*region_pointers),
			damon_stat_cmp_regions, NULL);
	*sorted_ptr = region_pointers;
	*nr_regions_ptr = nr_regions;
	*total_sz_ptr = total_sz;
	return 0;
}

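/*
 * Update memory_idle_ms_percentiles[].  Walk the regions from the least to
 * the most idle one while accounting their sizes, and record the idle time
 * of the region at each percentile of the total monitored memory, converted
 * from aggregation intervals to milliseconds.
 */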
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}

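/*
 * Repeatedly invoked by DAMON (see call_control below) to refresh the
 * exposed metrics.  Updates are rate-limited to once per five seconds.
 */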
static int damon_stat_damon_call_fn(void *data)
{
	struct damon_ctx *c = data;
	static unsigned long last_refresh_jiffies;

	/* avoid unnecessarily frequent stat update */
	if (time_before_eq(jiffies, last_refresh_jiffies +
				msecs_to_jiffies(5 * MSEC_PER_SEC)))
		return 0;
	last_refresh_jiffies = jiffies;

	damon_stat_set_estimated_memory_bandwidth(c);
	damon_stat_set_idletime_percentiles(c);
	return 0;
}

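/*
 * Build a DAMON context that monitors the physical address space of the
 * biggest contiguous System RAM region, with intervals that DAMON auto-tunes.
 */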
static struct damon_ctx *damon_stat_build_ctx(void)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	unsigned long start = 0, end = 0;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	/*
	 * Auto-tune the sampling and aggregation intervals aiming at a 4%
	 * DAMON-observed access ratio, keeping the sampling interval within
	 * the [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;

	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	if (damon_set_region_biggest_system_ram_default(target, &start, &end))
		goto free_out;
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

static struct damon_call_control call_control = {
	.fn = damon_stat_damon_call_fn,
	.repeat = true,
};

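/*
 * Build and start the monitoring context, and register the repeating metrics
 * refresh callback.
 */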
static int damon_stat_start(void)
{
	int err;

	damon_stat_context = damon_stat_build_ctx();
	if (!damon_stat_context)
		return -ENOMEM;
	err = damon_start(&damon_stat_context, 1, true);
	if (err)
		return err;
	call_control.data = damon_stat_context;
	return damon_call(damon_stat_context, &call_control);
}

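/* Stop the monitoring and destroy the context. */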
static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
}

static bool damon_stat_init_called;

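/*
 * Setter for the 'enabled' parameter.  Start or stop DAMON_STAT when the
 * value transitions.
 */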
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	if (is_enabled == enabled)
		return 0;

	if (!damon_stat_init_called)
		/*
		 * Probably called from command line parsing (parse_args()).
		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle
		 * it.
		 */
		return 0;

	if (enabled) {
		err = damon_stat_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_stat_stop();
	return 0;
}

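/* Start DAMON_STAT if 'enabled' was set, e.g., via the kernel command line. */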
static int __init damon_stat_init(void)
{
	int err = 0;

	damon_stat_init_called = true;

	/* probably set via command line */
	if (enabled)
		err = damon_stat_start();

	if (err && enabled)
		enabled = false;
	return err;
}

module_init(damon_stat_init);