// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/page_reporting.h>

/*
 * Balloon device works in 4K page units. So each page is pointed to by
 * multiple balloon pages. All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80

#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
					     __GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
	(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
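/*
 * Worked example (assuming a typical configuration): with 4 KiB base pages
 * and MAX_ORDER of 11, a hint block is order 10, i.e. 1024 pages or 4 MiB
 * reported to the host per block.
 */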

enum virtio_balloon_vq {
	VIRTIO_BALLOON_VQ_INFLATE,
	VIRTIO_BALLOON_VQ_DEFLATE,
	VIRTIO_BALLOON_VQ_STATS,
	VIRTIO_BALLOON_VQ_FREE_PAGE,
	VIRTIO_BALLOON_VQ_REPORTING,
	VIRTIO_BALLOON_VQ_MAX
};

enum virtio_balloon_config_read {
	VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};

struct virtio_balloon {
	struct virtio_device *vdev;
	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;

	/* Balloon's own wq for cpu-intensive work items */
	struct workqueue_struct *balloon_wq;
	/* The free page reporting work item submitted to the balloon wq */
	struct work_struct report_free_page_work;

	/* The balloon servicing is delegated to a freezable workqueue. */
	struct work_struct update_balloon_stats_work;
	struct work_struct update_balloon_size_work;

	/* Prevent updating balloon when it is being canceled. */
	spinlock_t stop_update_lock;
	bool stop_update;
	/* Bitmap to indicate whether the related config fields need to be read */
	unsigned long config_read_bitmap;

	/* The list of allocated free pages, waiting to be given back to mm */
	struct list_head free_page_list;
	spinlock_t free_page_list_lock;
	/* The number of free page blocks on the above list */
	unsigned long num_free_page_blocks;
	/*
	 * The cmd id received from host.
	 * Read it via virtio_balloon_cmd_id_received to get the latest value
	 * sent from host.
	 */
	u32 cmd_id_received_cache;
	/* The cmd id that is actively in use */
	__virtio32 cmd_id_active;
	/* Buffer to store the stop sign */
	__virtio32 cmd_id_stop;

	/* Waiting for host to ack the pages we released. */
	wait_queue_head_t acked;

	/* Number of balloon pages we've told the Host we're not using. */
	unsigned int num_pages;
	/*
	 * The pages we've told the Host we're not using are enqueued
	 * at vb_dev_info->pages list.
	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
	 * to num_pages above.
	 */
	struct balloon_dev_info vb_dev_info;

	/* Synchronize access/update to this struct virtio_balloon's elements */
	struct mutex balloon_lock;

	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

	/* Memory statistics */
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

	/* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
	struct shrinker shrinker;

	/* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
	struct notifier_block oom_nb;

	/* Free page reporting device */
	struct virtqueue *reporting_vq;
	struct page_reporting_dev_info pr_dev_info;
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

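/*
 * A worked example of the conversion below: on a kernel built with 64 KiB
 * pages, VIRTIO_BALLOON_PAGES_PER_PAGE is 16, so Linux pfn 0x100 maps to
 * balloon pfn 0x1000 (0x100 * 16).
 */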
static u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* Convert pfn from Linux page size to balloon page size. */
	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

static void balloon_ack(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	wake_up(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
	struct scatterlist sg;
	unsigned int len;

	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

	/* We should always be able to add one buffer to an empty queue. */
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
					struct scatterlist *sg, unsigned int nents)
{
	struct virtio_balloon *vb =
		container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
	struct virtqueue *vq = vb->reporting_vq;
	unsigned int unused, err;

	/* We should always be able to add these buffers to an empty queue. */
	err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);

	/*
	 * In the extremely unlikely case that an error is triggered here,
	 * simply warn and exit without actually processing the pages.
	 */
	if (WARN_ON_ONCE(err))
		return err;

	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));

	return 0;
}

static void set_page_pfns(struct virtio_balloon *vb,
			  __virtio32 pfns[], struct page *page)
{
	unsigned int i;

	BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);

	/*
	 * Set balloon pfns pointing at this page.
	 * Note that the first pfn points at start of the page.
	 */
	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
		pfns[i] = cpu_to_virtio32(vb->vdev,
					  page_to_balloon_pfn(page) + i);
}

static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_allocated_pages;
	unsigned int num_pfns;
	struct page *page;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (num_pfns = 0; num_pfns < num;
	     num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		struct page *page = balloon_page_alloc();

		if (!page) {
			dev_info_ratelimited(&vb->vdev->dev,
					     "Out of puff! Can't get %u pages\n",
					     VIRTIO_BALLOON_PAGES_PER_PAGE);
			/* Sleep for at least 1/5 of a second before retry. */
			msleep(200);
			break;
		}

		balloon_page_push(&pages, page);
	}

	mutex_lock(&vb->balloon_lock);

	vb->num_pfns = 0;

	while ((page = balloon_page_pop(&pages))) {
		balloon_page_enqueue(&vb->vb_dev_info, page);

		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, -1);
		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_allocated_pages = vb->num_pfns;
	/* Did we get any? */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->inflate_vq);
	mutex_unlock(&vb->balloon_lock);

	return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb,
				  struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, 1);
		list_del(&page->lru);
		put_page(page); /* balloon reference */
	}
}

static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_freed_pages;
	struct page *page;
	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	mutex_lock(&vb->balloon_lock);
	/* We can't release more pages than taken */
	num = min(num, (size_t)vb->num_pages);
	for (vb->num_pfns = 0; vb->num_pfns < num;
	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		page = balloon_page_dequeue(vb_dev_info);
		if (!page)
			break;
		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		list_add(&page->lru, &pages);
		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_freed_pages = vb->num_pfns;
	/*
	 * Note that if
	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
	 * is true, we *have* to do it in this order
	 */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->deflate_vq);
	release_pages_balloon(vb, &pages);
	mutex_unlock(&vb->balloon_lock);
	return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
{
	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct sysinfo i;
	unsigned int idx = 0;
	long available;
	unsigned long caches;

	all_vm_events(events);
	si_meminfo(&i);

	available = si_mem_available();
	caches = global_node_page_state(NR_FILE_PAGES);

#ifdef CONFIG_VM_EVENT_COUNTERS
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
		    pages_to_bytes(events[PSWPIN]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
		    pages_to_bytes(events[PSWPOUT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
#ifdef CONFIG_HUGETLB_PAGE
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
		    events[HTLB_BUDDY_PGALLOC]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
		    events[HTLB_BUDDY_PGALLOC_FAIL]);
#endif
#endif
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
		    pages_to_bytes(i.freeram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
		    pages_to_bytes(i.totalram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
		    pages_to_bytes(available));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
		    pages_to_bytes(caches));

	return idx;
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse. The driver initializes the virtqueue
 * with a single buffer. From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	spin_lock(&vb->stop_update_lock);
	if (!vb->stop_update)
		queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
	spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
	struct virtqueue *vq;
	struct scatterlist sg;
	unsigned int len, num_stats;

	num_stats = update_balloon_stats(vb);

	vq = vb->stats_vq;
	if (!virtqueue_get_buf(vq, &len))
		return;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);
}

static inline s64 towards_target(struct virtio_balloon *vb)
{
	s64 target;
	u32 num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
			&num_pages);

	target = num_pages;
	return target - vb->num_pages;
}

/* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
					     unsigned long num_to_return)
{
	struct page *page;
	unsigned long num_returned;

	spin_lock_irq(&vb->free_page_list_lock);
	for (num_returned = 0; num_returned < num_to_return; num_returned++) {
		page = balloon_page_pop(&vb->free_page_list);
		if (!page)
			break;
		free_pages((unsigned long)page_address(page),
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}
	vb->num_free_page_blocks -= num_returned;
	spin_unlock_irq(&vb->free_page_list_lock);

	return num_returned;
}

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return;

	/* No need to queue the work if the bit was already set. */
	if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			     &vb->config_read_bitmap))
		return;

	queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&vb->stop_update_lock, flags);
	if (!vb->stop_update) {
		queue_work(system_freezable_wq,
			   &vb->update_balloon_size_work);
		virtio_balloon_queue_free_page_work(vb);
	}
	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

static void update_balloon_size(struct virtio_balloon *vb)
{
	u32 actual = vb->num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
			 &actual);
}

static void update_balloon_stats_func(struct work_struct *work)
{
	struct virtio_balloon *vb;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_stats_work);
	stats_handle_request(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
	struct virtio_balloon *vb;
	s64 diff;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_size_work);
	diff = towards_target(vb);

	if (!diff)
		return;

	if (diff > 0)
		diff -= fill_balloon(vb, diff);
	else
		diff += leak_balloon(vb, -diff);
	update_balloon_size(vb);

	if (diff)
		queue_work(system_freezable_wq, work);
}

static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
	vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
	const char *names[VIRTIO_BALLOON_VQ_MAX];
	int err;

	/*
	 * Inflateq and deflateq are used unconditionally. The names[]
	 * will be NULL if the related feature is not enabled, which will
	 * cause no allocation for the corresponding virtqueue in find_vqs.
	 */
	callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
	callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
	callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
	callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		names[VIRTIO_BALLOON_VQ_STATS] = "stats";
		callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
		callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
		callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
	}

	err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
			      callbacks, names, NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		unsigned int num_stats;

		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		num_stats = update_balloon_stats(vb);

		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
		err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
					   GFP_KERNEL);
		if (err) {
			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
				 __func__);
			return err;
		}
		virtqueue_kick(vb->stats_vq);
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];

	return 0;
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			       &vb->config_read_bitmap)) {
		/* Legacy balloon config space is LE, unlike all other devices. */
		virtio_cread_le(vb->vdev, struct virtio_balloon_config,
				free_page_hint_cmd_id,
				&vb->cmd_id_received_cache);
	}

	return vb->cmd_id_received_cache;
}

static int send_cmd_id_start(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
					    virtio_balloon_cmd_id_received(vb));
	sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int send_cmd_id_stop(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int get_free_page_and_send(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct page *page;
	struct scatterlist sg;
	int err, unused;
	void *p;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	/*
	 * When the allocation returns NULL, it indicates that we have got all
	 * the possible free pages, so return -EINTR to stop.
	 */
	if (!page)
		return -EINTR;

	p = page_address(page);
	sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
	/* There is always 1 entry reserved for the cmd id to use. */
	if (vq->num_free > 1) {
		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
		if (unlikely(err)) {
			free_pages((unsigned long)p,
				   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
			return err;
		}
		virtqueue_kick(vq);
		spin_lock_irq(&vb->free_page_list_lock);
		balloon_page_push(&vb->free_page_list, page);
		vb->num_free_page_blocks++;
		spin_unlock_irq(&vb->free_page_list_lock);
	} else {
		/*
		 * The vq has no available entry to add this page block, so
		 * just free it.
		 */
		free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}

	return 0;
}

static int send_free_pages(struct virtio_balloon *vb)
{
	int err;
	u32 cmd_id_active;

	while (1) {
		/*
		 * If a stop id or a new cmd id was just received from host,
		 * stop the reporting.
		 */
		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
		if (unlikely(cmd_id_active !=
			     virtio_balloon_cmd_id_received(vb)))
			break;

		/*
		 * The free page blocks are allocated and sent to host one by
		 * one.
		 */
		err = get_free_page_and_send(vb);
		if (err == -EINTR)
			break;
		else if (unlikely(err))
			return err;
	}

	return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
	int err;
	struct device *dev = &vb->vdev->dev;

	/* Start by sending the received cmd id to host with an outbuf. */
	err = send_cmd_id_start(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a start id, err = %d\n", err);

	err = send_free_pages(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a free page, err = %d\n", err);

	/* End by sending a stop id to host with an outbuf. */
	err = send_cmd_id_stop(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

static void report_free_page_func(struct work_struct *work)
{
	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
						 report_free_page_work);
	u32 cmd_id_received;

	cmd_id_received = virtio_balloon_cmd_id_received(vb);
	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
		/* Pass ULONG_MAX to give back all the free pages */
		return_free_pages_to_mm(vb, ULONG_MAX);
	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
		   cmd_id_received !=
		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
		virtio_balloon_report_free_page(vb);
	}
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *			     a compaction thread. (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct virtio_balloon *vb = container_of(vb_dev_info,
			struct virtio_balloon, vb_dev_info);
	unsigned long flags;

	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
	 * this turn, as it is easier to retry the page migration later.
	 * This also prevents fill_balloon() getting stuck into a mutex
	 * recursion in the case it ends up triggering memory compaction
	 * while it is attempting to inflate the balloon.
	 */
	if (!mutex_trylock(&vb->balloon_lock))
		return -EAGAIN;

	get_page(newpage); /* balloon reference */

	/*
	 * When we migrate a page to a different zone and adjusted the
	 * managed page count when inflating, we have to fixup the count of
	 * both involved zones.
	 */
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
	    page_zone(page) != page_zone(newpage)) {
		adjust_managed_page_count(page, 1);
		adjust_managed_page_count(newpage, -1);
	}

	/* balloon's page migration 1st step -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	__count_vm_event(BALLOON_MIGRATE);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, newpage);
	tell_host(vb, vb->inflate_vq);

	/* balloon's page migration 2nd step -- deflate "page" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_delete(page);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, page);
	tell_host(vb, vb->deflate_vq);

	mutex_unlock(&vb->balloon_lock);

	put_page(page); /* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}
#endif /* CONFIG_BALLOON_COMPACTION */

static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
{
	unsigned long blocks_to_free, blocks_freed;

	pages_to_free = round_up(pages_to_free,
				 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
	blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
	blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

	return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);

	return shrink_free_pages(vb, sc->nr_to_scan);
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);

	return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static int virtio_balloon_oom_notify(struct notifier_block *nb,
				     unsigned long dummy, void *parm)
{
	struct virtio_balloon *vb = container_of(nb,
						 struct virtio_balloon, oom_nb);
	unsigned long *freed = parm;

	*freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
		  VIRTIO_BALLOON_PAGES_PER_PAGE;
	update_balloon_size(vb);

	return NOTIFY_OK;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
	unregister_shrinker(&vb->shrinker);
}

static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
	vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
	vb->shrinker.count_objects = virtio_balloon_shrinker_count;
	vb->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&vb->shrinker, "virtio-balloon");
}

static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						    VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		/*
		 * We're allowed to reuse any free pages, even if they are
		 * still to be processed by the host.
		 */
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
		vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
		err = register_oom_notifier(&vb->oom_nb);
		if (err < 0)
			goto out_unregister_shrinker;
	}

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
		/* Start with poison val of 0 representing general init */
		__u32 poison_val = 0;

		/*
		 * Let the hypervisor know that we are expecting a
		 * specific value to be written back in balloon pages.
		 *
		 * If the PAGE_POISON value was larger than a byte we would
		 * need to byte swap poison_val here to guarantee it is
		 * little-endian. However for now it is a single byte so we
		 * can pass it as-is.
		 */
		if (!want_init_on_free())
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));

		virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
				 poison_val, &poison_val);
	}

	vb->pr_dev_info.report = virtballoon_free_page_report;
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		unsigned int capacity;

		capacity = virtqueue_get_vring_size(vb->reporting_vq);
		if (capacity < PAGE_REPORTING_CAPACITY) {
			err = -ENOSPC;
			goto out_unregister_oom;
		}

		/*
		 * The default page reporting order is @pageblock_order, which
		 * corresponds to 512MB in size on ARM64 when 64KB base page
		 * size is used. The page reporting won't be triggered if the
		 * freeing page can't come up with a free area that huge.
		 * So we set the page reporting order to 5, corresponding to
		 * 2MB. It helps to avoid THP splitting if 4KB base page size
		 * is used by host.
		 *
		 * Ideally, the page reporting order is selected based on the
		 * host's base page size. However, it needs more work to report
		 * that value. The hard-coded order would be fine currently.
		 */
#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
		vb->pr_dev_info.order = 5;
#endif

		err = page_reporting_register(&vb->pr_dev_info);
		if (err)
			goto out_unregister_oom;
	}

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_unregister_oom:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
out_unregister_shrinker:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

static void remove_common(struct virtio_balloon *vb)
{
	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);
	update_balloon_size(vb);

	/* There might be free pages that are being reported: release them. */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return_free_pages_to_mm(vb, ULONG_MAX);

	/* Now we reset the device so we can clean up the queues. */
	virtio_reset_device(vb->vdev);

	vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		page_reporting_unregister(&vb->pr_dev_info);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		cancel_work_sync(&vb->report_free_page_work);
		destroy_workqueue(vb->balloon_wq);
	}

	remove_common(vb);
	kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	/*
	 * The workqueue is already frozen by the PM core before this
	 * function is called.
	 */
	remove_common(vb);
	return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	update_balloon_size(vb);
	return 0;
}
#endif

static int virtballoon_validate(struct virtio_device *vdev)
{
	/*
	 * Inform the hypervisor that our pages are poisoned or
	 * initialized. If we cannot do that then we should disable
	 * page reporting as it could potentially change the contents
	 * of our free pages.
	 */
	if (!want_init_on_free() && !page_poisoning_enabled_static())
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

	__virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
	return 0;
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
	VIRTIO_BALLOON_F_PAGE_POISON,
	VIRTIO_BALLOON_F_REPORTING,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtballoon_validate,
	.probe = virtballoon_probe,
	.remove = virtballoon_remove,
	.config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtballoon_freeze,
	.restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");