This source file includes following definitions.
- is_vmballoon_stats_on
- vmballoon_stats_op_inc
- vmballoon_stats_gen_inc
- vmballoon_stats_gen_add
- vmballoon_stats_page_inc
- vmballoon_stats_page_add
- __vmballoon_cmd
- vmballoon_cmd
- vmballoon_send_start
- vmballoon_send_guest_id
- vmballoon_page_order
- vmballoon_page_in_frames
- vmballoon_mark_page_offline
- vmballoon_mark_page_online
- vmballoon_send_get_target
- vmballoon_alloc_page_list
- vmballoon_handle_one_result
- vmballoon_status_page
- vmballoon_lock_op
- vmballoon_add_page
- vmballoon_lock
- vmballoon_release_page_list
- vmballoon_release_refused_pages
- vmballoon_change
- vmballoon_enqueue_page_list
- vmballoon_dequeue_page_list
- vmballoon_split_refused_pages
- vmballoon_inflate
- vmballoon_deflate
- vmballoon_deinit_batching
- vmballoon_init_batching
- vmballoon_doorbell
- vmballoon_vmci_cleanup
- vmballoon_vmci_init
- vmballoon_pop
- vmballoon_reset
- vmballoon_work
- vmballoon_shrinker_scan
- vmballoon_shrinker_count
- vmballoon_unregister_shrinker
- vmballoon_register_shrinker
- vmballoon_enable_stats
- vmballoon_debug_show
- vmballoon_debugfs_init
- vmballoon_debugfs_exit
- vmballoon_debugfs_init
- vmballoon_debugfs_exit
- vmballoon_init_fs_context
- vmballoon_migratepage
- vmballoon_compaction_deinit
- vmballoon_compaction_init
- vmballoon_compaction_deinit
- vmballoon_compaction_init
- vmballoon_init
- vmballoon_exit
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18 
  19 #include <linux/types.h>
  20 #include <linux/kernel.h>
  21 #include <linux/mm.h>
  22 #include <linux/vmalloc.h>
  23 #include <linux/sched.h>
  24 #include <linux/module.h>
  25 #include <linux/workqueue.h>
  26 #include <linux/debugfs.h>
  27 #include <linux/seq_file.h>
  28 #include <linux/rwsem.h>
  29 #include <linux/slab.h>
  30 #include <linux/spinlock.h>
  31 #include <linux/mount.h>
  32 #include <linux/pseudo_fs.h>
  33 #include <linux/balloon_compaction.h>
  34 #include <linux/vmw_vmci_defs.h>
  35 #include <linux/vmw_vmci_api.h>
  36 #include <asm/hypervisor.h>
  37 
  38 MODULE_AUTHOR("VMware, Inc.");
  39 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
  40 MODULE_ALIAS("dmi:*:svnVMware*:*");
  41 MODULE_ALIAS("vmware_vmmemctl");
  42 MODULE_LICENSE("GPL");
  43 
  44 static bool __read_mostly vmwballoon_shrinker_enable;
  45 module_param(vmwballoon_shrinker_enable, bool, 0444);
  46 MODULE_PARM_DESC(vmwballoon_shrinker_enable,
  47         "Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
  48 
  49 
  50 #define VMBALLOON_SHRINK_DELAY          (5)
  51 
  52 
  53 #define VMW_BALLOON_MAX_REFUSED         16
  54 
  55 
  56 #define BALLOON_VMW_MAGIC               0x0ba11007
  57 
  58 
  59 
  60 
  61 #define VMW_BALLOON_HV_PORT             0x5670
  62 #define VMW_BALLOON_HV_MAGIC            0x456c6d6f
  63 #define VMW_BALLOON_GUEST_ID            1       
  64 
/* Capability bits negotiated with the hypervisor via CMD_START. */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};
  75 
  76 #define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
  77                                         | VMW_BALLOON_BATCHED_CMDS \
  78                                         | VMW_BALLOON_BATCHED_2M_CMDS \
  79                                         | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
  80 
  81 #define VMW_BALLOON_2M_ORDER            (PMD_SHIFT - PAGE_SHIFT)
  82 
  83 
  84 
  85 
  86 #ifdef CONFIG_64BIT
  87 #define VMW_BALLOON_CAPABILITIES        (VMW_BALLOON_CAPABILITIES_COMMON \
  88                                         | VMW_BALLOON_64_BIT_TARGET)
  89 #else
  90 #define VMW_BALLOON_CAPABILITIES        VMW_BALLOON_CAPABILITIES_COMMON
  91 #endif
  92 
/* Page sizes the balloon can operate on: basic 4KB pages and 2MB huge pages. */
enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};
  98 
  99 #define VMW_BALLOON_NUM_PAGE_SIZES      (VMW_BALLOON_LAST_SIZE + 1)
 100 
 101 static const char * const vmballoon_page_size_names[] = {
 102         [VMW_BALLOON_4K_PAGE]                   = "4k",
 103         [VMW_BALLOON_2M_PAGE]                   = "2M"
 104 };
 105 
/* The operation currently being performed on the balloon. */
enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};
 110 
/*
 * Per-command statistics buckets: total number of operations issued and
 * number that failed (see vmballoon_stats_op_inc()).
 */
enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};
 115 
 116 #define VMW_BALLOON_OP_STAT_TYPES       (VMW_BALLOON_OP_FAIL_STAT + 1)
 117 
 118 
 119 
 120 
 121 
 122 
 123 
 124 
 125 
 126 
 127 
 128 
 129 
 130 
 131 
 132 
 133 
 134 
 135 
 136 
 137 
 138 
 139 
 140 
 141 
 142 
 143 
 144 
 145 
 146 
 147 
 148 
 149 
 150 
 151 
 152 
 153 
 154 
 155 
 156 
 157 
 158 
 159 
 160 
 161 
/*
 * Commands of the hypervisor ("monitor") balloon interface, issued through
 * __vmballoon_cmd().
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};
 176 
 177 #define VMW_BALLOON_CMD_NUM     (VMW_BALLOON_CMD_LAST + 1)
 178 
/* Status codes the hypervisor returns for balloon commands. */
enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};
 190 
 191 #define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES   (0x03000000)
 192 
 193 #define VMW_BALLOON_CMD_WITH_TARGET_MASK                        \
 194         ((1UL << VMW_BALLOON_CMD_GET_TARGET)            |       \
 195          (1UL << VMW_BALLOON_CMD_LOCK)                  |       \
 196          (1UL << VMW_BALLOON_CMD_UNLOCK)                |       \
 197          (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)          |       \
 198          (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)        |       \
 199          (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)       |       \
 200          (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
 201 
 202 static const char * const vmballoon_cmd_names[] = {
 203         [VMW_BALLOON_CMD_START]                 = "start",
 204         [VMW_BALLOON_CMD_GET_TARGET]            = "target",
 205         [VMW_BALLOON_CMD_LOCK]                  = "lock",
 206         [VMW_BALLOON_CMD_UNLOCK]                = "unlock",
 207         [VMW_BALLOON_CMD_GUEST_ID]              = "guestType",
 208         [VMW_BALLOON_CMD_BATCHED_LOCK]          = "batchLock",
 209         [VMW_BALLOON_CMD_BATCHED_UNLOCK]        = "batchUnlock",
 210         [VMW_BALLOON_CMD_BATCHED_2M_LOCK]       = "2m-lock",
 211         [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]     = "2m-unlock",
 212         [VMW_BALLOON_CMD_VMCI_DOORBELL_SET]     = "doorbellSet"
 213 };
 214 
/* Per page-size statistics (indexed also by enum vmballoon_page_size_type). */
enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};
 223 
 224 #define VMW_BALLOON_PAGE_STAT_NUM       (VMW_BALLOON_PAGE_STAT_LAST + 1)
 225 
/* General (non page-size related) event counters. */
enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};
 234 
 235 #define VMW_BALLOON_STAT_NUM            (VMW_BALLOON_STAT_LAST + 1)
 236 
 237 static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
 238 static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
 239 
/**
 * struct vmballoon_ctl - control structure of a single (de)inflation run.
 * @pages: pages awaiting or following communication with the hypervisor.
 * @refused_pages: pages the hypervisor refused to lock/unlock.
 * @prealloc_pages: 4KB pages obtained by splitting refused 2MB pages,
 *		    consumed before performing new allocations.
 * @n_refused_pages: number of entries on @refused_pages.
 * @n_pages: number of entries on @pages.
 * @page_size: size of the pages handled in this run.
 * @op: the operation being performed (inflate or deflate).
 */
struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};
 249 
 250 
 251 
 252 
 253 
 254 
 255 
 256 
/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
 262 
/*
 * struct vmballoon - state of the VMware balloon driver. A single instance
 * (the file-scope @balloon) exists per guest.
 */
struct vmballoon {
	/*
	 * Maximum page size the balloon may use (4KB or 2MB), derived from
	 * the capabilities returned by CMD_START in vmballoon_send_start().
	 */
	enum vmballoon_page_size_type max_page_size;

	/*
	 * Current balloon size, in basic (4KB) frames.
	 */
	atomic64_t size;

	/*
	 * Balloon target size in basic frames, as last reported by the
	 * hypervisor; updated by __vmballoon_cmd() via WRITE_ONCE() for any
	 * command in VMW_BALLOON_CMD_WITH_TARGET_MASK.
	 */
	unsigned long target;

	/*
	 * Whether a full reset (re-negotiation with the hypervisor) is
	 * required before further balloon operations; set when the
	 * hypervisor returns VMW_BALLOON_ERROR_RESET.
	 */
	bool reset_required;

	/*
	 * Capability bits negotiated with the hypervisor on CMD_START
	 * (see enum vmwballoon_capabilities).
	 */
	unsigned long capabilities;

	/*
	 * Page holding the array of batch entries communicated to the
	 * hypervisor for batched lock/unlock operations.
	 */
	struct vmballoon_batch_entry *batch_page;

	/*
	 * Maximum number of pages that may be communicated to the
	 * hypervisor in a single batch.
	 */
	unsigned int batch_max_pages;

	/*
	 * The page used for communication when batching is disabled
	 * (legacy single-page LOCK/UNLOCK commands).
	 */
	struct page *page;

	/*
	 * Time (in jiffies) before which inflation is held off; checked in
	 * vmballoon_change() after shrinker-driven deflation.
	 */
	unsigned long shrink_timeout;

	/* Statistics; only consulted when debugfs stats are enabled. */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs entry of the balloon. */
	struct dentry *dbg_entry;
#endif

	/*
	 * Balloon-compaction device info; its page list and pages_lock are
	 * used for the 4KB balloon pages.
	 */
	struct balloon_dev_info b_dev_info;

	/* Delayed work running the main balloon worker. */
	struct delayed_work dwork;

	/*
	 * List of inflated 2MB pages. Kept locally since the balloon
	 * compaction infrastructure only deals with 4KB pages; protected by
	 * @b_dev_info.pages_lock.
	 */
	struct list_head huge_pages;

	/*
	 * VMCI doorbell handle used for hypervisor-initiated wake-ups
	 * — TODO confirm against vmballoon_vmci_init (outside this view).
	 */
	struct vmci_handle vmci_doorbell;

	/*
	 * Semaphore protecting balloon (re)configuration.
	 */
	struct rw_semaphore conf_sem;

	/*
	 * Lock protecting the communication channel with the hypervisor
	 * (@batch_page / @page); asserted held in vmballoon_lock_op() and
	 * vmballoon_add_page().
	 */
	spinlock_t comm_lock;

	/* Shrinker interface for non-cooperative memory reclaim. */
	struct shrinker shrinker;

	/*
	 * Whether the shrinker was registered, so teardown knows whether it
	 * must be unregistered.
	 */
	bool shrinker_registered;
};
 400 
 401 static struct vmballoon balloon;
 402 
/*
 * Balloon statistics. Only updated when stats collection is switched on
 * (see is_vmballoon_stats_on()); all counters are atomic64 so they can be
 * bumped concurrently without extra locking.
 */
struct vmballoon_stats {
	/* General events (timer, doorbell, reset, shrink). */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* Page events, per event type and per page size (4KB/2MB). */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Hypervisor commands: total issued and failures, per command. */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};
 414 
 415 static inline bool is_vmballoon_stats_on(void)
 416 {
 417         return IS_ENABLED(CONFIG_DEBUG_FS) &&
 418                 static_branch_unlikely(&balloon_stat_enabled);
 419 }
 420 
 421 static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
 422                                           enum vmballoon_op_stat_type type)
 423 {
 424         if (is_vmballoon_stats_on())
 425                 atomic64_inc(&b->stats->ops[op][type]);
 426 }
 427 
 428 static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
 429                                            enum vmballoon_stat_general stat)
 430 {
 431         if (is_vmballoon_stats_on())
 432                 atomic64_inc(&b->stats->general_stat[stat]);
 433 }
 434 
 435 static inline void vmballoon_stats_gen_add(struct vmballoon *b,
 436                                            enum vmballoon_stat_general stat,
 437                                            unsigned int val)
 438 {
 439         if (is_vmballoon_stats_on())
 440                 atomic64_add(val, &b->stats->general_stat[stat]);
 441 }
 442 
 443 static inline void vmballoon_stats_page_inc(struct vmballoon *b,
 444                                             enum vmballoon_stat_page stat,
 445                                             enum vmballoon_page_size_type size)
 446 {
 447         if (is_vmballoon_stats_on())
 448                 atomic64_inc(&b->stats->page_stat[stat][size]);
 449 }
 450 
 451 static inline void vmballoon_stats_page_add(struct vmballoon *b,
 452                                             enum vmballoon_stat_page stat,
 453                                             enum vmballoon_page_size_type size,
 454                                             unsigned int val)
 455 {
 456         if (is_vmballoon_stats_on())
 457                 atomic64_add(val, &b->stats->page_stat[stat][size]);
 458 }
 459 
/**
 * __vmballoon_cmd - issue a command to the hypervisor.
 * @b: pointer to the balloon.
 * @cmd: command to issue (one of &enum vmballoon_cmd_type).
 * @arg1: first command argument; meaning depends on @cmd.
 * @arg2: second command argument.
 * @result: output argument for the hypervisor result; may be NULL if the
 *          caller does not care.
 *
 * The command is delivered through a backdoor I/O port access ("inl"): the
 * magic goes in %eax, the command in %ecx, the port in %edx, and the
 * arguments in %ebx and %esi. The status comes back in %eax. For CMD_START
 * the result is returned in %ecx instead of %ebx.
 *
 * Side effects: on success of a command that carries a balloon target,
 * @b->target is updated; on %VMW_BALLOON_ERROR_RESET, @b->reset_required is
 * set so the main worker re-negotiates with the hypervisor.
 *
 * Return: the status reported by the hypervisor.
 */
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed; CMD_START returns it in %ecx */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}
 505 
 506 static __always_inline unsigned long
 507 vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
 508               unsigned long arg2)
 509 {
 510         unsigned long dummy;
 511 
 512         return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
 513 }
 514 
 515 
 516 
 517 
 518 
 519 static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
 520 {
 521         unsigned long status, capabilities;
 522 
 523         status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
 524                                  &capabilities);
 525 
 526         switch (status) {
 527         case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
 528                 b->capabilities = capabilities;
 529                 break;
 530         case VMW_BALLOON_SUCCESS:
 531                 b->capabilities = VMW_BALLOON_BASIC_CMDS;
 532                 break;
 533         default:
 534                 return -EIO;
 535         }
 536 
 537         
 538 
 539 
 540 
 541 
 542         b->max_page_size = VMW_BALLOON_4K_PAGE;
 543         if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
 544             (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
 545                 b->max_page_size = VMW_BALLOON_2M_PAGE;
 546 
 547 
 548         return 0;
 549 }
 550 
 551 
 552 
 553 
 554 
 555 
 556 
 557 
 558 
 559 
 560 
 561 
 562 
 563 static int vmballoon_send_guest_id(struct vmballoon *b)
 564 {
 565         unsigned long status;
 566 
 567         status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
 568                                VMW_BALLOON_GUEST_ID, 0);
 569 
 570         return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 571 }
 572 
 573 
 574 
 575 
 576 
 577 
 578 
 579 static inline
 580 unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
 581 {
 582         return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
 583 }
 584 
 585 
 586 
 587 
 588 
 589 
 590 
 591 static inline unsigned int
 592 vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
 593 {
 594         return 1 << vmballoon_page_order(page_size);
 595 }
 596 
 597 
 598 
 599 
 600 
 601 
 602 static void
 603 vmballoon_mark_page_offline(struct page *page,
 604                             enum vmballoon_page_size_type page_size)
 605 {
 606         int i;
 607 
 608         for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 609                 __SetPageOffline(page + i);
 610 }
 611 
 612 
 613 
 614 
 615 
 616 
 617 static void
 618 vmballoon_mark_page_online(struct page *page,
 619                            enum vmballoon_page_size_type page_size)
 620 {
 621         int i;
 622 
 623         for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
 624                 __ClearPageOffline(page + i);
 625 }
 626 
 627 
 628 
 629 
 630 
 631 
 632 
 633 
 634 
 635 
 636 static int vmballoon_send_get_target(struct vmballoon *b)
 637 {
 638         unsigned long status;
 639         unsigned long limit;
 640 
 641         limit = totalram_pages();
 642 
 643         
 644         if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
 645             limit != (u32)limit)
 646                 return -EINVAL;
 647 
 648         status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
 649 
 650         return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
 651 }
 652 
 653 
 654 
 655 
 656 
 657 
 658 
 659 
 660 
 661 
 662 
 663 
 664 
 665 static int vmballoon_alloc_page_list(struct vmballoon *b,
 666                                      struct vmballoon_ctl *ctl,
 667                                      unsigned int req_n_pages)
 668 {
 669         struct page *page;
 670         unsigned int i;
 671 
 672         for (i = 0; i < req_n_pages; i++) {
 673                 
 674 
 675 
 676 
 677 
 678                 if (!list_empty(&ctl->prealloc_pages)) {
 679                         page = list_first_entry(&ctl->prealloc_pages,
 680                                                 struct page, lru);
 681                         list_del(&page->lru);
 682                 } else {
 683                         if (ctl->page_size == VMW_BALLOON_2M_PAGE)
 684                                 page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
 685                                         __GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
 686                         else
 687                                 page = balloon_page_alloc();
 688 
 689                         vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
 690                                                  ctl->page_size);
 691                 }
 692 
 693                 if (page) {
 694                         
 695                         list_add(&page->lru, &ctl->pages);
 696                         continue;
 697                 }
 698 
 699                 
 700                 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
 701                                          ctl->page_size);
 702                 break;
 703         }
 704 
 705         ctl->n_pages = i;
 706 
 707         return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
 708 }
 709 
 710 
 711 
 712 
 713 
 714 
 715 
 716 
 717 
/**
 * vmballoon_handle_one_result - handle the status of a single page operation.
 * @b: pointer to the balloon.
 * @page: the page whose status is being handled.
 * @page_size: size of @page.
 * @status: status of the lock/unlock operation for @page.
 *
 * Return: 0 on success; -EIO if the hypervisor refused the page (the caller
 * is expected to move the page to the refused list).
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred: account the refused page. */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}
 736 
 737 
 738 
 739 
 740 
 741 
 742 
 743 
 744 
 745 
 746 
 747 
 748 
 749 
 750 static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
 751                                            struct page **p)
 752 {
 753         if (static_branch_likely(&vmw_balloon_batching)) {
 754                 
 755                 *p = pfn_to_page(b->batch_page[idx].pfn);
 756                 return b->batch_page[idx].status;
 757         }
 758 
 759         
 760         *p = b->page;
 761 
 762         
 763 
 764 
 765 
 766 
 767 
 768         return VMW_BALLOON_SUCCESS;
 769 }
 770 
 771 
 772 
 773 
 774 
 775 
 776 
 777 
 778 
 779 
 780 
 781 
 782 
 783 
 784 
 785 
 786 static unsigned long vmballoon_lock_op(struct vmballoon *b,
 787                                        unsigned int num_pages,
 788                                        enum vmballoon_page_size_type page_size,
 789                                        enum vmballoon_op op)
 790 {
 791         unsigned long cmd, pfn;
 792 
 793         lockdep_assert_held(&b->comm_lock);
 794 
 795         if (static_branch_likely(&vmw_balloon_batching)) {
 796                 if (op == VMW_BALLOON_INFLATE)
 797                         cmd = page_size == VMW_BALLOON_2M_PAGE ?
 798                                 VMW_BALLOON_CMD_BATCHED_2M_LOCK :
 799                                 VMW_BALLOON_CMD_BATCHED_LOCK;
 800                 else
 801                         cmd = page_size == VMW_BALLOON_2M_PAGE ?
 802                                 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
 803                                 VMW_BALLOON_CMD_BATCHED_UNLOCK;
 804 
 805                 pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 806         } else {
 807                 cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
 808                                                   VMW_BALLOON_CMD_UNLOCK;
 809                 pfn = page_to_pfn(b->page);
 810 
 811                 
 812                 if (unlikely(pfn != (u32)pfn))
 813                         return VMW_BALLOON_ERROR_PPN_INVALID;
 814         }
 815 
 816         return vmballoon_cmd(b, cmd, pfn, num_pages);
 817 }
 818 
 819 
 820 
 821 
 822 
 823 
 824 
 825 
 826 
 827 
 828 static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
 829                                struct page *p)
 830 {
 831         lockdep_assert_held(&b->comm_lock);
 832 
 833         if (static_branch_likely(&vmw_balloon_batching))
 834                 b->batch_page[idx] = (struct vmballoon_batch_entry)
 835                                         { .pfn = page_to_pfn(p) };
 836         else
 837                 b->page = p;
 838 }
 839 
 840 
 841 
 842 
 843 
 844 
 845 
 846 
 847 
 848 
 849 
 850 
 851 
 852 
 853 
 854 
 855 
 856 
 857 
 858 
/**
 * vmballoon_lock - lock or unlock a batch of pages with the hypervisor.
 * @b: pointer to the balloon.
 * @ctl: control structure: @ctl->pages holds the pages, @ctl->op selects
 *	 lock (inflate) or unlock (deflate). Refused pages are moved to
 *	 @ctl->refused_pages and @ctl->n_pages / @ctl->n_refused_pages are
 *	 updated accordingly.
 *
 * Return: 0 if the batch command itself succeeded (individual pages may
 * still have been refused), -EIO otherwise.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we are saving the original value in @num_pages and
	 * use this value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides a single operation
		 * result.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * Error happened. Move the pages to the refused list and
		 * update the page counts accordingly.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
 914 
 915 
 916 
 917 
 918 
 919 
 920 
 921 
 922 
 923 
 924 static void vmballoon_release_page_list(struct list_head *page_list,
 925                                        int *n_pages,
 926                                        enum vmballoon_page_size_type page_size)
 927 {
 928         struct page *page, *tmp;
 929 
 930         list_for_each_entry_safe(page, tmp, page_list, lru) {
 931                 list_del(&page->lru);
 932                 __free_pages(page, vmballoon_page_order(page_size));
 933         }
 934 
 935         if (n_pages)
 936                 *n_pages = 0;
 937 }
 938 
 939 
 940 
 941 
 942 
 943 
/**
 * vmballoon_release_refused_pages - free pages the hypervisor refused.
 * @b: pointer to the balloon.
 * @ctl: control structure holding the refused pages list and count.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}
 953 
 954 
 955 
 956 
 957 
 958 
 959 
 960 
 961 
/**
 * vmballoon_change - compute the required balloon size change.
 * @b: pointer to the balloon.
 *
 * Return: the required change in basic frames: positive for inflation,
 * negative for deflation, zero when no change should be made now.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	/* Both are read into signed 64-bit so the subtraction below cannot
	 * wrap into a huge positive value. */
	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/* No changes while a reset is pending. */
	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}
 988 
 989 
 990 
 991 
 992 
 993 
 994 
 995 
 996 
 997 
 998 
 999 
/**
 * vmballoon_enqueue_page_list - enqueue successfully inflated pages.
 * @b: pointer to the balloon.
 * @pages: list of pages to enqueue; emptied by this function.
 * @n_pages: number of pages on @pages; reset to zero on return.
 * @page_size: size of the pages.
 *
 * 4KB pages go through the balloon-compaction infrastructure. 2MB pages are
 * kept on the local @b->huge_pages list (compaction does not handle them)
 * under the compaction pages lock.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages on a local list, which is not
		 * available for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}
1029 
1030 
1031 
1032 
1033 
1034 
1035 
1036 
1037 
1038 
1039 
1040 
1041 
1042 
/**
 * vmballoon_dequeue_page_list - dequeue pages from the balloon for deflation.
 * @b: pointer to the balloon.
 * @pages: list on which the dequeued pages are collected.
 * @n_pages: output argument; number of pages actually dequeued.
 * @page_size: size of the pages to dequeue.
 * @n_req_pages: number of pages requested.
 *
 * 4KB pages are dequeued through the balloon-compaction infrastructure;
 * 2MB pages come from the local @b->huge_pages list, taken under the
 * compaction pages lock.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}
1075 
1076 
1077 
1078 
1079 
1080 
1081 
1082 
1083 
1084 
1085 
1086 static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1087 {
1088         struct page *page, *tmp;
1089         unsigned int i, order;
1090 
1091         order = vmballoon_page_order(ctl->page_size);
1092 
1093         list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1094                 list_del(&page->lru);
1095                 split_page(page, order);
1096                 for (i = 0; i < (1 << order); i++)
1097                         list_add(&page[i].lru, &ctl->prealloc_pages);
1098         }
1099         ctl->n_refused_pages = 0;
1100 }
1101 
1102 
1103 
1104 
1105 
1106 
/**
 * vmballoon_inflate - inflate the balloon towards its target size.
 * @b: pointer to the balloon.
 *
 * Repeatedly allocates pages (starting with the maximum supported page
 * size), locks them with the hypervisor, and enqueues them on the balloon
 * lists until the target is reached or progress stalls. When huge-page
 * allocation fails or too many pages are refused, the refused huge pages
 * are split into 4KB pages and inflation falls back to the smaller size.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages
		 * exceeds the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4KB, which also empties
			 * the refused-pages list for the next iteration.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}
1181 
1182 
1183 
1184 
1185 
1186 
1187 
1188 
1189 
1190 
1191 
1192 
1193 
/*
 * vmballoon_deflate() - shrink the balloon.
 *
 * @b: pointer to the balloon.
 * @n_frames: number of 4 KiB frames to deflate, or zero to deflate down to
 *	      the target computed by vmballoon_change().
 * @coordinated: whether to inform the hypervisor (via vmballoon_lock())
 *		 before releasing each batch of pages.
 *
 * Starts with 4 KiB pages and moves on to bigger page sizes once a batch
 * cannot be fully satisfied at the current size.  Pages the hypervisor
 * refuses to release are put back into the balloon.
 *
 * Return: the number of 4 KiB frames that were actually deflated.
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages until the requested amount (or the target) is reached */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * When an explicit frame count was requested, deflate the
		 * part that was not tried yet; otherwise follow the
		 * (negative) distance from the target.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Cap the batch at what one hypervisor operation can carry,
		 * expressed in pages of the current size.
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* pop pages of the current size out of the balloon */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Count every dequeued frame as tried, including ones the
		 * hypervisor may refuse below.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * In coordinated mode, tell the hypervisor the pages are
		 * about to be unlocked.  vmballoon_lock() moves refused
		 * pages onto ctl.refused_pages and reduces ctl.n_pages
		 * accordingly, so the accounting below only covers pages
		 * that were really released.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Remember whether the whole batch was released; fewer pages
		 * means the queue for this size ran dry or the hypervisor
		 * refused some, in which case a bigger size is tried below.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* update balloon size with the frames actually unlocked */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* give the unlocked pages back to the system */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* refused pages stay ballooned: re-enqueue them */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* if not all pages were freed, move to the next page size */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}
1297 
1298 
1299 
1300 
1301 
1302 
1303 
1304 
1305 
/*
 * vmballoon_deinit_batching() - disable batched hypervisor commands.
 *
 * @b: pointer to the balloon.
 *
 * Frees the batch communication page, turns the batching static branch off
 * and reverts to one page per command.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}
1313 
1314 
1315 
1316 
1317 
1318 
1319 
1320 
1321 
1322 
1323 
1324 static int vmballoon_init_batching(struct vmballoon *b)
1325 {
1326         struct page *page;
1327 
1328         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1329         if (!page)
1330                 return -ENOMEM;
1331 
1332         b->batch_page = page_address(page);
1333         b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1334 
1335         static_branch_enable(&vmw_balloon_batching);
1336 
1337         return 0;
1338 }
1339 
1340 
1341 
1342 
1343 static void vmballoon_doorbell(void *client_data)
1344 {
1345         struct vmballoon *b = client_data;
1346 
1347         vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1348 
1349         mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1350 }
1351 
1352 
1353 
1354 
1355 static void vmballoon_vmci_cleanup(struct vmballoon *b)
1356 {
1357         vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1358                       VMCI_INVALID_ID, VMCI_INVALID_ID);
1359 
1360         if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1361                 vmci_doorbell_destroy(b->vmci_doorbell);
1362                 b->vmci_doorbell = VMCI_INVALID_HANDLE;
1363         }
1364 }
1365 
1366 
1367 
1368 
1369 
1370 
1371 
1372 
1373 
1374 
1375 
1376 static int vmballoon_vmci_init(struct vmballoon *b)
1377 {
1378         unsigned long error;
1379 
1380         if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1381                 return 0;
1382 
1383         error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1384                                      VMCI_PRIVILEGE_FLAG_RESTRICTED,
1385                                      vmballoon_doorbell, b);
1386 
1387         if (error != VMCI_SUCCESS)
1388                 goto fail;
1389 
1390         error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1391                                 b->vmci_doorbell.context,
1392                                 b->vmci_doorbell.resource, NULL);
1393 
1394         if (error != VMW_BALLOON_SUCCESS)
1395                 goto fail;
1396 
1397         return 0;
1398 fail:
1399         vmballoon_vmci_cleanup(b);
1400         return -EIO;
1401 }
1402 
1403 
1404 
1405 
1406 
1407 
1408 
1409 
1410 
1411 
1412 static void vmballoon_pop(struct vmballoon *b)
1413 {
1414         unsigned long size;
1415 
1416         while ((size = atomic64_read(&b->size)))
1417                 vmballoon_deflate(b, size, false);
1418 }
1419 
1420 
1421 
1422 
1423 
1424 
/*
 * vmballoon_reset() - perform a full reset of the balloon.
 *
 * @b: pointer to the balloon.
 *
 * Tears down the VMCI doorbell, pops all ballooned pages, renegotiates
 * capabilities with the hypervisor and re-establishes batching and the
 * doorbell according to the result.  Runs with the configuration semaphore
 * held for write, excluding inflation, deflation and the shrinker.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	/* renegotiate capabilities; on failure reset_required stays set */
	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * Batching could not be initialized; tell the host by
			 * sending a null capability set.  Since reset_required
			 * is left set, the worker retries the reset on its
			 * next (roughly one second later) run.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}
1467 
1468 
1469 
1470 
1471 
1472 
1473 
1474 
1475 
/*
 * vmballoon_work() - periodic balloon worker.
 *
 * @work: the embedded delayed work item.
 *
 * Performs a pending reset if one is required, queries the hypervisor for
 * the current target, inflates or deflates towards it, and requeues itself
 * to run roughly once per second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Account the timer tick under the configuration semaphore so it
	 * cannot race with a concurrent reset or reconfiguration.
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else  /* change < 0 */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * Requeue on the freezable workqueue; round_jiffies_relative() makes
	 * the wakeup inexact to batch timer interrupts and save power.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));

}
1517 
1518 
1519 
1520 
1521 
1522 
1523 
1524 
/*
 * vmballoon_shrinker_scan() - deflate the balloon under memory pressure.
 *
 * @shrinker: unused.
 * @sc: shrink control; up to @sc->nr_to_scan frames are deflated.
 *
 * Return: the number of deflated 4 KiB frames, or zero when the
 * configuration semaphore could not be taken without blocking.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * Do not block in reclaim context: if the semaphore is held (e.g.
	 * during a reset), just report that nothing could be freed.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Push the shrink timeout forward to delay re-inflation, damping
	 * oscillation between the shrinker and the inflate path.  WRITE_ONCE
	 * is used as the timeout is presumably read locklessly elsewhere —
	 * TODO confirm the reader side.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}
1558 
1559 
1560 
1561 
1562 
1563 
1564 
1565 
1566 
1567 static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1568                                               struct shrink_control *sc)
1569 {
1570         struct vmballoon *b = &balloon;
1571 
1572         return atomic64_read(&b->size);
1573 }
1574 
1575 static void vmballoon_unregister_shrinker(struct vmballoon *b)
1576 {
1577         if (b->shrinker_registered)
1578                 unregister_shrinker(&b->shrinker);
1579         b->shrinker_registered = false;
1580 }
1581 
1582 static int vmballoon_register_shrinker(struct vmballoon *b)
1583 {
1584         int r;
1585 
1586         
1587         if (!vmwballoon_shrinker_enable)
1588                 return 0;
1589 
1590         b->shrinker.scan_objects = vmballoon_shrinker_scan;
1591         b->shrinker.count_objects = vmballoon_shrinker_count;
1592         b->shrinker.seeks = DEFAULT_SEEKS;
1593 
1594         r = register_shrinker(&b->shrinker);
1595 
1596         if (r == 0)
1597                 b->shrinker_registered = true;
1598 
1599         return r;
1600 }
1601 
1602 
1603 
1604 
1605 #ifdef CONFIG_DEBUG_FS
1606 
/* debugfs labels for the per-page-size statistics counters */
static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};
1614 
/* debugfs labels for the general statistics counters */
static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};
1622 
1623 static int vmballoon_enable_stats(struct vmballoon *b)
1624 {
1625         int r = 0;
1626 
1627         down_write(&b->conf_sem);
1628 
1629         
1630         if (b->stats)
1631                 goto out;
1632 
1633         b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1634 
1635         if (!b->stats) {
1636                 
1637                 r = -ENOMEM;
1638                 goto out;
1639         }
1640         static_key_enable(&balloon_stat_enabled.key);
1641 out:
1642         up_write(&b->conf_sem);
1643         return r;
1644 }
1645 
1646 
1647 
1648 
1649 
1650 
1651 
1652 
1653 
1654 
1655 
1656 
/*
 * vmballoon_debug_show() - produce the debugfs "vmmemctl" report.
 *
 * @f: the seq_file to print into.
 * @offset: unused.
 *
 * Enables statistics collection on the first read, then prints the
 * capabilities, reset state, target/current size and every statistics
 * counter.
 *
 * Return: zero on success, or the error from vmballoon_enable_stats().
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enable stats if they were not enabled yet */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* capabilities and state */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* target and current size */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	/* per-command issue/failure counters */
	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	/* general event counters */
	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	/* per page-size counters */
	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}
1706 
/* generates vmballoon_debug_open() and vmballoon_debug_fops */
DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1708 
1709 static void __init vmballoon_debugfs_init(struct vmballoon *b)
1710 {
1711         b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
1712                                            &vmballoon_debug_fops);
1713 }
1714 
/*
 * vmballoon_debugfs_exit() - remove the debugfs file and free statistics.
 *
 * @b: pointer to the balloon.
 *
 * Disables the stats static key first, then removes the debugfs entry and
 * frees the statistics buffer.
 */
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_remove(b->dbg_entry);
	kfree(b->stats);
	b->stats = NULL;
}
1722 
1723 #else
1724 
/* CONFIG_DEBUG_FS disabled: no debugfs file to create */
static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}
1728 
/* CONFIG_DEBUG_FS disabled: nothing to tear down */
static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}
1732 
1733 #endif  
1734 
1735 
1736 #ifdef CONFIG_BALLOON_COMPACTION
1737 
1738 static int vmballoon_init_fs_context(struct fs_context *fc)
1739 {
1740         return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
1741 }
1742 
/* pseudo filesystem backing the inode used for balloon page migration */
static struct file_system_type vmballoon_fs = {
	.name			= "balloon-vmware",
	.init_fs_context	= vmballoon_init_fs_context,
	.kill_sb		= kill_anon_super,
};
1748 
/* mount of vmballoon_fs; set up in vmballoon_compaction_init() */
static struct vfsmount *vmballoon_mnt;
1750 
1751 
1752 
1753 
1754 
1755 
1756 
1757 
1758 
1759 
1760 
1761 
1762 
1763 
1764 
/*
 * vmballoon_migratepage() - migrate a ballooned page.
 *
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, unused.
 *
 * Deflates @page (a single-page unlock handshake with the hypervisor),
 * frees it, and inflates @newpage in its place so the balloon size stays
 * constant on success.
 *
 * Return: MIGRATEPAGE_SUCCESS on success, -EAGAIN when the configuration
 * semaphore is contended, or -EBUSY when a hypervisor operation failed.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * Do not block in the migration path: if the semaphore is held
	 * (e.g. by a reset), ask the migration core to retry later.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * Perform a one-page, 4 KiB deflate through batch slot 0.
	 * comm_lock serializes use of the communication channel with the
	 * regular inflate/deflate paths.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If the hypervisor refused to release the old page, leave
	 * everything untouched and report busy.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The old page is no longer ballooned: drop it from the balloon
	 * device bookkeeping and release the reference that kept it.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* inflate the new page through the same single-slot handshake */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * The deflate succeeded but re-inflating @newpage did not,
		 * so the balloon effectively shrank by one frame; adjust the
		 * size accordingly.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Hold a reference on behalf of the balloon; presumably
		 * dropped when the page is eventually deflated (mirrors the
		 * put_page() on the old page above).
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* update the balloon device lists under pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On success, @newpage joins the ballooned page list and the
	 * migration event is counted.  On failure neither page remains in
	 * the balloon, matching the size decrement above.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * The migration core isolated @page before calling this callback;
	 * whatever the outcome, it is no longer isolated.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}
1868 
1869 
1870 
1871 
1872 
1873 
1874 static void vmballoon_compaction_deinit(struct vmballoon *b)
1875 {
1876         if (!IS_ERR(b->b_dev_info.inode))
1877                 iput(b->b_dev_info.inode);
1878 
1879         b->b_dev_info.inode = NULL;
1880         kern_unmount(vmballoon_mnt);
1881         vmballoon_mnt = NULL;
1882 }
1883 
1884 
1885 
1886 
1887 
1888 
1889 
1890 
1891 
1892 
1893 
1894 
1895 static __init int vmballoon_compaction_init(struct vmballoon *b)
1896 {
1897         vmballoon_mnt = kern_mount(&vmballoon_fs);
1898         if (IS_ERR(vmballoon_mnt))
1899                 return PTR_ERR(vmballoon_mnt);
1900 
1901         b->b_dev_info.migratepage = vmballoon_migratepage;
1902         b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
1903 
1904         if (IS_ERR(b->b_dev_info.inode))
1905                 return PTR_ERR(b->b_dev_info.inode);
1906 
1907         b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
1908         return 0;
1909 }
1910 
1911 #else 
1912 
/* CONFIG_BALLOON_COMPACTION disabled: nothing to tear down */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}
1916 
/* CONFIG_BALLOON_COMPACTION disabled: nothing to set up, always succeeds */
static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}
1921 
1922 #endif 
1923 
/*
 * vmballoon_init() - module entry point.
 *
 * Registers the shrinker and compaction support, initializes the balloon
 * state and queues the first worker run, which performs the initial reset
 * (reset_required starts out true).
 *
 * Return: zero on success, -ENODEV when not running on VMware, or an
 * error from shrinker/compaction setup.
 */
static int __init vmballoon_init(void)
{
	int error;

	/* refuse to load on anything but a VMware hypervisor */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * vmballoon_compaction_init() writes fields of b_dev_info, so it
	 * must run after balloon_devinfo_init() has initialized the struct.
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	/* the first run of the worker performs the initial reset */
	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}
1968 
1969 
1970 
1971 
1972 
1973 
1974 
1975 late_initcall(vmballoon_init);
1976 
/*
 * vmballoon_exit() - module teardown.
 *
 * Unhooks the shrinker and doorbell, stops the worker, removes debugfs,
 * resets the connection with the hypervisor, frees every ballooned page
 * and finally releases the compaction resources.
 */
static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Reset the connection (null capabilities) before deallocating the
	 * ballooned memory, so the host does not see the guest touching
	 * pages it still considers ballooned.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* compaction can only be torn down once the worker is stopped */
	vmballoon_compaction_deinit(&balloon);
}
1996 module_exit(vmballoon_exit);