/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap. See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;

/*
 * If enabled, frontswap_store will return failure even on success. As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache. In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured). These are for information only, so they are not
 * protected against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
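
/*
 * Purely as an illustration of how frontswap_ops gets populated (this is
 * not part of this file's logic, and the my_* names are hypothetical
 * placeholders; the real struct layout lives in <linux/frontswap.h>), a
 * backend would typically fill in a struct frontswap_ops and pass it to
 * frontswap_register_ops(), defined below, from its own init code:
 *
 *	static struct frontswap_ops my_frontswap_ops = {
 *		.init		 = my_init,
 *		.store		 = my_store,
 *		.load		 = my_load,
 *		.invalidate_page = my_invalidate_page,
 *		.invalidate_area = my_invalidate_area,
 *	};
 *
 *	static int __init my_backend_init(void)
 *	{
 *		struct frontswap_ops *old;
 *
 *		old = frontswap_register_ops(&my_frontswap_ops);
 *		if (IS_ERR(old))
 *			return PTR_ERR(old);
 *		return 0;
 *	}
 */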

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * Specifically, when no backend is registered (nobody called
 * frontswap_register_ops) all calls to frontswap_init (which is done via
 * swapon -> enable_swap_info -> frontswap_init) are remembered (via the
 * setting of the need_init bitmap) but fail to create tmem_pools. When a
 * backend registers with frontswap at some later point, the previous
 * calls to frontswap_init are executed (by iterating over the need_init
 * bitmap) to create tmem_pools and set the respective poolids. All of that
 * is guarded by us using atomic bit operations on the 'need_init' bitmap.
 *
 * This does not guard us against the user deciding to call swapoff right
 * as we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we
 * are OK. The other scenario, where calls to frontswap_store (called via
 * swap_writepage) race with frontswap_invalidate_area (called via swapoff),
 * is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap subsystem
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests - at which point frontswap_ops
 * would have to be made atomic in some fashion.
 */
static DECLARE_BITMAP(need_init, MAX_SWAPFILES);

/*
 * Register operations for frontswap, returning the previous ops and thus
 * allowing detection of multiple backends and possible nesting.
 */
struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops *old = frontswap_ops;
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (test_and_clear_bit(i, need_init)) {
			struct swap_info_struct *sis = swap_info[i];
			/* __frontswap_init _should_ have set it! */
			if (!sis->frontswap_map)
				return ERR_PTR(-EINVAL);
			ops->init(i);
		}
	}
	/*
	 * We MUST have frontswap_ops set _after_ the frontswap_init's
	 * have been called. Otherwise __frontswap_store might fail. Hence
	 * the barrier to make sure the compiler does not reorder us.
	 */
	barrier();
	frontswap_ops = ops;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
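
/*
 * Illustration only (not part of this file's logic): a backend wanting
 * either of the behaviours selected by the two setters above would
 * typically flip the knobs right after registering its ops, e.g.
 *
 *	frontswap_register_ops(&my_frontswap_ops);
 *	frontswap_writethrough(true);
 *	frontswap_tmem_exclusive_gets(true);
 *
 * where my_frontswap_ops is the backend's own (hypothetical) ops struct.
 */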

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have in order to figure out
	 * which pages have gone into frontswap. Without it there is no
	 * point in continuing.
	 */
	if (WARN_ON(!map))
		return;
	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function is called or will be loaded later, we
	 * _MUST_ have p->frontswap set to something valid to work properly.
	 */
	frontswap_map_set(sis, map);
	if (frontswap_ops)
		frontswap_ops->init(type);
	else {
		BUG_ON(type >= MAX_SWAPFILES);
		set_bit(type, need_init);
	}
}
EXPORT_SYMBOL(__frontswap_init);

bool __frontswap_test(struct swap_info_struct *sis,
				pgoff_t offset)
{
	bool ret = false;

	if (frontswap_ops && sis->frontswap_map)
		ret = test_bit(offset, sis->frontswap_map);
	return ret;
}
EXPORT_SYMBOL(__frontswap_test);

static inline void __frontswap_clear(struct swap_info_struct *sis,
				pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset. Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	/*
	 * Return if no backend is registered.
	 * No need to increment frontswap_failed_stores here.
	 */
	if (!frontswap_ops)
		return ret;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (__frontswap_test(sis, offset))
		dup = 1;
	ret = frontswap_ops->store(type, offset, page);
	if (ret == 0) {
		set_bit(offset, sis->frontswap_map);
		inc_frontswap_succ_stores();
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else {
		/*
		 * A failed dup always results in an automatic invalidate of
		 * the (older) page from frontswap.
		 */
		inc_frontswap_failed_stores();
		if (dup) {
			__frontswap_clear(sis, offset);
			frontswap_ops->invalidate_page(type, offset);
		}
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data. Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset))
		ret = frontswap_ops->load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			__frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);
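
/*
 * A simplified, illustrative sketch (not the literal code in the swap
 * path) of how callers are expected to treat the return values of the
 * store/load entry points above: 0 means frontswap handled the page,
 * anything else means fall back to the real swap device.
 *
 *	if (__frontswap_store(page) == 0)
 *		return;		(page is in frontswap; no device write is
 *				 needed - note writethrough mode forces -1)
 *	submit_device_write(page);	(hypothetical fallback path)
 *
 * Loads are symmetric: a 0 return from __frontswap_load() means the page
 * was filled from frontswap and no device read is required.
 */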

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset)) {
		frontswap_ops->invalidate_page(type, offset);
		__frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	if (frontswap_ops) {
		BUG_ON(sis == NULL);
		if (sis->frontswap_map == NULL)
			return;
		frontswap_ops->invalidate_area(type);
		atomic_set(&sis->frontswap_pages, 0);
		bitmap_zero(sis->frontswap_map, sis->max);
	}
	clear_bit(type, need_init);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list)
		totalpages += atomic_read(&si->frontswap_pages);
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = si->type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check whether it is necessary and feasible to unuse pages.
 * Returns 1 when there is nothing to do, 0 when pages need to be unused,
 * or an error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}
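
/*
 * Worked example of the accounting above (illustrative numbers only): if
 * the devices on swap_active_head currently hold 1000 frontswap pages in
 * total and the caller asks for target_pages == 600, __frontswap_shrink()
 * computes 400 pages to unuse, and __frontswap_unuse_pages() picks the
 * first device for which that many pages can be accounted via
 * security_vm_enough_memory_mm(). A returned pages_to_unuse of 0 tells the
 * caller to unuse every frontswap page on the chosen device.
 */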

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrinking" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to -- subject to memory constraints --
 * reduce the number of pages in frontswap to the number given in the
 * parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart the scan from swap_active_head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);

/*
 * Count and return the number of frontswap pages across all
 * swap devices. This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
			   &frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
			   root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);
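
/*
 * With debugfs mounted in its usual location, the read-only counters
 * created above appear as:
 *
 *	/sys/kernel/debug/frontswap/loads
 *	/sys/kernel/debug/frontswap/succ_stores
 *	/sys/kernel/debug/frontswap/failed_stores
 *	/sys/kernel/debug/frontswap/invalidates
 */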