root/mm/interval_tree.c


DEFINITIONS

This source file includes the following definitions:
  1. vma_start_pgoff
  2. vma_last_pgoff
  3. INTERVAL_TREE_DEFINE (vma_interval_tree)
  4. vma_interval_tree_insert_after
  5. avc_start_pgoff
  6. avc_last_pgoff
  7. INTERVAL_TREE_DEFINE (__anon_vma_interval_tree)
  8. anon_vma_interval_tree_insert
  9. anon_vma_interval_tree_remove
  10. anon_vma_interval_tree_iter_first
  11. anon_vma_interval_tree_iter_next
  12. anon_vma_interval_tree_verify

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

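/*
 * A VMA maps the closed range of file page offsets
 * [vm_pgoff, vm_pgoff + vma_pages() - 1]; these two helpers provide the
 * start/last keys the interval tree is indexed by.
 */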
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff + vma_pages(v) - 1;
}

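/*
 * Generate vma_interval_tree_{insert,remove,iter_first,iter_next}() for
 * mapping->i_mmap: nodes are linked through shared.rb and augmented with
 * the per-subtree maximum last pgoff in shared.rb_subtree_last.
 */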
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
                     unsigned long, shared.rb_subtree_last,
                     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

/*
 * Insert node immediately after prev in the interval tree.
 *
 * node must start at the same pgoff as prev; it is linked at prev's in-order
 * successor position (prev's right child if that slot is empty, otherwise the
 * leftmost node of prev's right subtree), with the augmented rb_subtree_last
 * values updated on the way down before the tree is rebalanced.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root_cached *root)
{
        struct rb_node **link;
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);

        VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

        if (!prev->shared.rb.rb_right) {
                parent = prev;
                link = &prev->shared.rb.rb_right;
        } else {
                parent = rb_entry(prev->shared.rb.rb_right,
                                  struct vm_area_struct, shared.rb);
                if (parent->shared.rb_subtree_last < last)
                        parent->shared.rb_subtree_last = last;
                while (parent->shared.rb.rb_left) {
                        parent = rb_entry(parent->shared.rb.rb_left,
                                struct vm_area_struct, shared.rb);
                        if (parent->shared.rb_subtree_last < last)
                                parent->shared.rb_subtree_last = last;
                }
                link = &parent->shared.rb.rb_left;
        }

        node->shared.rb_subtree_last = last;
        rb_link_node(&node->shared.rb, &parent->shared.rb, link);
        rb_insert_augmented(&node->shared.rb, &root->rb_root,
                            &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
        return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
        return vma_last_pgoff(avc->vma);
}

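/*
 * Generate a second set of interval tree primitives for anon_vma->rb_root,
 * keyed on the same pgoff range but linked through struct anon_vma_chain.
 * They are emitted static inline with an __anon_vma_interval_tree prefix
 * and wrapped by the out-of-line helpers below.
 */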
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
                     avc_start_pgoff, avc_last_pgoff,
                     static inline, __anon_vma_interval_tree)

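/*
 * With CONFIG_DEBUG_VM_RB, remember the interval used as the tree key at
 * insertion time so anon_vma_interval_tree_verify() can detect a VMA whose
 * pgoff range changed while its anon_vma_chain was still in the tree.
 */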
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
        node->cached_vma_start = avc_start_pgoff(node);
        node->cached_vma_last = avc_last_pgoff(node);
#endif
        __anon_vma_interval_tree_insert(node, root);
}

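/*
 * The remaining operations are thin out-of-line wrappers around the static
 * inline implementations generated above, matching the declarations in
 * include/linux/rmap.h.
 */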
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
        __anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
                                  unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                                 unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_next(node, first, last);
}

#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif
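
Usage note: the iterators generated above are normally consumed through the
vma_interval_tree_foreach() helper declared in include/linux/mm.h. The sketch
below is illustrative and not part of this file; the function name and the
printout are hypothetical, and it assumes the caller may take i_mmap_rwsem
for reading.

static void example_dump_i_mmap(struct address_space *mapping,
                                pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        /* mapping->i_mmap is protected by i_mmap_rwsem */
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
                /* each vma returned overlaps [first, last] in pgoff space */
                pr_info("vma spans pgoff [%lu, %lu]\n",
                        vma_start_pgoff(vma), vma_last_pgoff(vma));
        i_mmap_unlock_read(mapping);
}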
