root/kernel/livepatch/patch.c


DEFINITIONS

This source file includes the following definitions:
  1. klp_find_ops
  2. klp_ftrace_handler
  3. klp_get_ftrace_location
  4. klp_unpatch_func
  5. klp_patch_func
  6. __klp_unpatch_object
  7. klp_unpatch_object
  8. klp_patch_object
  9. __klp_unpatch_objects
  10. klp_unpatch_objects
  11. klp_unpatch_objects_dynamic

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

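/*
 * klp_ops structs are tracked on this list, one per patched function.  All
 * klp_func structs that patch the same old_func share a single klp_ops (and
 * therefore a single ftrace_ops); the currently active version is the
 * klp_func at the front of ops->func_stack.
 */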
static LIST_HEAD(klp_ops);

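/*
 * Find the klp_ops struct attached to the given function address, i.e. the
 * one whose func_stack holds the replacements for old_func.  Returns NULL
 * if the function is not currently patched.
 */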
struct klp_ops *klp_find_ops(void *old_func)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_func == old_func)
                        return ops;
        }

        return NULL;
}

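/*
 * The ftrace handler attached to every patched function.  It redirects
 * execution to the replacement at the top of ops->func_stack by rewriting
 * the saved instruction pointer.  While a transition is in progress, it
 * consults current->patch_state to pick the version this task should run.
 */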
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct pt_regs *regs)
{
        struct klp_ops *ops;
        struct klp_func *func;
        int patch_state;

        ops = container_of(fops, struct klp_ops, fops);

        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);

        /*
         * func should never be NULL because preemption should be disabled here
         * and unregister_ftrace_function() does the equivalent of a
         * synchronize_rcu() before the func_stack removal.
         */
        if (WARN_ON_ONCE(!func))
                goto unlock;

        /*
         * In the enable path, enforce the order of the ops->func_stack and
         * func->transition reads.  The corresponding write barrier is in
         * __klp_enable_patch().
         *
         * (Note that this barrier technically isn't needed in the disable
         * path.  In the rare case where klp_update_patch_state() runs before
         * this handler, its TIF_PATCH_PENDING read and this func->transition
         * read need to be ordered.  But klp_update_patch_state() already
         * enforces that.)
         */
        smp_rmb();

        if (unlikely(func->transition)) {

                /*
                 * Enforce the order of the func->transition and
                 * current->patch_state reads.  Otherwise we could read an
                 * out-of-date task state and pick the wrong function.  The
                 * corresponding write barrier is in klp_init_transition().
                 */
                smp_rmb();

                patch_state = current->patch_state;

                WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

                if (patch_state == KLP_UNPATCHED) {
                        /*
                         * Use the previously patched version of the function.
                         * If no previous patches exist, continue with the
                         * original function.
                         */
                        func = list_entry_rcu(func->stack_node.next,
                                              struct klp_func, stack_node);

                        if (&func->stack_node == &ops->func_stack)
                                goto unlock;
                }
        }

        /*
         * NOPs are used to replace existing patches with original code.
         * Do nothing! Setting pc would cause an infinite loop.
         */
        if (func->nop)
                goto unlock;

        klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
        preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
        return faddr;
}
#endif

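/*
 * Remove a klp_func from its ops->func_stack.  If it was the only entry,
 * unregister the ftrace handler and free the klp_ops struct; otherwise the
 * next function on the stack becomes active again.
 */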
static void klp_unpatch_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(!func->patched))
                return;
        if (WARN_ON(!func->old_func))
                return;

        ops = klp_find_ops(func->old_func);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (WARN_ON(!ftrace_loc))
                        return;

                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->patched = false;
}

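/*
 * Redirect func->old_func to func->new_func.  For the first patch of a
 * function, allocate a klp_ops struct and hook the ftrace handler at the
 * function's ftrace location; for subsequent patches, just push the new
 * version onto the existing func_stack.
 */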
static int klp_patch_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_func))
                return -EINVAL;

        if (WARN_ON(func->patched))
                return -EINVAL;

        ops = klp_find_ops(func->old_func);
        if (!ops) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (!ftrace_loc) {
                        pr_err("failed to find location for function '%s'\n",
                                func->old_name);
                        return -EINVAL;
                }

                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
                                  FTRACE_OPS_FL_DYNAMIC |
                                  FTRACE_OPS_FL_IPMODIFY;

                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
                        goto err;
                }

        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->patched = true;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}

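/*
 * Unpatch the object's patched functions.  With nops_only set, only NOP
 * functions (used when one patch atomically replaces another) are removed.
 */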
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if (nops_only && !func->nop)
                        continue;

                if (func->patched)
                        klp_unpatch_func(func);
        }

        if (obj->dynamic || !nops_only)
                obj->patched = false;
}

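/* Unpatch all of the object's functions, NOPs included. */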
void klp_unpatch_object(struct klp_object *obj)
{
        __klp_unpatch_object(obj, false);
}

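/*
 * Patch every function in the object.  On failure, any functions already
 * patched are rolled back so the object is left fully unpatched.
 */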
int klp_patch_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->patched))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_patch_func(func);
                if (ret) {
                        klp_unpatch_object(obj);
                        return ret;
                }
        }
        obj->patched = true;

        return 0;
}

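/* Unpatch all patched objects of the patch, optionally NOP functions only. */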
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        __klp_unpatch_object(obj, nops_only);
}

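/* Unpatch all objects of the patch, NOPs included. */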
void klp_unpatch_objects(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, false);
}

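/* Unpatch only the dynamically allocated NOP functions of the patch. */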
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, true);
}
