root/arch/x86/kernel/jump_label.c

DEFINITIONS

This source file includes the following definitions:
  1. bug_at
  2. __jump_label_set_jump_code
  3. __jump_label_transform
  4. arch_jump_label_transform
  5. arch_jump_label_transform_queue
  6. arch_jump_label_transform_apply
  7. arch_jump_label_transform_static

// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

union jump_code_union {
        char code[JUMP_LABEL_NOP_SIZE];
        struct {
                char jump;
                int offset;
        } __attribute__((packed));
};

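/*
 * Illustrative sketch (not part of the kernel source): a 5-byte x86
 * near JMP is the opcode 0xe9 followed by a 32-bit displacement
 * relative to the end of the instruction. Filling the two struct
 * fields therefore produces the exact instruction bytes in code[]:
 *
 *      union jump_code_union c;
 *      c.jump   = 0xe9;
 *      c.offset = target - (addr + JUMP_LABEL_NOP_SIZE);
 *      // c.code now holds e9 plus the 4-byte little-endian displacement
 */
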
static void bug_at(unsigned char *ip, int line)
{
        /*
         * The location is not an op that we were expecting.
         * Something went wrong. Crash the box, as something could be
         * corrupting the kernel.
         */
        pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
        BUG();
}

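/*
 * Note (output is hypothetical): %5ph dumps the five bytes found at ip,
 * so a crash report might look like:
 *
 *      jump_label: Fatal kernel bug, unexpected op at foo+0x4/0x30
 *      [ffffffff81001234] (e9 ab cd ef 01) 57
 */
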
static void __jump_label_set_jump_code(struct jump_entry *entry,
                                       enum jump_label_type type,
                                       union jump_code_union *code,
                                       int init)
{
        const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
        const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
        const void *expect;
        int line;

        code->jump = 0xe9;
        code->offset = jump_entry_target(entry) -
                       (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);

        if (init) {
                expect = default_nop; line = __LINE__;
        } else if (type == JUMP_LABEL_JMP) {
                expect = ideal_nop; line = __LINE__;
        } else {
                expect = code->code; line = __LINE__;
        }

        if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
                bug_at((void *)jump_entry_code(entry), line);

        if (type == JUMP_LABEL_NOP)
                memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
}

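/*
 * Worked example with hypothetical addresses: for a jump site at
 * 0x1000 (jump_entry_code) and a target label at 0x1020
 * (jump_entry_target), the displacement is
 * 0x1020 - (0x1000 + 5) = 0x1b, so the JMP is encoded as
 * e9 1b 00 00 00.
 */
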
static void __ref __jump_label_transform(struct jump_entry *entry,
                                         enum jump_label_type type,
                                         int init)
{
        union jump_code_union code;

        __jump_label_set_jump_code(entry, type, &code, init);

        /*
         * As long as only a single processor is running and the code has
         * not yet been marked read-only, text_poke_early() can be used;
         * checking that system_state is SYSTEM_BOOTING guarantees it. The
         * state is set to SYSTEM_SCHEDULING before other cores are woken
         * and before the code is write-protected.
         *
         * While the change is being made, ignore whether we are doing a
         * nop -> jump or a jump -> nop transition, and always assume nop
         * is the 'currently valid' instruction.
         */
        if (init || system_state == SYSTEM_BOOTING) {
                text_poke_early((void *)jump_entry_code(entry), &code,
                                JUMP_LABEL_NOP_SIZE);
                return;
        }

        text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE,
                     (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
}

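/*
 * Rough shape of what text_poke_bp() does (simplified pseudocode, not
 * the real implementation):
 *
 *      write INT3 (0xcc) over the first byte;  // trap concurrent execution
 *      sync all cores;
 *      write the remaining 4 instruction bytes;
 *      sync all cores;
 *      restore the real first byte over INT3;
 *      sync all cores;
 *
 * A CPU that executes the site mid-patch hits the INT3 and is diverted
 * to the handler address passed as the last argument, here the
 * instruction following the jump site.
 */
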
void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
{
        mutex_lock(&text_mutex);
        __jump_label_transform(entry, type, 0);
        mutex_unlock(&text_mutex);
}

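/*
 * Call-path sketch (simplified; my_key is a hypothetical example key):
 *
 *      DEFINE_STATIC_KEY_FALSE(my_key);
 *      static_branch_enable(&my_key);
 *
 * makes the generic jump_label code walk every jump_entry attached to
 * the key and hand each site to the arch hooks (this function, or the
 * batching pair below), with the type saying whether the site's branch
 * should now be a jump or a nop.
 */
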
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

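/*
 * Capacity example (indicative only, since it depends on the layout of
 * struct text_poke_loc): with 4 KiB pages and a 32-byte text_poke_loc
 * on a 64-bit build, TP_VEC_MAX = 4096 / 32 = 128 patch sites can be
 * queued per batch.
 */
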
bool arch_jump_label_transform_queue(struct jump_entry *entry,
                                     enum jump_label_type type)
{
        struct text_poke_loc *tp;
        void *entry_code;

        if (system_state == SYSTEM_BOOTING) {
                /*
                 * Fall back to non-batching mode.
                 */
                arch_jump_label_transform(entry, type);
                return true;
        }

        /*
         * No more space in the vector, tell the upper layer to apply
         * the queue before continuing.
         */
        if (tp_vec_nr == TP_VEC_MAX)
                return false;

        tp = &tp_vec[tp_vec_nr];

        entry_code = (void *)jump_entry_code(entry);

        /*
         * The INT3 handler does a bsearch over the queue, so entries must
         * be sorted. We can survive an out-of-order entry by rejecting it,
         * which forces the generic jump_label code to apply the queue
         * first. Warn once to draw attention to this case: it had better
         * not happen, because in the worst case we perform no better than
         * the non-batching mode, just with some extra overhead.
         */
        if (tp_vec_nr > 0) {
                int prev = tp_vec_nr - 1;
                struct text_poke_loc *prev_tp = &tp_vec[prev];

                if (WARN_ON_ONCE(prev_tp->addr > entry_code))
                        return false;
        }

        __jump_label_set_jump_code(entry, type,
                                   (union jump_code_union *) &tp->opcode, 0);

        tp->addr = entry_code;
        tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
        tp->len = JUMP_LABEL_NOP_SIZE;

        tp_vec_nr++;

        return true;
}

void arch_jump_label_transform_apply(void)
{
        if (!tp_vec_nr)
                return;

        mutex_lock(&text_mutex);
        text_poke_bp_batch(tp_vec, tp_vec_nr);
        mutex_unlock(&text_mutex);

        tp_vec_nr = 0;
}

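/*
 * Usage sketch of the queue/apply contract, roughly how the generic
 * jump_label layer drives these two hooks (simplified pseudocode):
 *
 *      for each entry to update:
 *              if (!arch_jump_label_transform_queue(entry, type)) {
 *                      arch_jump_label_transform_apply(); // vector full: flush
 *                      arch_jump_label_transform_queue(entry, type);
 *              }
 *      arch_jump_label_transform_apply();                 // flush the remainder
 */
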
static enum {
        JL_STATE_START,
        JL_STATE_NO_UPDATE,
        JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
                                                       enum jump_label_type type)
{
        /*
         * This function is called at boot and when modules are first
         * loaded. Check whether the default nop, the one inserted at
         * compile time, is the ideal nop. If it is, the nop does not need
         * to be updated and can be left as is. If it is not, it must be
         * rewritten to the ideal nop.
         */
        if (jlstate == JL_STATE_START) {
                const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
                const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

                if (memcmp(ideal_nop, default_nop, 5) != 0)
                        jlstate = JL_STATE_UPDATE;
                else
                        jlstate = JL_STATE_NO_UPDATE;
        }
        if (jlstate == JL_STATE_UPDATE)
                __jump_label_transform(entry, type, 1);
}
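
/*
 * Example of the decision above (encodings are illustrative): if the
 * compile-time default nop and the boot-selected ideal nop are both
 * the 5-byte 0f 1f 44 00 00, no site needs rewriting
 * (JL_STATE_NO_UPDATE); if the running CPU prefers a different
 * encoding, every site is rewritten once to the ideal nop
 * (JL_STATE_UPDATE).
 */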
