arch/x86/kernel/step.c

DEFINITIONS

This source file includes the following definitions:
  1. convert_ip_to_linear
  2. is_setting_trap_flag
  3. enable_single_step
  4. set_task_blockstep
  5. enable_step
  6. user_enable_single_step
  7. user_enable_block_step
  8. user_disable_single_step

// SPDX-License-Identifier: GPL-2.0
/*
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>

unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
        unsigned long addr, seg;

        addr = regs->ip;
        seg = regs->cs;
        if (v8086_mode(regs)) {
                addr = (addr & 0xffff) + (seg << 4);
                return addr;
        }

#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * We'll assume that the code segments in the GDT
         * are all zero-based. That is largely true: the
         * TLS segments are used for data, and the PNPBIOS
         * and APM bios ones we just ignore here.
         */
        if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
                struct desc_struct *desc;
                unsigned long base;

                seg >>= 3;

                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
                             seg >= child->mm->context.ldt->nr_entries))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
                        base = get_desc_base(desc);

                        /* 16-bit code segment? */
                        if (!desc->d)
                                addr &= 0xffff;
                        addr += base;
                }
                mutex_unlock(&child->mm->context.lock);
        }
#endif

        return addr;
}
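
In the v8086 path above, the linear address is the classic real-mode calculation: the 16-bit segment shifted left by four, plus the offset truncated to 16 bits. A minimal userspace sketch of the same arithmetic, for illustration only (the helper name and sample values are not from this file):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: real-mode/v8086 segment:offset -> linear address,
 * mirroring the (addr & 0xffff) + (seg << 4) computation above. */
static uint32_t v8086_linear(uint16_t seg, uint32_t ip)
{
        return ((uint32_t)seg << 4) + (ip & 0xffff);
}

int main(void)
{
        /* e.g. 0x1234:0x0010 -> 0x12350 */
        printf("0x%x\n", v8086_linear(0x1234, 0x0010));
        return 0;
}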

static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
        int i, copied;
        unsigned char opcode[15];
        unsigned long addr = convert_ip_to_linear(child, regs);

        copied = access_process_vm(child, addr, opcode, sizeof(opcode),
                        FOLL_FORCE);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf and iret */
                case 0x9d: case 0xcf:
                        return 1;

                        /* CHECKME: 64 65 */

                /* opcode and address size prefixes */
                case 0x66: case 0x67:
                        continue;
                /* irrelevant prefixes (segment overrides and repeats) */
                case 0x26: case 0x2e:
                case 0x36: case 0x3e:
                case 0x64: case 0x65:
                case 0xf0: case 0xf2: case 0xf3:
                        continue;

#ifdef CONFIG_X86_64
                case 0x40 ... 0x4f:
                        if (!user_64bit_mode(regs))
                                /* 32-bit mode: register increment */
                                return 0;
                        /* 64-bit mode: REX prefix */
                        continue;
#endif

                        /* CHECKME: f2, f3 */

                /*
                 * pushf: NOTE! We should probably not let
                 * the user see the TF bit being set. But
                 * it's more pain than it's worth to avoid
                 * it, and a debugger could emulate this
                 * all in user space if it _really_ cares.
                 */
                case 0x9c:
                default:
                        return 0;
                }
        }
        return 0;
}
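
is_setting_trap_flag() reads up to 15 bytes of the tracee's instruction stream through access_process_vm() and skips prefix bytes, looking for popf (0x9d) or iret (0xcf), which would themselves modify TF. A tracer can fetch the same bytes from userspace; the sketch below uses process_vm_readv() and assumes pid and rip were already obtained from waitpid() and PTRACE_GETREGS (error handling trimmed):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Sketch: read up to 15 opcode bytes at the tracee's instruction pointer.
 * The caller supplies a stopped, ptrace-attached pid and its current rip. */
static ssize_t read_opcode_bytes(pid_t pid, unsigned long rip,
                                 unsigned char buf[15])
{
        struct iovec local  = { .iov_base = buf, .iov_len = 15 };
        struct iovec remote = { .iov_base = (void *)rip, .iov_len = 15 };

        return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}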

/*
 * Enable single-stepping.  Return nonzero if user mode is not using TF itself.
 */
static int enable_single_step(struct task_struct *child)
{
        struct pt_regs *regs = task_pt_regs(child);
        unsigned long oflags;

        /*
         * If we stepped into a sysenter/syscall insn, it trapped in
         * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
         * If user-mode had set TF itself, then it's still clear from
         * do_debug() and we need to set it again to restore the user
         * state so we don't wrongly set TIF_FORCED_TF below.
         * If enable_single_step() was used last and that is what
         * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
         * already set and our bookkeeping is fine.
         */
        if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
                regs->flags |= X86_EFLAGS_TF;

        /*
         * Always set TIF_SINGLESTEP - this guarantees that
         * we single-step system calls etc..  This will also
         * cause us to set TF when returning to user mode.
         */
        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        oflags = regs->flags;

        /* Set TF on the kernel stack.. */
        regs->flags |= X86_EFLAGS_TF;

        /*
         * ..but if TF is changed by the instruction we will trace,
         * don't mark it as being "us" that set it, so that we
         * won't clear it by hand later.
         *
         * Note that if we don't actually execute the popf because
         * of a signal arriving right now or suchlike, we will lose
         * track of the fact that it really was "us" that set it.
         */
        if (is_setting_trap_flag(child, regs)) {
                clear_tsk_thread_flag(child, TIF_FORCED_TF);
                return 0;
        }

        /*
         * If TF was already set, check whether it was us who set it.
         * If not, we should never attempt a block step.
         */
        if (oflags & X86_EFLAGS_TF)
                return test_tsk_thread_flag(child, TIF_FORCED_TF);

        set_tsk_thread_flag(child, TIF_FORCED_TF);

        return 1;
}
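
enable_single_step() forces TF into the tracee's saved EFLAGS and records in TIF_FORCED_TF whether it was the kernel, rather than the tracee, that set it (the tracer-side register readout in arch/x86/kernel/ptrace.c uses that flag to hide a forced TF again). As the pushf comment in is_setting_trap_flag() concedes, the tracee itself can still observe the bit. A minimal, illustrative way for a program to inspect its own TF (x86 inline asm, GCC/Clang assumed):

#include <stdio.h>

#define X86_EFLAGS_TF 0x100UL   /* trap flag, bit 8 of EFLAGS */

/* Sketch: read this task's own EFLAGS via pushf/pop. */
static unsigned long read_eflags(void)
{
        unsigned long flags;

        __asm__ volatile ("pushf\n\tpop %0" : "=r" (flags));
        return flags;
}

int main(void)
{
        printf("TF is %s\n",
               (read_eflags() & X86_EFLAGS_TF) ? "set" : "clear");
        return 0;
}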

void set_task_blockstep(struct task_struct *task, bool on)
{
        unsigned long debugctl;

        /*
         * Ensure irq/preemption can't change debugctl in between.
         * Note also that both TIF_BLOCKSTEP and debugctl should
         * be changed atomically wrt preemption.
         *
         * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
         * task is current or it can't be running, otherwise we can race
         * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
         * PTRACE_KILL is not safe.
         */
        local_irq_disable();
        debugctl = get_debugctlmsr();
        if (on) {
                debugctl |= DEBUGCTLMSR_BTF;
                set_tsk_thread_flag(task, TIF_BLOCKSTEP);
        } else {
                debugctl &= ~DEBUGCTLMSR_BTF;
                clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
        }
        if (task == current)
                update_debugctlmsr(debugctl);
        local_irq_enable();
}
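
set_task_blockstep() mirrors the BTF bit (bit 1 of the IA32_DEBUGCTL MSR, address 0x1d9) into TIF_BLOCKSTEP so that context switches can reload it for the right task; with BTF and TF both set, the CPU raises the single-step trap only on branches. Purely as an illustration, the MSR can be inspected from userspace through the msr driver (this assumes the msr module is loaded, root privileges, and CPU 0):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_DEBUGCTLMSR  0x1d9          /* IA32_DEBUGCTL */
#define DEBUGCTLMSR_BTF       (1ULL << 1)    /* single-step on branches */

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 ||
            pread(fd, &val, sizeof(val), MSR_IA32_DEBUGCTLMSR) != sizeof(val)) {
                perror("read IA32_DEBUGCTL");
                return 1;
        }
        printf("BTF is %s\n", (val & DEBUGCTLMSR_BTF) ? "set" : "clear");
        close(fd);
        return 0;
}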

/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
        /*
         * Make sure block stepping (BTF) is not enabled unless it should be.
         * Note that we don't try to worry about any is_setting_trap_flag()
         * instructions after the first when using block stepping.
         * So no one should try to use debugger block stepping in a program
         * that uses user-mode single stepping itself.
         */
        if (enable_single_step(child) && block)
                set_task_blockstep(child, true);
        else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
                set_task_blockstep(child, false);
}

void user_enable_single_step(struct task_struct *child)
{
        enable_step(child, 0);
}

void user_enable_block_step(struct task_struct *child)
{
        enable_step(child, 1);
}
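
user_enable_single_step() and user_enable_block_step() are the arch hooks behind the ptrace resume requests PTRACE_SINGLESTEP and PTRACE_SINGLEBLOCK. A hedged tracer-side sketch of requesting a block step (PTRACE_SINGLEBLOCK is x86-specific; the fallback value 33 comes from asm/ptrace-abi.h):

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 33   /* x86 value from asm/ptrace-abi.h */
#endif

/* Sketch: resume a stopped tracee and trap again at the next taken branch. */
static long step_one_block(pid_t pid)
{
        return ptrace(PTRACE_SINGLEBLOCK, pid, NULL, NULL);
}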

void user_disable_single_step(struct task_struct *child)
{
        /*
         * Make sure block stepping (BTF) is disabled.
         */
        if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
                set_task_blockstep(child, false);

        /* Always clear TIF_SINGLESTEP... */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /* But touch TF only if it was set by us.. */
        if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
                task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
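
Taken together: PTRACE_SINGLESTEP lands in user_enable_single_step(), PTRACE_SINGLEBLOCK in user_enable_block_step(), and other resumptions such as PTRACE_CONT in user_disable_single_step(), which undoes only the TF that the kernel itself forced. A hedged end-to-end sketch of a tracer that single-steps a child a few times and then lets it run (x86-64 Linux, glibc; error handling kept minimal):

#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
        int status = 0;
        pid_t pid = fork();

        if (pid == 0) {
                /* Tracee: stop under the tracer at execve(). */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execlp("true", "true", (char *)NULL);
                _exit(1);
        }

        waitpid(pid, &status, 0);       /* initial SIGTRAP stop */

        for (int i = 0; i < 5 && WIFSTOPPED(status); i++) {
                struct user_regs_struct regs;

                /* PTRACE_SINGLESTEP -> user_enable_single_step() */
                if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1)
                        break;
                if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
                        break;
                if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
                        printf("step %d: rip=%#llx\n", i,
                               (unsigned long long)regs.rip);
        }

        /* PTRACE_CONT -> user_disable_single_step(), then let it finish. */
        if (WIFSTOPPED(status))
                ptrace(PTRACE_CONT, pid, NULL, NULL);
        waitpid(pid, &status, 0);
        return 0;
}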
