root/arch/ia64/include/asm/ptrace.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. user_stack_pointer
  2. is_syscall_success
  3. regs_return_value

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * Copyright (C) 1998-2004 Hewlett-Packard Co
   4  *      David Mosberger-Tang <davidm@hpl.hp.com>
   5  *      Stephane Eranian <eranian@hpl.hp.com>
   6  * Copyright (C) 2003 Intel Co
   7  *      Suresh Siddha <suresh.b.siddha@intel.com>
   8  *      Fenghua Yu <fenghua.yu@intel.com>
   9  *      Arun Sharma <arun.sharma@intel.com>
  10  *
  11  * 12/07/98     S. Eranian      added pt_regs & switch_stack
  12  * 12/21/98     D. Mosberger    updated to match latest code
  13  *  6/17/99     D. Mosberger    added second unat member to "struct switch_stack"
  14  *
  15  */
  16 #ifndef _ASM_IA64_PTRACE_H
  17 #define _ASM_IA64_PTRACE_H
  18 
  19 #ifndef ASM_OFFSETS_C
  20 #include <asm/asm-offsets.h>
  21 #endif
  22 #include <uapi/asm/ptrace.h>
  23 
  24 /*
  25  * Base-2 logarithm of number of pages to allocate per task structure
  26  * (including register backing store and memory stack):
  27  */
  28 #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
  29 # define KERNEL_STACK_SIZE_ORDER                3
  30 #elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
  31 # define KERNEL_STACK_SIZE_ORDER                2
  32 #elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
  33 # define KERNEL_STACK_SIZE_ORDER                1
  34 #else
  35 # define KERNEL_STACK_SIZE_ORDER                0
  36 #endif
  37 
  38 #define IA64_RBS_OFFSET                 ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
  39 #define IA64_STK_OFFSET                 ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
  40 
  41 #define KERNEL_STACK_SIZE               IA64_STK_OFFSET
  42 
  43 #ifndef __ASSEMBLY__
  44 
  45 #include <asm/current.h>
  46 #include <asm/page.h>
  47 
  48 /*
  49  * We use the ia64_psr(regs)->ri to determine which of the three
  50  * instructions in bundle (16 bytes) took the sample. Generate
  51  * the canonical representation by adding to instruction pointer.
  52  */
  53 # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
  54 
  55 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
  56 {
  57         /* FIXME: should this be bspstore + nr_dirty regs? */
  58         return regs->ar_bspstore;
  59 }
  60 
  61 static inline int is_syscall_success(struct pt_regs *regs)
  62 {
  63         return regs->r10 != -1;
  64 }
  65 
  66 static inline long regs_return_value(struct pt_regs *regs)
  67 {
  68         if (is_syscall_success(regs))
  69                 return regs->r8;
  70         else
  71                 return -regs->r8;
  72 }
  73 
  74 /* Conserve space in histogram by encoding slot bits in address
  75  * bits 2 and 3 rather than bits 0 and 1.
  76  */
  77 #define profile_pc(regs)                                                \
  78 ({                                                                      \
  79         unsigned long __ip = instruction_pointer(regs);                 \
  80         (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
  81 })
  82 /*
  83  * Why not default?  Because user_stack_pointer() on ia64 gives register
  84  * stack backing store instead...
  85  */
  86 #define current_user_stack_pointer() (current_pt_regs()->r12)
  87 
  88   /* given a pointer to a task_struct, return the user's pt_regs */
  89 # define task_pt_regs(t)                (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
  90 # define ia64_psr(regs)                 ((struct ia64_psr *) &(regs)->cr_ipsr)
  91 # define user_mode(regs)                (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
  92 # define user_stack(task,regs)  ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
  93 # define fsys_mode(task,regs)                                   \
  94   ({                                                            \
  95           struct task_struct *_task = (task);                   \
  96           struct pt_regs *_regs = (regs);                       \
  97           !user_mode(_regs) && user_stack(_task, _regs);        \
  98   })
  99 
 100   /*
 101    * System call handlers that, upon successful completion, need to return a negative value
 102    * should call force_successful_syscall_return() right before returning.  On architectures
 103    * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 104    * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 105    * flag will not get set.  On architectures which do not support a separate error flag,
 106    * the macro is a no-op and the spurious error condition needs to be filtered out by some
 107    * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
 108    * or something along those lines).
 109    *
 110    * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 111    */
 112 # define force_successful_syscall_return()      (task_pt_regs(current)->r8 = 0)
 113 
 114   struct task_struct;                   /* forward decl */
 115   struct unw_frame_info;                /* forward decl */
 116 
 117   extern void ia64_do_show_stack (struct unw_frame_info *, void *);
 118   extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
 119                                               unsigned long *);
 120   extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
 121                          unsigned long, long *);
 122   extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
 123                          unsigned long, long);
 124   extern void ia64_flush_fph (struct task_struct *);
 125   extern void ia64_sync_fph (struct task_struct *);
 126   extern void ia64_sync_krbs(void);
 127   extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
 128                                   unsigned long, unsigned long);
 129 
 130   /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
 131   extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
 132   /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
 133   extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
 134 
 135   extern void ia64_increment_ip (struct pt_regs *pt);
 136   extern void ia64_decrement_ip (struct pt_regs *pt);
 137 
 138   extern void ia64_ptrace_stop(void);
 139   #define arch_ptrace_stop(code, info) \
 140         ia64_ptrace_stop()
 141   #define arch_ptrace_stop_needed(code, info) \
 142         (!test_thread_flag(TIF_RESTORE_RSE))
 143 
 144   extern void ptrace_attach_sync_user_rbs (struct task_struct *);
 145   #define arch_ptrace_attach(child) \
 146         ptrace_attach_sync_user_rbs(child)
 147 
 148   #define arch_has_single_step()  (1)
 149   #define arch_has_block_step()   (1)
 150 
 151 #endif /* !__ASSEMBLY__ */
 152 #endif /* _ASM_IA64_PTRACE_H */

/* [<][>][^][v][top][bottom][index][help] */