LCOV - code coverage report
Current view: top level - include/linux - ptrace.h (source / functions)
Test: coverage.info
Date: 2014-02-18

                  Hit    Total    Coverage
Lines:             21       23      91.3 %
Functions:          0        0         -
Branches:          14       26      53.8 %

           Branch data     Line data    Source code
       1                 :            : #ifndef _LINUX_PTRACE_H
       2                 :            : #define _LINUX_PTRACE_H
       3                 :            : 
       4                 :            : #include <linux/compiler.h>               /* For unlikely.  */
       5                 :            : #include <linux/sched.h>          /* For struct task_struct.  */
       6                 :            : #include <linux/err.h>                    /* for IS_ERR_VALUE */
       7                 :            : #include <linux/bug.h>                    /* For BUG_ON.  */
       8                 :            : #include <uapi/linux/ptrace.h>
       9                 :            : 
      10                 :            : /*
      11                 :            :  * Ptrace flags
      12                 :            :  *
       13                 :            :  * The ownership rules for task->ptrace, which holds the ptrace flags,
       14                 :            :  * are simple.  When a task is running, it owns its own task->ptrace
       15                 :            :  * flags.  When a task is stopped, the ptracer owns task->ptrace.
      16                 :            :  */
      17                 :            : 
      18                 :            : #define PT_SEIZED       0x00010000      /* SEIZE used, enable new behavior */
      19                 :            : #define PT_PTRACED      0x00000001
      20                 :            : #define PT_DTRACE       0x00000002      /* delayed trace (used on m68k, i386) */
      21                 :            : #define PT_PTRACE_CAP   0x00000004      /* ptracer can follow suid-exec */
      22                 :            : 
      23                 :            : #define PT_OPT_FLAG_SHIFT       3
      24                 :            : /* PT_TRACE_* event enable flags */
      25                 :            : #define PT_EVENT_FLAG(event)    (1 << (PT_OPT_FLAG_SHIFT + (event)))
      26                 :            : #define PT_TRACESYSGOOD         PT_EVENT_FLAG(0)
      27                 :            : #define PT_TRACE_FORK           PT_EVENT_FLAG(PTRACE_EVENT_FORK)
      28                 :            : #define PT_TRACE_VFORK          PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
      29                 :            : #define PT_TRACE_CLONE          PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
      30                 :            : #define PT_TRACE_EXEC           PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
      31                 :            : #define PT_TRACE_VFORK_DONE     PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
      32                 :            : #define PT_TRACE_EXIT           PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
      33                 :            : #define PT_TRACE_SECCOMP        PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
      34                 :            : 
      35                 :            : #define PT_EXITKILL             (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
      36                 :            : 
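/*
 * Editor's sketch (not part of ptrace.h; the helper name is made up): how
 * PT_EVENT_FLAG() lays out the per-event enable bits above the low control
 * bits.  PTRACE_EVENT_FORK is 1 in <uapi/linux/ptrace.h>, so PT_TRACE_FORK
 * lands on bit 4 (0x10), leaving bits 0-2 free for PT_PTRACED, PT_DTRACE
 * and PT_PTRACE_CAP.
 */
static inline void ptrace_flag_layout_example(void)
{
	BUILD_BUG_ON(PT_TRACE_FORK != (1 << (PT_OPT_FLAG_SHIFT + PTRACE_EVENT_FORK)));
	BUILD_BUG_ON(PT_TRACE_FORK != 0x10);
}
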
      37                 :            : /* single stepping state bits (used on ARM and PA-RISC) */
      38                 :            : #define PT_SINGLESTEP_BIT       31
      39                 :            : #define PT_SINGLESTEP           (1<<PT_SINGLESTEP_BIT)
      40                 :            : #define PT_BLOCKSTEP_BIT        30
      41                 :            : #define PT_BLOCKSTEP            (1<<PT_BLOCKSTEP_BIT)
      42                 :            : 
      43                 :            : extern long arch_ptrace(struct task_struct *child, long request,
      44                 :            :                         unsigned long addr, unsigned long data);
      45                 :            : extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
      46                 :            : extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
      47                 :            : extern void ptrace_disable(struct task_struct *);
      48                 :            : extern int ptrace_request(struct task_struct *child, long request,
      49                 :            :                           unsigned long addr, unsigned long data);
      50                 :            : extern void ptrace_notify(int exit_code);
      51                 :            : extern void __ptrace_link(struct task_struct *child,
      52                 :            :                           struct task_struct *new_parent);
      53                 :            : extern void __ptrace_unlink(struct task_struct *child);
      54                 :            : extern void exit_ptrace(struct task_struct *tracer);
      55                 :            : #define PTRACE_MODE_READ        0x01
      56                 :            : #define PTRACE_MODE_ATTACH      0x02
      57                 :            : #define PTRACE_MODE_NOAUDIT     0x04
      58                 :            : /* Returns true on success, false on denial. */
      59                 :            : extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
      60                 :            : 
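/*
 * Editor's sketch (hypothetical caller, not part of ptrace.h): a typical
 * PTRACE_MODE_READ check, as done before exposing another task's state
 * through a /proc-style interface.  The helper name is made up.
 */
static inline bool example_may_read_task(struct task_struct *task)
{
	return ptrace_may_access(task, PTRACE_MODE_READ);
}
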
      61                 :            : static inline int ptrace_reparented(struct task_struct *child)
      62                 :            : {
      63                 :    2298643 :         return !same_thread_group(child->real_parent, child->parent);
      64                 :            : }
      65                 :            : 
      66                 :            : static inline void ptrace_unlink(struct task_struct *child)
      67                 :            : {
      68 [ #  # ][ +  + ]:    1151924 :         if (unlikely(child->ptrace))
      69                 :          7 :                 __ptrace_unlink(child);
      70                 :            : }
      71                 :            : 
      72                 :            : int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
      73                 :            :                             unsigned long data);
      74                 :            : int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
      75                 :            :                             unsigned long data);
      76                 :            : 
      77                 :            : /**
      78                 :            :  * ptrace_parent - return the task that is tracing the given task
      79                 :            :  * @task: task to consider
      80                 :            :  *
      81                 :            :  * Returns %NULL if no one is tracing @task, or the &struct task_struct
      82                 :            :  * pointer to its tracer.
      83                 :            :  *
       84                 :            :  * Must be called under rcu_read_lock().  The pointer returned might be kept
      85                 :            :  * live only by RCU.  During exec, this may be called with task_lock() held
      86                 :            :  * on @task, still held from when check_unsafe_exec() was called.
      87                 :            :  */
      88                 :            : static inline struct task_struct *ptrace_parent(struct task_struct *task)
      89                 :            : {
      90         [ -  + ]:      13099 :         if (unlikely(task->ptrace))
           [ #  #  #  # ]
      91                 :          0 :                 return rcu_dereference(task->parent);
      92                 :            :         return NULL;
      93                 :            : }
      94                 :            : 
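/*
 * Editor's sketch (hypothetical caller, not part of ptrace.h): ptrace_parent()
 * must be called under rcu_read_lock(), and the returned pointer is only
 * guaranteed to stay valid inside that RCU read-side section.
 */
static inline pid_t example_tracer_pid(struct task_struct *task)
{
	struct task_struct *tracer;
	pid_t pid = 0;

	rcu_read_lock();
	tracer = ptrace_parent(task);
	if (tracer)
		pid = task_pid_nr(tracer);
	rcu_read_unlock();
	return pid;
}
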
      95                 :            : /**
      96                 :            :  * ptrace_event_enabled - test whether a ptrace event is enabled
      97                 :            :  * @task: ptracee of interest
      98                 :            :  * @event: %PTRACE_EVENT_* to test
      99                 :            :  *
     100                 :            :  * Test whether @event is enabled for ptracee @task.
     101                 :            :  *
     102                 :            :  * Returns %true if @event is enabled, %false otherwise.
     103                 :            :  */
     104                 :            : static inline bool ptrace_event_enabled(struct task_struct *task, int event)
     105                 :            : {
     106                 :    2374097 :         return task->ptrace & PT_EVENT_FLAG(event);
     107                 :            : }
     108                 :            : 
     109                 :            : /**
     110                 :            :  * ptrace_event - possibly stop for a ptrace event notification
     111                 :            :  * @event:      %PTRACE_EVENT_* value to report
     112                 :            :  * @message:    value for %PTRACE_GETEVENTMSG to return
     113                 :            :  *
     114                 :            :  * Check whether @event is enabled and, if so, report @event and @message
     115                 :            :  * to the ptrace parent.
     116                 :            :  *
     117                 :            :  * Called without locks.
     118                 :            :  */
     119                 :            : static inline void ptrace_event(int event, unsigned long message)
     120                 :            : {
     121 [ +  + ][ -  + ]:    1222675 :         if (unlikely(ptrace_event_enabled(current, event))) {
     122                 :         63 :                 current->ptrace_message = message;
     123                 :    1151987 :                 ptrace_notify((event << 8) | SIGTRAP);
     124                 :            :         } else if (event == PTRACE_EVENT_EXEC) {
     125                 :            :                 /* legacy EXEC report via SIGTRAP */
     126         [ +  + ]:      58945 :                 if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
     127                 :         32 :                         send_sig(SIGTRAP, current, 0);
     128                 :            :         }
     129                 :            : }
     130                 :            : 
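/*
 * Editor's sketch (illustrative, not part of ptrace.h): a typical event site.
 * The exit path, for instance, reports PTRACE_EVENT_EXIT with the exit code
 * as the PTRACE_GETEVENTMSG payload; the call is a no-op unless the tracer
 * enabled that event (or, for exec, the legacy SIGTRAP case above applies).
 */
static inline void example_report_exit(long code)
{
	ptrace_event(PTRACE_EVENT_EXIT, code);
}
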
     131                 :            : /**
     132                 :            :  * ptrace_init_task - initialize ptrace state for a new child
     133                 :            :  * @child:              new child task
     134                 :            :  * @ptrace:             true if child should be ptrace'd by parent's tracer
     135                 :            :  *
     136                 :            :  * This is called immediately after adding @child to its parent's children
     137                 :            :  * list.  @ptrace is false in the normal case, and true to ptrace @child.
     138                 :            :  *
     139                 :            :  * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
     140                 :            :  */
     141                 :            : static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
     142                 :            : {
     143                 :    1151924 :         INIT_LIST_HEAD(&child->ptrace_entry);
     144                 :    1151924 :         INIT_LIST_HEAD(&child->ptraced);
     145                 :    1151924 :         child->jobctl = 0;
     146                 :    1151924 :         child->ptrace = 0;
     147                 :    1151924 :         child->parent = child->real_parent;
     148                 :            : 
     149 [ +  + ][ +  - ]:    1151924 :         if (unlikely(ptrace) && current->ptrace) {
     150                 :         32 :                 child->ptrace = current->ptrace;
     151                 :         32 :                 __ptrace_link(child, current->parent);
     152                 :            : 
     153         [ -  + ]:         32 :                 if (child->ptrace & PT_SEIZED)
     154                 :          0 :                         task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
     155                 :            :                 else
     156                 :            :                         sigaddset(&child->pending.signal, SIGSTOP);
     157                 :            : 
     158                 :            :                 set_tsk_thread_flag(child, TIF_SIGPENDING);
     159                 :            :         }
     160                 :            : }
     161                 :            : 
     162                 :            : /**
     163                 :            :  * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
     164                 :            :  * @task:       task in %EXIT_DEAD state
     165                 :            :  *
     166                 :            :  * Called with write_lock(&tasklist_lock) held.
     167                 :            :  */
     168                 :            : static inline void ptrace_release_task(struct task_struct *task)
     169                 :            : {
     170         [ -  + ]:    1151924 :         BUG_ON(!list_empty(&task->ptraced));
     171                 :            :         ptrace_unlink(task);
     172         [ -  + ]:    1151924 :         BUG_ON(!list_empty(&task->ptrace_entry));
     173                 :            : }
     174                 :            : 
     175                 :            : #ifndef force_successful_syscall_return
     176                 :            : /*
     177                 :            :  * System call handlers that, upon successful completion, need to return a
     178                 :            :  * negative value should call force_successful_syscall_return() right before
     179                 :            :  * returning.  On architectures where the syscall convention provides for a
     180                 :            :  * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
     181                 :            :  * others), this macro can be used to ensure that the error flag will not get
     182                 :            :  * set.  On architectures which do not support a separate error flag, the macro
     183                 :            :  * is a no-op and the spurious error condition needs to be filtered out by some
      184                 :            :  * other means (e.g., at user level, by passing an extra argument to the
     185                 :            :  * syscall handler, or something along those lines).
     186                 :            :  */
     187                 :            : #define force_successful_syscall_return() do { } while (0)
     188                 :            : #endif
     189                 :            : 
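/*
 * Editor's sketch (hypothetical handler, not part of ptrace.h): a syscall
 * that can legitimately return a value in the -errno range marks the call
 * as successful, so architectures with a separate error flag do not report
 * a spurious failure.
 */
static inline long example_return_raw_value(long value)
{
	force_successful_syscall_return();
	return value;		/* may look like -errno but is valid data */
}
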
     190                 :            : #ifndef is_syscall_success
     191                 :            : /*
      192                 :            :  * On most systems we can tell whether a syscall succeeded based on whether
      193                 :            :  * the return value is an error value.  Some systems, such as ia64 and powerpc,
      194                 :            :  * have different indicators of success/failure and must define their own.
     195                 :            :  */
     196                 :            : #define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
     197                 :            : #endif
     198                 :            : 
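/*
 * Editor's sketch (illustrative, not part of ptrace.h): with the generic
 * definition, "success" simply means the return value is not in the -errno
 * range, which is what IS_ERR_VALUE() tests; regs_return_value() comes from
 * the architecture's <asm/ptrace.h>.
 */
static inline int example_syscall_failed(struct pt_regs *regs)
{
	return !is_syscall_success(regs);
}
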
     199                 :            : /*
     200                 :            :  * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
     201                 :            :  *
     202                 :            :  * These do-nothing inlines are used when the arch does not
     203                 :            :  * implement single-step.  The kerneldoc comments are here
     204                 :            :  * to document the interface for all arch definitions.
     205                 :            :  */
     206                 :            : 
     207                 :            : #ifndef arch_has_single_step
     208                 :            : /**
     209                 :            :  * arch_has_single_step - does this CPU support user-mode single-step?
     210                 :            :  *
     211                 :            :  * If this is defined, then there must be function declarations or
     212                 :            :  * inlines for user_enable_single_step() and user_disable_single_step().
     213                 :            :  * arch_has_single_step() should evaluate to nonzero iff the machine
     214                 :            :  * supports instruction single-step for user mode.
     215                 :            :  * It can be a constant or it can test a CPU feature bit.
     216                 :            :  */
     217                 :            : #define arch_has_single_step()          (0)
     218                 :            : 
     219                 :            : /**
     220                 :            :  * user_enable_single_step - single-step in user-mode task
     221                 :            :  * @task: either current or a task stopped in %TASK_TRACED
     222                 :            :  *
     223                 :            :  * This can only be called when arch_has_single_step() has returned nonzero.
     224                 :            :  * Set @task so that when it returns to user mode, it will trap after the
     225                 :            :  * next single instruction executes.  If arch_has_block_step() is defined,
     226                 :            :  * this must clear the effects of user_enable_block_step() too.
     227                 :            :  */
     228                 :            : static inline void user_enable_single_step(struct task_struct *task)
     229                 :            : {
     230                 :            :         BUG();                  /* This can never be called.  */
     231                 :            : }
     232                 :            : 
     233                 :            : /**
     234                 :            :  * user_disable_single_step - cancel user-mode single-step
     235                 :            :  * @task: either current or a task stopped in %TASK_TRACED
     236                 :            :  *
     237                 :            :  * Clear @task of the effects of user_enable_single_step() and
     238                 :            :  * user_enable_block_step().  This can be called whether or not either
     239                 :            :  * of those was ever called on @task, and even if arch_has_single_step()
     240                 :            :  * returned zero.
     241                 :            :  */
     242                 :            : static inline void user_disable_single_step(struct task_struct *task)
     243                 :            : {
     244                 :            : }
     245                 :            : #else
     246                 :            : extern void user_enable_single_step(struct task_struct *);
     247                 :            : extern void user_disable_single_step(struct task_struct *);
     248                 :            : #endif  /* arch_has_single_step */
     249                 :            : 
     250                 :            : #ifndef arch_has_block_step
     251                 :            : /**
     252                 :            :  * arch_has_block_step - does this CPU support user-mode block-step?
     253                 :            :  *
     254                 :            :  * If this is defined, then there must be a function declaration or inline
     255                 :            :  * for user_enable_block_step(), and arch_has_single_step() must be defined
     256                 :            :  * too.  arch_has_block_step() should evaluate to nonzero iff the machine
     257                 :            :  * supports step-until-branch for user mode.  It can be a constant or it
     258                 :            :  * can test a CPU feature bit.
     259                 :            :  */
     260                 :            : #define arch_has_block_step()           (0)
     261                 :            : 
     262                 :            : /**
     263                 :            :  * user_enable_block_step - step until branch in user-mode task
     264                 :            :  * @task: either current or a task stopped in %TASK_TRACED
     265                 :            :  *
     266                 :            :  * This can only be called when arch_has_block_step() has returned nonzero,
     267                 :            :  * and will never be called when single-instruction stepping is being used.
     268                 :            :  * Set @task so that when it returns to user mode, it will trap after the
     269                 :            :  * next branch or trap taken.
     270                 :            :  */
     271                 :            : static inline void user_enable_block_step(struct task_struct *task)
     272                 :            : {
     273                 :            :         BUG();                  /* This can never be called.  */
     274                 :            : }
     275                 :            : #else
     276                 :            : extern void user_enable_block_step(struct task_struct *);
     277                 :            : #endif  /* arch_has_block_step */
     278                 :            : 
     279                 :            : #ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
     280                 :            : extern void user_single_step_siginfo(struct task_struct *tsk,
     281                 :            :                                 struct pt_regs *regs, siginfo_t *info);
     282                 :            : #else
     283                 :            : static inline void user_single_step_siginfo(struct task_struct *tsk,
     284                 :            :                                 struct pt_regs *regs, siginfo_t *info)
     285                 :            : {
     286                 :            :         memset(info, 0, sizeof(*info));
     287                 :            :         info->si_signo = SIGTRAP;
     288                 :            : }
     289                 :            : #endif
     290                 :            : 
     291                 :            : #ifndef arch_ptrace_stop_needed
     292                 :            : /**
     293                 :            :  * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
     294                 :            :  * @code:       current->exit_code value ptrace will stop with
     295                 :            :  * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
     296                 :            :  *
     297                 :            :  * This is called with the siglock held, to decide whether or not it's
     298                 :            :  * necessary to release the siglock and call arch_ptrace_stop() with the
     299                 :            :  * same @code and @info arguments.  It can be defined to a constant if
     300                 :            :  * arch_ptrace_stop() is never required, or always is.  On machines where
     301                 :            :  * this makes sense, it should be defined to a quick test to optimize out
     302                 :            :  * calling arch_ptrace_stop() when it would be superfluous.  For example,
     303                 :            :  * if the thread has not been back to user mode since the last stop, the
     304                 :            :  * thread state might indicate that nothing needs to be done.
     305                 :            :  */
     306                 :            : #define arch_ptrace_stop_needed(code, info)     (0)
     307                 :            : #endif
     308                 :            : 
     309                 :            : #ifndef arch_ptrace_stop
     310                 :            : /**
     311                 :            :  * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
     312                 :            :  * @code:       current->exit_code value ptrace will stop with
     313                 :            :  * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
     314                 :            :  *
     315                 :            :  * This is called with no locks held when arch_ptrace_stop_needed() has
     316                 :            :  * just returned nonzero.  It is allowed to block, e.g. for user memory
     317                 :            :  * access.  The arch can have machine-specific work to be done before
     318                 :            :  * ptrace stops.  On ia64, register backing store gets written back to user
     319                 :            :  * memory here.  Since this can be costly (requires dropping the siglock),
     320                 :            :  * we only do it when the arch requires it for this particular stop, as
     321                 :            :  * indicated by arch_ptrace_stop_needed().
     322                 :            :  */
     323                 :            : #define arch_ptrace_stop(code, info)            do { } while (0)
     324                 :            : #endif
     325                 :            : 
     326                 :            : #ifndef current_pt_regs
     327                 :            : #define current_pt_regs() task_pt_regs(current)
     328                 :            : #endif
     329                 :            : 
     330                 :            : #ifndef ptrace_signal_deliver
     331                 :            : #define ptrace_signal_deliver() ((void)0)
     332                 :            : #endif
     333                 :            : 
     334                 :            : /*
     335                 :            :  * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
     336                 :            :  * on *all* architectures; the only reason to have a per-arch definition
     337                 :            :  * is optimisation.
     338                 :            :  */
     339                 :            : #ifndef signal_pt_regs
     340                 :            : #define signal_pt_regs() task_pt_regs(current)
     341                 :            : #endif
     342                 :            : 
     343                 :            : #ifndef current_user_stack_pointer
     344                 :            : #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
     345                 :            : #endif
     346                 :            : 
     347                 :            : extern int task_current_syscall(struct task_struct *target, long *callno,
     348                 :            :                                 unsigned long args[6], unsigned int maxargs,
     349                 :            :                                 unsigned long *sp, unsigned long *pc);
     350                 :            : 
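/*
 * Editor's sketch (hypothetical caller, not part of ptrace.h): reading the
 * syscall number a stopped tracee is in, in the style of /proc/<pid>/syscall.
 * task_current_syscall() returns 0 on success; *callno is -1 when the task
 * is not inside a syscall.
 */
static inline long example_current_syscall_nr(struct task_struct *target)
{
	long nr;
	unsigned long args[6], sp, pc;

	if (task_current_syscall(target, &nr, args, 6, &sp, &pc))
		return -1;
	return nr;
}
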
     351                 :            : #endif

Generated by: LCOV version 1.9