/*
 *	linux/kernel/futex_compat.c
 *
 * Futex compatibility routines.
 *
 * Copyright 2006, Red Hat, Inc., Ingo Molnar
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/futex.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>


/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct compat_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (compat_get_timespec(&ts, utime))
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}