root/kernel/locking/semaphore.c


DEFINITIONS

This source file includes the following definitions:
  1. down
  2. down_interruptible
  3. down_killable
  4. down_trylock
  5. down_timeout
  6. up
  7. __down_common
  8. __down
  9. __down_interruptible
  10. __down_killable
  11. __down_timeout
  12. __up

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
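
/*
 * For reference, the structure these notes describe is defined in
 * <linux/semaphore.h> and bundles exactly the three members mentioned
 * above:
 *
 *        struct semaphore {
 *                raw_spinlock_t          lock;
 *                unsigned int            count;
 *                struct list_head        wait_list;
 *        };
 */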

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                __down(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
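
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * a semaphore initialised to 1 acts as a simple sleeping lock.  As the
 * comment above notes, new code should normally prefer
 * down_interruptible() or down_killable().
 *
 *        static struct semaphore my_sem;
 *
 *        sema_init(&my_sem, 1);
 *        down(&my_sem);          sleeps here if the count is already zero
 *        ... critical section ...
 *        up(&my_sem);            releases, waking the next waiter if any
 */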

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_interruptible(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_interruptible);
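
/*
 * Typical calling pattern (hypothetical caller sketch): the return value
 * must be checked, because a signal can end the wait before the semaphore
 * is obtained.
 *
 *        if (down_interruptible(&my_sem))
 *                return -EINTR;          interrupted; semaphore NOT held
 *        ... protected work ...
 *        up(&my_sem);
 */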

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_killable(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int count;

        raw_spin_lock_irqsave(&sem->lock, flags);
        count = sem->count - 1;
        if (likely(count >= 0))
                sem->count = count;
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
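
/*
 * Because the return convention is inverted relative to mutex_trylock()
 * and spin_trylock(), the idiomatic check reads "nonzero means we did
 * NOT get it" (hypothetical caller sketch):
 *
 *        if (down_trylock(&my_sem))
 *                return -EBUSY;          count was zero; nothing was taken
 *        ... semaphore held here ...
 *        up(&my_sem);
 */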

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
        unsigned long flags;
        int result = 0;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_timeout(sem, timeout);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_timeout);
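
/*
 * The timeout is expressed in jiffies, so callers usually convert from a
 * human unit first (hypothetical caller sketch):
 *
 *        if (down_timeout(&my_sem, msecs_to_jiffies(100)))
 *                return -ETIME;          not acquired within roughly 100 ms
 *        ... semaphore held here ...
 *        up(&my_sem);
 */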

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(list_empty(&sem->wait_list)))
                sem->count++;
        else
                __up(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
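
/*
 * Sketch of the "any context" property (hypothetical driver code): an
 * interrupt handler may call up() to wake a task blocked in down(), which
 * is why the lock is taken with the irqsave variants above.
 *
 *        static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *        {
 *                up(&my_sem);            wakes a task sleeping in down(), if any
 *                return IRQ_HANDLED;
 *        }
 */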

/* Functions for the contended case */

struct semaphore_waiter {
        struct list_head list;
        struct task_struct *task;
        bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
                                                                long timeout)
{
        struct semaphore_waiter waiter;

        /* Queue ourselves; the caller already holds sem->lock with irqs off. */
        list_add_tail(&waiter.list, &sem->wait_list);
        waiter.task = current;
        waiter.up = false;

        for (;;) {
                if (signal_pending_state(state, current))
                        goto interrupted;
                if (unlikely(timeout <= 0))
                        goto timed_out;
                __set_current_state(state);
                /* Drop the lock while sleeping so up() can take it. */
                raw_spin_unlock_irq(&sem->lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->lock);
                /* __up() hands us the semaphore directly via waiter.up. */
                if (waiter.up)
                        return 0;
        }

 timed_out:
        list_del(&waiter.list);
        return -ETIME;

 interrupted:
        list_del(&waiter.list);
        return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
        __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
        return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
        return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
        return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                                                struct semaphore_waiter, list);
        /*
         * Hand the semaphore straight to the first waiter instead of
         * bumping ->count, then wake it up.
         */
        list_del(&waiter->list);
        waiter->up = true;
        wake_up_process(waiter->task);
}
