root/arch/hexagon/include/asm/spinlock.h

DEFINITIONS

This source file includes the following definitions:
  1. arch_read_lock
  2. arch_read_unlock
  3. arch_read_trylock
  4. arch_write_lock
  5. arch_write_trylock
  6. arch_write_unlock
  7. arch_spin_lock
  8. arch_spin_unlock
  9. arch_spin_trylock

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * This file is pulled in for SMP builds.
 * The barrier usage still needs to be checked for "true" SMP.
 */

/*
 * Read locks:
 * - load-lock the lock value
 * - if the value is negative, a writer holds the lock: retry from the load
 * - otherwise increment the value and attempt the conditional store
 * - if the conditional store fails, retry from the load
 * - a successful store of the incremented value means the read lock is held
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
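
/*
 * Illustrative only: a C-level sketch of the locked-load/conditional-store
 * loop above, written against the GCC __atomic builtins on a plain int
 * lock word rather than the memw_locked instructions the real
 * implementation must use.  The helper name is hypothetical, not kernel API.
 */
static inline void hexagon_read_lock_sketch(int *lockword)
{
        int old;

        for (;;) {
                /* load the current lock word (stand-in for memw_locked) */
                old = __atomic_load_n(lockword, __ATOMIC_RELAXED);
                if (old < 0)
                        continue;       /* negative: a writer holds the lock */
                /* conditional store stand-in: only succeed if still 'old' */
                if (__atomic_compare_exchange_n(lockword, &old, old + 1, 0,
                                                __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        return;         /* reader count incremented: lock held */
        }
}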

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       R6 = add(R6,#-1);\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       if (!P3) jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}

/*  Returns a non-zero value if the read lock was acquired, 0 otherwise.  */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       { %0 = P3 }\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}
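
/*
 * Illustrative only: how a caller might use the trylock return value.
 * The names here (read_stats_if_uncontended, demo_rwlock, demo_counter)
 * are hypothetical, not kernel API.
 */
static inline int read_stats_if_uncontended(arch_rwlock_t *demo_rwlock,
                                            const int *demo_counter)
{
        int snapshot = -1;

        if (arch_read_trylock(demo_rwlock)) {   /* non-zero: lock acquired */
                snapshot = *demo_counter;
                arch_read_unlock(demo_rwlock);
        }
        return snapshot;                        /* -1 means the lock was busy */
}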

/*
 * Write lock: spin until the lock word reads 0 (free), then conditionally
 * store -1 to mark it write-locked.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
                "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
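
/*
 * Illustrative only: a C-level sketch of the write-lock loop above, using
 * the GCC __atomic builtins on a plain int lock word instead of
 * memw_locked.  Hypothetical helper, not kernel API.
 */
static inline void hexagon_write_lock_sketch(int *lockword)
{
        int expected;

        for (;;) {
                expected = 0;           /* 0 means no readers and no writer */
                /* try to move the word from 0 (free) to -1 (write-held) */
                if (__atomic_compare_exchange_n(lockword, &expected, -1, 0,
                                                __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        return;
        }
}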

/*  Returns a non-zero value if the write lock was acquired, 0 otherwise.  */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
                "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}

/*
 * Spinlock: spin until the lock word reads 0 (free), then conditionally
 * store 1 to take it.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       P3 = cmp.eq(R6,#0);\n"
                "       { if (!P3) jump 1b; R6 = #1; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}
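
/*
 * Illustrative only: a C-level sketch of the spinlock acquire loop above,
 * again with the GCC __atomic builtins on a plain int lock word instead of
 * memw_locked.  Hypothetical helper, not kernel API.
 */
static inline void hexagon_spin_lock_sketch(int *lockword)
{
        int expected;

        do {
                expected = 0;   /* only take the lock if it currently reads 0 */
        } while (!__atomic_compare_exchange_n(lockword, &expected, 1, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
}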

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}
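
/*
 * Illustrative only: the unlock paths above pair a full barrier with a
 * plain store, which is at least as strong as a release store.  A sketch
 * of the same idea with the GCC __atomic builtins on a plain int lock
 * word; hypothetical helper, not kernel API.
 */
static inline void hexagon_unlock_sketch(int *lockword)
{
        /* publish all writes from the critical section before freeing */
        __atomic_store_n(lockword, 0, __ATOMIC_RELEASE);
}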

/*  Returns a non-zero value if the lock was acquired, 0 otherwise.  */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       P3 = cmp.eq(R6,#0);\n"
                "       { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}
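
/*
 * Illustrative only: a typical trylock pattern built on the primitives
 * above.  The names (try_bump_counter, demo_spinlock, demo_counter) are
 * hypothetical, not kernel API.
 */
static inline int try_bump_counter(arch_spinlock_t *demo_spinlock,
                                   int *demo_counter)
{
        if (!arch_spin_trylock(demo_spinlock))
                return 0;               /* lock busy: caller can back off */
        (*demo_counter)++;
        arch_spin_unlock(demo_spinlock);
        return 1;
}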

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock.
 */
#define arch_spin_is_locked(x) ((x)->lock != 0)

#endif /* _ASM_SPINLOCK_H */
