#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)	{ (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

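/*
 * All operations below serialise on global lock 1
 * (__global_lock1()/__global_unlock1() from <asm/global_lock.h>):
 * atomicity comes from mutual exclusion under that lock rather than
 * from atomic read-modify-write instructions.
 */
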
static inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as it reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency are preserved.
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
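
/*
 * For reference, ATOMIC_OPS(add, +=) above expands to the equivalent of
 * the following two functions (a sketch of the preprocessor output with
 * whitespace tidied):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		__global_lock1(flags);
 *		fence();
 *		v->counter += i;
 *		__global_unlock1(flags);
 *	}
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		... same pattern, but returning the updated counter ...
 *	}
 */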

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret == old) {
		fence();
		v->counter = new;
	}
	__global_unlock1(flags);

	return ret;
}
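
/*
 * atomic_cmpxchg returns the value it observed, so callers can build a
 * retry loop on top of it. A minimal sketch (atomic_add_twice is a
 * hypothetical helper, not part of this header):
 *
 *	static inline void atomic_add_twice(atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old + 2;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */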

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
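
/* atomic_xchg forwards to the architecture's xchg() on the counter
 * word, so it needs no lock/fence sequence of its own here. */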

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret != u) {
		fence();
		v->counter += a;
	}
	__global_unlock1(flags);

	return ret;
}
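
/*
 * __atomic_add_unless returns the value seen *before* any addition, so
 * generic code can report whether the add actually happened. A sketch
 * of the usual wrapper (provided by common code, not by this header):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * atomic_inc_not_zero(v) is then atomic_add_unless(v, 1, 0).
 */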

static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	/* Subtract the caller's operand, not a hard-coded 1. */
	ret = v->counter - i;
	if (ret >= 0) {
		fence();
		v->counter = ret;
	}
	__global_unlock1(flags);

	return ret;
}
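
/*
 * atomic_sub_if_positive only stores the result when it does not go
 * negative; either way the caller gets back v->counter - i as computed.
 * The architecture builds atomic_dec_if_positive on it, roughly:
 *
 *	#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
 */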

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */