This source file includes the following definitions:
- lockref_get
- lockref_get_not_zero
- lockref_put_not_zero
- lockref_get_or_lock
- lockref_put_return
- lockref_put_or_lock
- lockref_mark_dead
- lockref_get_not_dead
1
2 #include <linux/export.h>
3 #include <linux/lockref.h>
4
#if USE_CMPXCHG_LOCKREF

/*
 * Lockless fast path: snapshot the combined { spinlock, count } word,
 * and as long as the embedded spinlock is observed unlocked, apply CODE
 * to a private copy and try to publish it with a single 64-bit cmpxchg.
 * On success, run SUCCESS (which returns out of the caller); on failure,
 * "old" already holds the value cmpxchg reloaded, so the loop retries
 * against fresh state. Retries are bounded (100) so a heavily contended
 * or locked lockref falls back to the caller's spinlocked slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

/*
 * No architecture support for the lockless update: make the fast path a
 * no-op so every caller simply falls through to its spinlock slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
36
37
38
39
40
41
42
43
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * The caller must already hold a reference to the object, so the count
 * cannot be zero here and no zero/dead check is performed.
 */
void lockref_get(struct lockref *lockref)
{
	/* Lockless fast path: returns directly on a successful cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held or cmpxchg kept failing/unsupported. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
57
58
59
60
61
62
63 int lockref_get_not_zero(struct lockref *lockref)
64 {
65 int retval;
66
67 CMPXCHG_LOOP(
68 new.count++;
69 if (old.count <= 0)
70 return 0;
71 ,
72 return 1;
73 );
74
75 spin_lock(&lockref->lock);
76 retval = 0;
77 if (lockref->count > 0) {
78 lockref->count++;
79 retval = 1;
80 }
81 spin_unlock(&lockref->lock);
82 return retval;
83 }
84 EXPORT_SYMBOL(lockref_get_not_zero);
85
86
87
88
89
90
91 int lockref_put_not_zero(struct lockref *lockref)
92 {
93 int retval;
94
95 CMPXCHG_LOOP(
96 new.count--;
97 if (old.count <= 1)
98 return 0;
99 ,
100 return 1;
101 );
102
103 spin_lock(&lockref->lock);
104 retval = 0;
105 if (lockref->count > 1) {
106 lockref->count--;
107 retval = 1;
108 }
109 spin_unlock(&lockref->lock);
110 return retval;
111 }
112 EXPORT_SYMBOL(lockref_put_not_zero);
113
114
115
116
117
118
119
120 int lockref_get_or_lock(struct lockref *lockref)
121 {
122 CMPXCHG_LOOP(
123 new.count++;
124 if (old.count <= 0)
125 break;
126 ,
127 return 1;
128 );
129
130 spin_lock(&lockref->lock);
131 if (lockref->count <= 0)
132 return 0;
133 lockref->count++;
134 spin_unlock(&lockref->lock);
135 return 1;
136 }
137 EXPORT_SYMBOL(lockref_get_or_lock);
138
139
140
141
142
143
144
145
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value, using only the
 * lockless cmpxchg fast path. Returns -1 if the lockless update could not
 * be done (lock held, contention, or no cmpxchg support), or if the count
 * was already zero or dead — the caller must then fall back to a locked
 * operation.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Fast path unavailable: report failure rather than taking the lock. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
158
159
160
161
162
163
164 int lockref_put_or_lock(struct lockref *lockref)
165 {
166 CMPXCHG_LOOP(
167 new.count--;
168 if (old.count <= 1)
169 break;
170 ,
171 return 1;
172 );
173
174 spin_lock(&lockref->lock);
175 if (lockref->count <= 1)
176 return 0;
177 lockref->count--;
178 spin_unlock(&lockref->lock);
179 return 1;
180 }
181 EXPORT_SYMBOL(lockref_put_or_lock);
182
183
184
185
186
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold the spinlock. The count is set to the large negative
 * sentinel -128 so every conditional get/put helper's sign check refuses
 * to touch it from now on.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
193
194
195
196
197
198
199 int lockref_get_not_dead(struct lockref *lockref)
200 {
201 int retval;
202
203 CMPXCHG_LOOP(
204 new.count++;
205 if (old.count < 0)
206 return 0;
207 ,
208 return 1;
209 );
210
211 spin_lock(&lockref->lock);
212 retval = 0;
213 if (lockref->count >= 0) {
214 lockref->count++;
215 retval = 1;
216 }
217 spin_unlock(&lockref->lock);
218 return retval;
219 }
220 EXPORT_SYMBOL(lockref_get_not_dead);