#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

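/*
 * A worked illustration of the numbering above (editorial sketch): with
 * 32-bit longs, nr == 35 selects bit 3 of the word at (addr + 1), since
 * 35 >> 5 == 1 and 35 & 31 == 3.
 */
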
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

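/*
 * A worked expansion of the constant-mask helpers above (illustrative only):
 * for a compile-time nr == 13, CONST_MASK_ADDR(13, addr) names the byte at
 * (void *)addr + 1 (13 >> 3 == 1), and CONST_MASK(13) evaluates to
 * 1 << (13 & 7) == 0x20, so the locked byte op touches only that one byte.
 */
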
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on the ordering guarantees it provides on x86.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

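/*
 * Minimal usage sketch for set_bit() (the bitmap and variable names are
 * illustrative, not part of this header):
 *
 *	unsigned long flags = 0;
 *
 *	set_bit(0, &flags);	constant nr: emits "lock orb", flags == 0x1
 *	set_bit(5, &flags);	constant nr: flags == 0x21
 *	set_bit(n, &flags);	variable nr: emits "lock bts"
 */
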
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

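/*
 * Illustrative sketch of the barrier note above (the bit name and word are
 * hypothetical): when clear_bit() publishes "work consumed" to another CPU,
 * order the earlier accesses explicitly:
 *
 *	... consume the data guarded by the bit ...
 *	smp_mb__before_atomic();
 *	clear_bit(WORK_PENDING_BIT, &state);
 */
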
/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

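/*
 * A minimal bit-lock sketch built on the two primitives above (the bit
 * number and word are hypothetical; real code should normally use the
 * generic bit_spin_lock() helpers instead):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */
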
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}

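/*
 * Typical usage sketch for test_and_clear_bit() (the flag name and handler
 * are illustrative): atomically consume a pending flag exactly once, even
 * if several CPUs race on it:
 *
 *	if (test_and_clear_bit(PENDING_BIT, &pending))
 *		handle_pending();
 */
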
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}

static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

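/*
 * Usage sketch for test_bit() (names are illustrative): a compile-time
 * constant nr resolves to constant_test_bit(), a plain load and mask, while
 * a runtime nr goes through variable_test_bit() and the BT instruction:
 *
 *	if (test_bit(0, &flags))	constant path
 *		...
 *	if (test_bit(nr, &flags))	variable path
 *		...
 */
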
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

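/*
 * Worked example for __ffs(): __ffs(0x18) == 3, because 0x18 is binary
 * 11000 and the lowest set bit is bit 3. __ffs(0) is undefined.
 */
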
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

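/*
 * Worked example for ffz(): ffz(0x0f) == 4, because bits 0-3 are set and
 * bit 4 is the first zero bit. ffz(~0UL) is undefined.
 */
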
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

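/*
 * Worked example for __fls(): __fls(0x18) == 4, the index of the highest
 * set bit of binary 11000. __fls(0) is undefined.
 */
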
#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect has clarified
	 * that in practice the register is rewritten with its previous value,
	 * except that the top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

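/*
 * Worked examples for ffs(): ffs(0) == 0, ffs(1) == 1, and ffs(0x18) == 4
 * (the lowest set bit is bit 3, reported as position 4).
 */
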
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect has clarified
	 * that in practice the register is rewritten with its previous value,
	 * except that the top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

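/*
 * Worked examples for fls(): fls(0) == 0, fls(1) == 1, and fls(0x18) == 5
 * (the highest set bit is bit 4, reported as position 5).
 */
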
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect has clarified
	 * that in practice the register is rewritten with its previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

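/*
 * Worked examples for fls64(): fls64(0) == 0, fls64(1) == 1, and
 * fls64(1ULL << 63) == 64.
 */
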
#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */