#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/kernel.h>

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
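
/*
 * REPEAT_BYTE(x) from <linux/kernel.h> replicates the byte 'x' across
 * an unsigned long, so on 64-bit these constants are
 *
 *	one_bits  = 0x0101010101010101
 *	high_bits = 0x8080808080808080
 *
 * (0x01010101 and 0x80808080 on 32-bit).
 */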

#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}
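
/*
 * Worked example: for mask = 0x000000000000ffff (two low 0xff bytes,
 * as produced by create_zero_mask() below),
 *
 *	0xffff * 0x0001020304050608 = 0x020202020202f9f8
 *
 * and shifting right by 56 leaves the top byte, 2: the number of
 * 0xff bytes before the first zero byte.
 */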

#else	/* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}
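
/*
 * Worked example: for mask = 0x00ffff,
 *
 *	a = (0x0ff0001 + 0x00ffff) >> 23 = 0x1000000 >> 23 = 2
 *
 * and "a & mask" leaves 2. For mask = 0 the addition gives a = 1,
 * and "a & mask" forces the result back down to 0.
 */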

#endif

/* Return nonzero if the word has a zero byte */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}
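
/*
 * Worked example: loading "ab\0..." little-endian gives
 * a = 0x0000000000006261. Then
 *
 *	((a - 0x0101010101010101) & ~a) & 0x8080808080808080
 *		= 0x8080808080800000
 *
 * i.e. 0x80 in the first zero byte (byte 2) and, because the
 * subtraction borrows through the zero bytes above it, in every
 * higher byte as well. Only the lowest set bit matters;
 * create_zero_mask() below isolates it.
 */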

/*
 * On little-endian the mask from has_zero() is already in the form
 * create_zero_mask() wants, so this is a no-op; it exists so callers
 * work unchanged with big-endian implementations of this interface.
 */
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}
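
/*
 * Worked example: "(bits - 1) & ~bits" keeps exactly the bits below
 * the lowest set bit. Continuing the example above,
 *
 *	bits = 0x8080808080800000
 *	(bits - 1) & ~bits = 0x00000000007fffff
 *	>> 7 = 0x000000000000ffff
 *
 * i.e. 0xff in every byte before the first zero byte.
 */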

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
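
/*
 * A minimal sketch of how a caller strings these together to find the
 * length of a NUL-terminated string one word at a time (the names and
 * loop structure are illustrative, not part of this header):
 *
 *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *	unsigned long a, bits, mask;
 *	size_t len = 0;
 *
 *	for (;;) {
 *		a = load_unaligned_zeropad(str + len);
 *		if (has_zero(a, &bits, &constants)) {
 *			bits = prep_zero_mask(a, bits, &constants);
 *			mask = create_zero_mask(bits);
 *			return len + find_zero(mask);
 *		}
 *		len += sizeof(unsigned long);
 *	}
 */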

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, dummy;

	asm(
		/* 1: the normal, possibly faulting, unaligned load */
		"1:\tmov %2,%0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		/*
		 * Fixup: align the address down to a word boundary and
		 * reload; that word lies entirely in the mapped page...
		 */
		"3:\t"
		"lea %2,%1\n\t"
		"and %3,%1\n\t"
		"mov (%1),%0\n\t"
		/*
		 * ...then shift right by 8*(addr & (sizeof(long)-1)) so
		 * the accessible bytes land at the bottom and zeroes
		 * fill the rest.
		 */
		"leal %2,%%ecx\n\t"
		"andl %4,%%ecx\n\t"
		"shll $3,%%ecx\n\t"
		"shr %%cl,%0\n\t"
		"jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		:"=&r" (ret),"=&c" (dummy)
		:"m" (*(unsigned long *)addr),
		 "i" (-sizeof(unsigned long)),
		 "i" (sizeof(unsigned long)-1));
	return ret;
}
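
/*
 * Worked example (64-bit): for addr = 0x...ffd, the 8-byte load at 1:
 * crosses into the next page. If that page is unmapped, the fixup
 * reloads from the aligned address 0x...ff8 and shifts right by
 * (addr & 7) * 8 = 40 bits, so the three accessible bytes end up in
 * the low bytes of the result and the upper five bytes are zero.
 */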

#endif /* _ASM_WORD_AT_A_TIME_H */