#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
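/*
 * Usage sketch (illustrative only, not part of this header; "buf" and
 * "len" are placeholder variables):
 *
 *	__wsum partial = csum_partial(buf, len, 0);
 *	__sum16 check  = csum_fold(partial);
 */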

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary
 */

extern __wsum csum_partial_copy_from_user(const void __user *src,
						void *dst,
						int len, __wsum sum,
						int *csum_err);

extern __wsum csum_partial_copy_nocheck(const void *src,
					      void *dst, int len,
					      __wsum sum);
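
/*
 * Usage sketch (illustrative only; "uptr", "kbuf" and "len" are
 * placeholders): copy from user space while checksumming, then check
 * the error flag, which is set if the copy faults.
 *
 *	int err = 0;
 *	__wsum csum = csum_partial_copy_from_user(uptr, kbuf, len, 0, &err);
 *	if (err)
 *		return -EFAULT;
 */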

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum = 0;
	unsigned long tmp;

	__asm__ ("subqw #1,%2\n"
		 "1:\t"
		 "movel %1@+,%3\n\t"
		 "addxl %3,%0\n\t"
		 "dbra  %2,1b\n\t"
		 "movel %0,%3\n\t"
		 "swap  %3\n\t"
		 "addxw %3,%0\n\t"
		 "clrw  %3\n\t"
		 "addxw %3,%0\n\t"
		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
		 : "0" (sum), "1" (iph), "2" (ihl)
		 : "memory");
	return (__force __sum16)~sum;
}
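
/*
 * Usage sketch (illustrative only; "iph" is assumed to point to a
 * received struct iphdr): a correct header checksums to zero.
 *
 *	if (ip_fast_csum(iph, iph->ihl) != 0)
 *		goto drop;
 */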

/*
 * Fold a 32-bit partial checksum down to 16 bits and return it
 * complemented, ready to be stored in a protocol header.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));

	return (__force __sum16)~sum;
}

/*
 * computes the 32-bit partial checksum of the TCP/UDP pseudo-header;
 * fold the result with csum_fold() to get the final 16-bit checksum
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl  %2,%0\n\t"
		 "addxl %3,%0\n\t"
		 "addxl %4,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=d" (saddr)
		 : "g" (daddr), "1" (saddr), "d" (len + proto),
		   "0" (sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
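
/*
 * Usage sketch (illustrative only; "udph", "saddr", "daddr", "ulen" and
 * "csum" are placeholders for a UDP header, the IPv4 addresses, the UDP
 * length and the partial checksum of the UDP payload):
 *
 *	udph->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 */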

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
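
/*
 * Usage sketch (illustrative only; "icmph" and "len" are placeholders
 * for an outgoing ICMP message and its length): zero the checksum field
 * before computing it over the whole message.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */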

#define _HAVE_ARCH_IPV6_CSUM
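/*
 * computes the checksum of the TCP/UDP pseudo-header for IPv6
 * returns a 16-bit checksum, already complemented
 */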
static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, unsigned short proto, __wsum sum)
{
	register unsigned long tmp;
	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}
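
/*
 * Usage sketch (illustrative only; "ip6h", "th", "tcp_len" and "csum"
 * are placeholders for an IPv6 header, a TCP header, the TCP length and
 * the partial checksum of the TCP segment):
 *
 *	th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, tcp_len,
 *				    IPPROTO_TCP, csum);
 */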

#endif /* CONFIG_GENERIC_CSUM */
#endif /* _M68K_CHECKSUM_H */