/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout exist because Linux requires
 * a full memory barrier before and after any atomic routine that
 * both updates memory and returns a value.
 */

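/*
 * Routines such as atomic_add() that do not return a value have no
 * ordering requirements, so a bare fetchadd suffices.
 */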
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

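/*
 * Atomically add "a" to the counter unless it currently holds "u".
 * Implemented as a cmpxchg() retry loop; returns the value the
 * counter held before any update.
 */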
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

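/*
 * Aligned 64-bit loads and stores are atomic on this 64-bit platform,
 * so read and set are plain accesses with no locking needed.
 */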
#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

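/*
 * Atomically add "a" to the counter unless it currently holds "u".
 * Returns non-zero if the add was performed.
 */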
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

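/* The remaining 64-bit operations are built from add and add_return. */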
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

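/* Atomically increment the counter unless it is zero; returns non-zero on success. */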
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */