1/*
2 *  arch/arm/include/asm/assembler.h
3 *
4 *  Copyright (C) 1996-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *  This file contains arm architecture specific defines
11 *  for the different processors.
12 *
13 *  Do not include any C declarations in this file - it is included by
14 *  assembler source.
15 */
16#ifndef __ASM_ASSEMBLER_H__
17#define __ASM_ASSEMBLER_H__
18
19#ifndef __ASSEMBLY__
20#error "Only include this from assembly code"
21#endif
22
23#include <asm/ptrace.h>
24#include <asm/domain.h>
25#include <asm/opcodes-virt.h>
26#include <asm/asm-offsets.h>
27#include <asm/page.h>
28#include <asm/thread_info.h>
29
#define IOMEM(x)	(x)	/* no-op in assembly: I/O cookies are used as-is */

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * lspull/lspush name the opposing shift directions used when merging
 * bytes from two misaligned words; they swap between LE and BE builds
 * so the same byte-lane code works for either endianness.
 *
 * get_byte_N is the shift that brings byte lane N down to bits 7:0
 * (the caller masks off the upper bits as needed); put_byte_N moves a
 * value from bits 7:0 up into byte lane N.
 */
#ifndef __ARMEB__	/* little-endian */
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else			/* big-endian */
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code		/* emitted as-is */
#else
#define ARM_BE8(code...)		/* compiled out */
#endif

/*
 * Data preload for architectures that support it
 * (PLD appeared in ARMv5; on older architectures the operand vanishes).
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
89
/*
 * Enable and disable interrupts.
 *
 * The *_notrace variants change the IRQ mask only; disable_irq and
 * enable_irq below additionally call into the CONFIG_TRACE_IRQFLAGS
 * tracer so that lockdep's notion of the IRQ state stays accurate.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ v6+: set the I bit directly
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ v6+: clear the I bit directly
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE	@ pre-v6: rewrite CPSR control field
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE	@ clears I (and F); assumes SVC mode
	.endm
#endif

	@ Report "hardirqs off" to the irq-flags tracer.  The call clobbers
	@ the caller-saved registers, so preserve r0-r3, ip and lr around it.
	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	@ Conditionally report "hardirqs on"; \cond is an ARM condition
	@ code applied to the bl itself.
	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al	@ unconditional form
	.endm

	@ Disable IRQs first, then tell the tracer (trace runs with IRQs off).
	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	@ Tell the tracer first, then actually enable IRQs, so tracing
	@ itself still runs with IRQs masked.
	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 * On v7-M there is no CPSR; the IRQ mask lives in PRIMASK instead.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq			@ also informs the irq-flags tracer
	.endm
156
/*
 * As save_and_disable_irqs, but without calling into the irq-flags
 * tracer.  Mirror the v7-M handling of save_and_disable_irqs: v7-M has
 * no CPSR, the IRQ mask lives in PRIMASK, and restore_irqs_notrace
 * writes the saved value back to PRIMASK, so it must be read from
 * there as well (reading cpsr here would save a value that cannot
 * correctly restore the interrupt state on v7-M).
 */
	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm
161
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 * On v7-M the saved value is a PRIMASK value, not a CPSR.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr	@ control field only: mode + I/F bits
#endif
	.endm

	@ As restore_irqs_notrace, but report to the irq-flags tracer when
	@ the saved state had IRQs enabled (I bit clear -> Z set -> "eq").
	@ NOTE(review): on v7-M \oldcpsr holds PRIMASK, whose enable bit is
	@ not PSR_I_BIT — confirm tracing behaviour for v7-M configurations.
	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
179
/*
 * Get current thread_info.
 *
 * thread_info lives at the base of the THREAD_SIZE-aligned kernel
 * stack, so it is found by rounding sp down to a THREAD_SIZE boundary:
 * shift right then left by log2(THREAD_SIZE) = THREAD_SIZE_ORDER +
 * PAGE_SHIFT.  The first shift is split into a separate instruction
 * for Thumb-2, which cannot encode this shifted-operand mov.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
189
/*
 * Increment/decrement the preempt count.
 *
 * Plain (non-atomic) read-modify-write of TI_PREEMPT in the given
 * thread_info; \tmp is a scratch register and is clobbered.  When
 * preempt counting is disabled these expand to nothing.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	@ Decrement the preempt count of the *current* thread; \ti is
	@ overwritten with its thread_info pointer as a side effect.
	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	/* !CONFIG_PREEMPT_COUNT: no-op stubs with the same signatures */
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif
220
/*
 * Wrap a single user-space access instruction: label it 9999 and record
 * <insn, fixup> in __ex_table so a fault in it branches to the local
 * label 9001, which the *user* of this macro must define.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
/*
 * ALT_SMP()/ALT_UP() pairs: the SMP instruction is emitted inline (at
 * label 9998) and the UP replacement is stashed in .alt.smp.init keyed
 * by that address, so it can be patched over the SMP one when the
 * kernel finds itself on a uniprocessor.  The UP variant must assemble
 * to exactly 4 bytes (a 2-byte Thumb instruction is nop-padded).
 */
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
/* UP replacement for an ALT_SMP() instruction: a branch to \label */
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
/* UP kernel: drop the SMP variant and emit the UP one inline */
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
258
/*
 * Instruction barrier: ISB on v7+, the equivalent CP15 "flush prefetch
 * buffer" operation on v6 (r0 is only the required dummy source
 * operand).  Expands to nothing on earlier architectures.
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier.
 *
 * Emitted only for SMP kernels: "dmb ish" on v7 (CP15 DMB on v6), with
 * a nop recorded as the UP alternative so the barrier can be patched
 * out on uniprocessor systems.  \mode selects the "arm" or wide
 * Thumb-2 (W()) encoding so both alternatives are 4 bytes, as
 * ALT_SMP/ALT_UP require.
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
293
#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/* Thumb-2: msr cannot take an immediate, so stage \mode via \reg */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	/* ARM: immediate form; \reg is accepted but unused */
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
311
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 *
 * If entered in HYP mode, SVC cannot be reached by a plain CPSR write;
 * instead the target state is loaded into SPSR_hyp/ELR_hyp and an
 * exception return drops the CPU into SVC.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE		@ mode bits become 0 iff in HYP
	tst	\reg, #MODE_MASK		@ Z set -> currently in HYP
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not HYP: plain CPSR write
	orr	\reg, \reg, #PSR_A_BIT		@ HYP: also mask async aborts
	adr	lr, BSYM(2f)			@ resume point after the return
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)			@ ELR_hyp = lr (label 2 below)
	__ERET					@ exception-return into SVC
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
343
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants.
 *
 * usracc emits \rept (1 or 2) load/store accesses of \inc bytes each
 * (1 or 4) through \ptr, post-incrementing it; \t defaults to TUSER()
 * so the unprivileged "T" variants are used, i.e. user memory is
 * touched with user permissions.  Every access instruction gets an
 * __ex_table entry pointing at \abort for fault fixup.
 */
#ifdef CONFIG_THUMB2_KERNEL

	@ One access at byte offset \off from \ptr.  The Thumb-2 T-variants
	@ only have the immediate-offset addressing form, so the pointer is
	@ advanced separately by the caller (usracc below).
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc	@ single pointer update at the end
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	@ ARM: the T-variants support post-indexed addressing, so each
	@ access advances \ptr itself.
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */
408
	@ Store \reg to user memory at \ptr; see usracc above for the
	@ fault-fixup behaviour (default abort target: local label 9001,
	@ which the caller must define).
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	@ Load \reg from user memory at \ptr (same conventions as strusr).
	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals (with symbol type/size) */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"		@ NUL-terminated
	.size \name , . - \name
	.endm
424
	@ Range-check a user access of \size bytes at \addr against \limit:
	@ branch to \bad if addr + size - 1 wraps around or reaches \limit.
	@ \tmp is clobbered; flags are clobbered.  Kernels using CPU domains
	@ rely on domain faults instead, so the check is compiled out there.
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1		@ C set on wrap-around
	sbcccs	\tmp, \tmp, \limit		@ if no wrap, compare with limit
	bcs	\bad
#endif
	.endm
432
/*
 * Define ret and ret<cond> macros for function return: use bx when
 * returning via lr on v6+ (interworking/return prediction), otherwise
 * fall back to a mov to pc.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	@ Fixed-width (4-byte) return: on Thumb-2 the ret above may emit a
	@ 2-byte encoding, so pad with a nop to keep the size constant.
	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
453
454#endif /* __ASM_ASSEMBLER_H__ */
455