to                 25 arch/alpha/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                303 arch/alpha/include/asm/uaccess.h extern long __copy_user(void *to, const void *from, long len);
to                306 arch/alpha/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long len)
to                308 arch/alpha/include/asm/uaccess.h 	return __copy_user(to, (__force const void *)from, len);
to                312 arch/alpha/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long len)
to                314 arch/alpha/include/asm/uaccess.h 	return __copy_user((__force void *)to, from, len);
to                317 arch/alpha/include/asm/uaccess.h extern long __clear_user(void __user *to, long len);
to                320 arch/alpha/include/asm/uaccess.h clear_user(void __user *to, long len)
to                322 arch/alpha/include/asm/uaccess.h 	if (__access_ok((unsigned long)to, len))
to                323 arch/alpha/include/asm/uaccess.h 		len = __clear_user(to, len);
to                421 arch/alpha/kernel/io.c void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
to                426 arch/alpha/kernel/io.c 	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
to                429 arch/alpha/kernel/io.c 			*(u64 *)to = __raw_readq(from);
to                431 arch/alpha/kernel/io.c 			to += 8;
to                437 arch/alpha/kernel/io.c 	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
to                440 arch/alpha/kernel/io.c 			*(u32 *)to = __raw_readl(from);
to                442 arch/alpha/kernel/io.c 			to += 4;
to                448 arch/alpha/kernel/io.c 	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
to                451 arch/alpha/kernel/io.c 			*(u16 *)to = __raw_readw(from);
to                453 arch/alpha/kernel/io.c 			to += 2;
to                460 arch/alpha/kernel/io.c 		*(u8 *) to = __raw_readb(from);
to                462 arch/alpha/kernel/io.c 		to++;
to                475 arch/alpha/kernel/io.c void memcpy_toio(volatile void __iomem *to, const void *from, long count)
to                481 arch/alpha/kernel/io.c 	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
to                484 arch/alpha/kernel/io.c 			__raw_writeq(*(const u64 *)from, to);
to                486 arch/alpha/kernel/io.c 			to += 8;
to                492 arch/alpha/kernel/io.c 	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
to                495 arch/alpha/kernel/io.c 			__raw_writel(*(const u32 *)from, to);
to                497 arch/alpha/kernel/io.c 			to += 4;
to                503 arch/alpha/kernel/io.c 	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
to                506 arch/alpha/kernel/io.c 			__raw_writew(*(const u16 *)from, to);
to                508 arch/alpha/kernel/io.c 			to += 2;
to                515 arch/alpha/kernel/io.c 		__raw_writeb(*(const u8 *) from, to);
to                517 arch/alpha/kernel/io.c 		to++;
to                529 arch/alpha/kernel/io.c void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
to                532 arch/alpha/kernel/io.c 	if (count > 0 && ((u64)to & 1)) {
to                533 arch/alpha/kernel/io.c 		__raw_writeb(c, to);
to                534 arch/alpha/kernel/io.c 		to++;
to                539 arch/alpha/kernel/io.c 	if (count >= 2 && ((u64)to & 2)) {
to                540 arch/alpha/kernel/io.c 		__raw_writew(c, to);
to                541 arch/alpha/kernel/io.c 		to += 2;
to                546 arch/alpha/kernel/io.c 	if (count >= 4 && ((u64)to & 4)) {
to                547 arch/alpha/kernel/io.c 		__raw_writel(c, to);
to                548 arch/alpha/kernel/io.c 		to += 4;
to                557 arch/alpha/kernel/io.c 			__raw_writeq(c, to);
to                558 arch/alpha/kernel/io.c 			to += 8;
to                566 arch/alpha/kernel/io.c 		__raw_writel(c, to);
to                567 arch/alpha/kernel/io.c 		to += 4;
to                573 arch/alpha/kernel/io.c 		__raw_writew(c, to);
to                574 arch/alpha/kernel/io.c 		to += 2;
to                580 arch/alpha/kernel/io.c 		__raw_writeb(c, to);
to               1104 arch/alpha/kernel/osf_sys.c 	struct timespec64 end_time, *to = NULL;
to               1107 arch/alpha/kernel/osf_sys.c 		to = &end_time;
to               1115 arch/alpha/kernel/osf_sys.c 		if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec))
to               1121 arch/alpha/kernel/osf_sys.c 	return core_sys_select(n, inp, outp, exp, to);
to                 51 arch/arc/include/asm/entry-arcv2.h 	; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
to                 52 arch/arc/include/asm/entry-arcv2.h 	;   1. SP auto-switched to kernel mode stack
to                 76 arch/arc/include/asm/entry-arcv2.h 	; (A) Before jumping to Exception Vector, hardware micro-ops did following:
to                 77 arch/arc/include/asm/entry-arcv2.h 	;   1. SP auto-switched to kernel mode stack
to                142 arch/arc/include/asm/entry-arcv2.h 	; Saving pt_regs->sp correctly requires some extra work due to the way
to                153 arch/arc/include/asm/entry-arcv2.h 	; ISA requires ADD.nz to have same dest and src reg operands
to                178 arch/arc/include/asm/entry-arcv2.h 	; Restore SP (into AUX_USER_SP) only if returning to U mode
to                222 arch/arc/include/asm/entry-arcv2.h 	; INPUT: Z flag set if returning to K mode
to                225 arch/arc/include/asm/entry-compact.h 	mov	lp_count, r9	;LD to lp_count is not allowed
to                309 arch/arc/include/asm/entry-compact.h 	mov	lp_count, r9	;LD to lp_count is not allowed
to                134 arch/arc/include/asm/entry.h 	mov	r12, sp		; save SP as ref to pt_regs
to                193 arch/arc/include/asm/entry.h 	; SP is back to start of pt_regs
to                 13 arch/arc/include/asm/page.h #define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
to                 20 arch/arc/include/asm/page.h void copy_user_highpage(struct page *to, struct page *from,
to                 22 arch/arc/include/asm/page.h void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
to                 28 arch/arc/include/asm/tlb-mmu1.h ;    hence extra instruction to clean
to                 47 arch/arc/include/asm/tlb-mmu1.h ;  Inefficient due to two-register paradigm of this miss handler
to                168 arch/arc/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                179 arch/arc/include/asm/uaccess.h 	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
to                205 arch/arc/include/asm/uaccess.h 		  "=&r" (tmp), "+r" (to), "+r" (from)
to                246 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from),
to                270 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from),
to                291 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                311 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                329 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                385 arch/arc/include/asm/uaccess.h 		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
to                395 arch/arc/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                406 arch/arc/include/asm/uaccess.h 	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
to                431 arch/arc/include/asm/uaccess.h 		  "=&r" (tmp), "+r" (to), "+r" (from)
to                468 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from),
to                492 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from),
to                513 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                533 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                551 arch/arc/include/asm/uaccess.h 			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
to                607 arch/arc/include/asm/uaccess.h 		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
to                616 arch/arc/include/asm/uaccess.h static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
to                619 arch/arc/include/asm/uaccess.h 	unsigned char *d_char = to;
to                730 arch/arc/include/asm/uaccess.h extern unsigned long arc_clear_user_noinline(void __user *to,
to               1071 arch/arc/mm/cache.c void copy_user_highpage(struct page *to, struct page *from,
to               1075 arch/arc/mm/cache.c 	void *kto = kmap_atomic(to);
to               1104 arch/arc/mm/cache.c 	clear_bit(PG_dc_clean, &to->flags);
to               1121 arch/arc/mm/cache.c void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
to               1123 arch/arc/mm/cache.c 	clear_page(to);
to                 28 arch/arc/mm/extable.c unsigned long arc_clear_user_noinline(void __user *to,
to                 31 arch/arc/mm/extable.c 	return __arc_clear_user(to, n);
to                402 arch/arm/include/asm/assembler.h 	@ Slightly optimised to avoid incrementing the pointer twice
to                326 arch/arm/include/asm/io.h static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
to                330 arch/arm/include/asm/io.h 	mmiocpy(to, (const void __force *)from, count);
to                332 arch/arm/include/asm/io.h #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
to                334 arch/arm/include/asm/io.h static inline void memcpy_toio(volatile void __iomem *to, const void *from,
to                338 arch/arm/include/asm/io.h 	mmiocpy((void __force *)to, from, count);
to                340 arch/arm/include/asm/io.h #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
to                 46 arch/arm/include/asm/kvm_mmu.h int create_hyp_mappings(void *from, void *to, pgprot_t prot);
to                186 arch/arm/include/asm/memory.h #define __pv_stub(from,to,instr,type)			\
to                192 arch/arm/include/asm/memory.h 	: "=r" (to)					\
to                 12 arch/arm/include/asm/page-nommu.h #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
to                 15 arch/arm/include/asm/page-nommu.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                112 arch/arm/include/asm/page.h 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
to                128 arch/arm/include/asm/page.h extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
to                136 arch/arm/include/asm/page.h #define copy_user_highpage(to,from,vaddr,vma)	\
to                137 arch/arm/include/asm/page.h 	__cpu_copy_user_highpage(to, from, vaddr, vma)
to                140 arch/arm/include/asm/page.h extern void copy_page(void *to, const void *from);
to                513 arch/arm/include/asm/uaccess.h arm_copy_from_user(void *to, const void __user *from, unsigned long n);
to                516 arch/arm/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                521 arch/arm/include/asm/uaccess.h 	n = arm_copy_from_user(to, from, n);
to                527 arch/arm/include/asm/uaccess.h arm_copy_to_user(void __user *to, const void *from, unsigned long n);
to                529 arch/arm/include/asm/uaccess.h __copy_to_user_std(void __user *to, const void *from, unsigned long n);
to                532 arch/arm/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                537 arch/arm/include/asm/uaccess.h 	n = arm_copy_to_user(to, from, n);
to                541 arch/arm/include/asm/uaccess.h 	return arm_copy_to_user(to, from, n);
to                561 arch/arm/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                563 arch/arm/include/asm/uaccess.h 	memcpy(to, (const void __force *)from, n);
to                567 arch/arm/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                569 arch/arm/include/asm/uaccess.h 	memcpy((void __force *)to, from, n);
to                577 arch/arm/include/asm/uaccess.h static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
to                579 arch/arm/include/asm/uaccess.h 	if (access_ok(to, n))
to                580 arch/arm/include/asm/uaccess.h 		n = __clear_user(to, n);
to                 11 arch/arm/include/asm/vfpmacros.h @ Macros to allow building with old toolkits (with no VFP support)
to                 45 arch/arm/kernel/io.c void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
to                 47 arch/arm/kernel/io.c 	unsigned char *t = to;
to                 61 arch/arm/kernel/io.c void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
to                 66 arch/arm/kernel/io.c 		writeb(*f, to);
to                 68 arch/arm/kernel/io.c 		to++;
to                 85 arch/arm/lib/uaccess_with_memcpy.c __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
to                 91 arch/arm/lib/uaccess_with_memcpy.c 		memcpy((void *)to, from, n);
to                105 arch/arm/lib/uaccess_with_memcpy.c 		while (!pin_page_for_write(to, &pte, &ptl)) {
to                108 arch/arm/lib/uaccess_with_memcpy.c 			if (__put_user(0, (char __user *)to))
to                114 arch/arm/lib/uaccess_with_memcpy.c 		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
to                119 arch/arm/lib/uaccess_with_memcpy.c 		memcpy((void *)to, from, tocopy);
to                121 arch/arm/lib/uaccess_with_memcpy.c 		to += tocopy;
to                138 arch/arm/lib/uaccess_with_memcpy.c arm_copy_to_user(void __user *to, const void *from, unsigned long n)
to                149 arch/arm/lib/uaccess_with_memcpy.c 		n = __copy_to_user_std(to, from, n);
to                152 arch/arm/lib/uaccess_with_memcpy.c 		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
to                 44 arch/arm/mach-omap2/pm44xx.c 	const char *to;
to                182 arch/arm/mach-omap2/pm44xx.c 	{.from = "mpuss_clkdm", .to = "l3_emif_clkdm"},
to                183 arch/arm/mach-omap2/pm44xx.c 	{.from = "mpuss_clkdm", .to = "l3_1_clkdm"},
to                184 arch/arm/mach-omap2/pm44xx.c 	{.from = "mpuss_clkdm", .to = "l3_2_clkdm"},
to                185 arch/arm/mach-omap2/pm44xx.c 	{.from = "ducati_clkdm", .to = "l3_1_clkdm"},
to                186 arch/arm/mach-omap2/pm44xx.c 	{.from = "ducati_clkdm", .to = "l3_2_clkdm"},
to                191 arch/arm/mach-omap2/pm44xx.c 	{.from = "mpu_clkdm", .to = "emif_clkdm"},
to                202 arch/arm/mach-omap2/pm44xx.c 	struct clockdomain *from, *to;
to                209 arch/arm/mach-omap2/pm44xx.c 		to = clkdm_lookup(map->to);
to                210 arch/arm/mach-omap2/pm44xx.c 		if (!from || !to) {
to                212 arch/arm/mach-omap2/pm44xx.c 			       map->from, map->to);
to                215 arch/arm/mach-omap2/pm44xx.c 		ret = clkdm_add_wkdep(from, to);
to                218 arch/arm/mach-omap2/pm44xx.c 			       map->from, map->to, ret);
to                 43 arch/arm/mach-s3c24xx/simtec-usb.c usb_simtec_powercontrol(int port, int to)
to                 45 arch/arm/mach-s3c24xx/simtec-usb.c 	pr_debug("usb_simtec_powercontrol(%d,%d)\n", port, to);
to                 47 arch/arm/mach-s3c24xx/simtec-usb.c 	power_state[port] = to;
to                 70 arch/arm/mach-s3c64xx/mach-smartq.c static void smartq_usb_host_powercontrol(int port, int to)
to                 72 arch/arm/mach-s3c64xx/mach-smartq.c 	pr_debug("%s(%d, %d)\n", __func__, port, to);
to                 75 arch/arm/mach-s3c64xx/mach-smartq.c 		gpio_set_value(S3C64XX_GPL(0), to);
to                 76 arch/arm/mach-s3c64xx/mach-smartq.c 		gpio_set_value(S3C64XX_GPL(1), to);
to                 38 arch/arm/mm/copypage-fa.c void fa_copy_user_highpage(struct page *to, struct page *from,
to                 43 arch/arm/mm/copypage-fa.c 	kto = kmap_atomic(to);
to                 65 arch/arm/mm/copypage-feroceon.c void feroceon_copy_user_highpage(struct page *to, struct page *from,
to                 70 arch/arm/mm/copypage-feroceon.c 	kto = kmap_atomic(to);
to                 40 arch/arm/mm/copypage-v4mc.c static void mc_copy_user_page(void *from, void *to)
to                 59 arch/arm/mm/copypage-v4mc.c 	: "+&r" (from), "+&r" (to), "=&r" (tmp)
to                 64 arch/arm/mm/copypage-v4mc.c void v4_mc_copy_user_highpage(struct page *to, struct page *from,
to                 67 arch/arm/mm/copypage-v4mc.c 	void *kto = kmap_atomic(to);
to                 47 arch/arm/mm/copypage-v4wb.c void v4wb_copy_user_highpage(struct page *to, struct page *from,
to                 52 arch/arm/mm/copypage-v4wb.c 	kto = kmap_atomic(to);
to                 43 arch/arm/mm/copypage-v4wt.c void v4wt_copy_user_highpage(struct page *to, struct page *from,
to                 48 arch/arm/mm/copypage-v4wt.c 	kto = kmap_atomic(to);
to                 30 arch/arm/mm/copypage-v6.c static void v6_copy_user_highpage_nonaliasing(struct page *to,
to                 36 arch/arm/mm/copypage-v6.c 	kto = kmap_atomic(to);
to                 69 arch/arm/mm/copypage-v6.c static void v6_copy_user_highpage_aliasing(struct page *to,
to                 79 arch/arm/mm/copypage-v6.c 	discard_old_kernel_data(page_address(to));
to                 91 arch/arm/mm/copypage-v6.c 	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
to                105 arch/arm/mm/copypage-v6.c 	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
to                116 arch/arm/mm/copypage-v6.c 	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
to                117 arch/arm/mm/copypage-v6.c 	clear_page((void *)to);
to                 63 arch/arm/mm/copypage-xsc3.c void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
to                 68 arch/arm/mm/copypage-xsc3.c 	kto = kmap_atomic(to);
to                 36 arch/arm/mm/copypage-xscale.c static void mc_copy_user_page(void *from, void *to)
to                 79 arch/arm/mm/copypage-xscale.c 	: "+&r" (from), "+&r" (to), "=&r" (tmp)
to                 84 arch/arm/mm/copypage-xscale.c void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
to                 87 arch/arm/mm/copypage-xscale.c 	void *kto = kmap_atomic(to);
to                 40 arch/arm/mm/flush.c 	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
to                 43 arch/arm/mm/flush.c 	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
to                 48 arch/arm/mm/flush.c 	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
to                 56 arch/arm/mm/flush.c 	unsigned long to;
to                 59 arch/arm/mm/flush.c 	to = va + offset;
to                 60 arch/arm/mm/flush.c 	flush_icache_range(to, to + len);
to                401 arch/arm/net/bpf_jit_32.c 	int to, from;
to                405 arch/arm/net/bpf_jit_32.c 	to = ctx->offsets[bpf_to];
to                408 arch/arm/net/bpf_jit_32.c 	return to - from - 1;
to                455 arch/arm/net/bpf_jit_32.c 	int to, from;
to                459 arch/arm/net/bpf_jit_32.c 	to = ctx->epilogue_offset;
to                462 arch/arm/net/bpf_jit_32.c 	return to - from - 2;
to                719 arch/arm/plat-samsung/devs.c 		struct s3c2410_nand_set *to;
to                722 arch/arm/plat-samsung/devs.c 		to = kmemdup(from, size, GFP_KERNEL);
to                723 arch/arm/plat-samsung/devs.c 		npd->sets = to;	/* set, even if we failed */
to                725 arch/arm/plat-samsung/devs.c 		if (!to) {
to                731 arch/arm/plat-samsung/devs.c 			ret = s3c_nand_copy_set(to);
to                737 arch/arm/plat-samsung/devs.c 			to++;
to                 95 arch/arm/plat-samsung/include/plat/gpio-cfg.h extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
to                138 arch/arm64/include/asm/assembler.h 	.macro		_asm_extable, from, to
to                167 arch/arm64/include/asm/fpsimdmacros.h .macro __for from:req, to:req
to                176 arch/arm64/include/asm/fpsimdmacros.h .macro _for var:req, from:req, to:req, insn:vararg
to                147 arch/arm64/include/asm/kvm_mmu.h int create_hyp_mappings(void *from, void *to, pgprot_t prot);
to                 11 arch/arm64/include/asm/numa.h int __node_distance(int from, int to);
to                 34 arch/arm64/include/asm/numa.h void __init numa_set_distance(int from, int to, int distance);
to                 19 arch/arm64/include/asm/page.h extern void __cpu_copy_user_page(void *to, const void *from,
to                 21 arch/arm64/include/asm/page.h extern void copy_page(void *to, const void *from);
to                 22 arch/arm64/include/asm/page.h extern void clear_page(void *to);
to                 25 arch/arm64/include/asm/page.h #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
to                 98 arch/arm64/include/asm/uaccess.h #define _ASM_EXTABLE(from, to)						\
to                101 arch/arm64/include/asm/uaccess.h 	"	.long		(" #from " - .), (" #to " - .)\n"	\
to                383 arch/arm64/include/asm/uaccess.h extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
to                384 arch/arm64/include/asm/uaccess.h #define raw_copy_from_user(to, from, n)					\
to                388 arch/arm64/include/asm/uaccess.h 	__acfu_ret = __arch_copy_from_user((to),			\
to                394 arch/arm64/include/asm/uaccess.h extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
to                395 arch/arm64/include/asm/uaccess.h #define raw_copy_to_user(to, from, n)					\
to                399 arch/arm64/include/asm/uaccess.h 	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
to                405 arch/arm64/include/asm/uaccess.h extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
to                406 arch/arm64/include/asm/uaccess.h #define raw_copy_in_user(to, from, n)					\
to                410 arch/arm64/include/asm/uaccess.h 	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
to                419 arch/arm64/include/asm/uaccess.h extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
to                420 arch/arm64/include/asm/uaccess.h static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
to                422 arch/arm64/include/asm/uaccess.h 	if (access_ok(to, n)) {
to                424 arch/arm64/include/asm/uaccess.h 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
to                437 arch/arm64/include/asm/uaccess.h void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
to                438 arch/arm64/include/asm/uaccess.h extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
to                 15 arch/arm64/kernel/io.c void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
to                 18 arch/arm64/kernel/io.c 		*(u8 *)to = __raw_readb(from);
to                 20 arch/arm64/kernel/io.c 		to++;
to                 25 arch/arm64/kernel/io.c 		*(u64 *)to = __raw_readq(from);
to                 27 arch/arm64/kernel/io.c 		to += 8;
to                 32 arch/arm64/kernel/io.c 		*(u8 *)to = __raw_readb(from);
to                 34 arch/arm64/kernel/io.c 		to++;
to                 43 arch/arm64/kernel/io.c void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
to                 45 arch/arm64/kernel/io.c 	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
to                 46 arch/arm64/kernel/io.c 		__raw_writeb(*(u8 *)from, to);
to                 48 arch/arm64/kernel/io.c 		to++;
to                 53 arch/arm64/kernel/io.c 		__raw_writeq(*(u64 *)from, to);
to                 55 arch/arm64/kernel/io.c 		to += 8;
to                 60 arch/arm64/kernel/io.c 		__raw_writeb(*(u8 *)from, to);
to                 62 arch/arm64/kernel/io.c 		to++;
to                 22 arch/arm64/lib/uaccess_flushcache.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
to                 25 arch/arm64/lib/uaccess_flushcache.c 	memcpy_flushcache(to, page_address(page) + offset, len);
to                 28 arch/arm64/lib/uaccess_flushcache.c unsigned long __copy_user_flushcache(void *to, const void __user *from,
to                 34 arch/arm64/lib/uaccess_flushcache.c 	rc = __arch_copy_from_user(to, from, n);
to                 38 arch/arm64/lib/uaccess_flushcache.c 	__clean_dcache_area_pop(to, n - rc);
to                149 arch/arm64/mm/numa.c static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
to                151 arch/arm64/mm/numa.c 	return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
to                312 arch/arm64/mm/numa.c void __init numa_set_distance(int from, int to, int distance)
to                319 arch/arm64/mm/numa.c 	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
to                320 arch/arm64/mm/numa.c 			from < 0 || to < 0) {
to                322 arch/arm64/mm/numa.c 			    from, to, distance);
to                327 arch/arm64/mm/numa.c 	    (from == to && distance != LOCAL_DISTANCE)) {
to                329 arch/arm64/mm/numa.c 			     from, to, distance);
to                333 arch/arm64/mm/numa.c 	numa_distance[from * numa_distance_cnt + to] = distance;
to                339 arch/arm64/mm/numa.c int __node_distance(int from, int to)
to                341 arch/arm64/mm/numa.c 	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
to                342 arch/arm64/mm/numa.c 		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
to                343 arch/arm64/mm/numa.c 	return numa_distance[from * numa_distance_cnt + to];
to                147 arch/arm64/net/bpf_jit_comp.c 	int to = ctx->offset[bpf_to];
to                151 arch/arm64/net/bpf_jit_comp.c 	return to - from;
to                164 arch/arm64/net/bpf_jit_comp.c 	int to = ctx->epilogue_offset;
to                167 arch/arm64/net/bpf_jit_comp.c 	return to - from;
to                 14 arch/c6x/include/asm/string.h asmlinkage extern void *memcpy(void *to, const void *from, size_t n);
to                 17 arch/c6x/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                 25 arch/c6x/include/asm/uaccess.h 			*(u8 *)to = *(u8 __force *)from;
to                 32 arch/c6x/include/asm/uaccess.h 				      : "A"(to), "a"(from)
to                 40 arch/c6x/include/asm/uaccess.h 				      : "a"(to), "a"(from)
to                 48 arch/c6x/include/asm/uaccess.h 	memcpy(to, (const void __force *)from, n);
to                 53 arch/c6x/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                 61 arch/c6x/include/asm/uaccess.h 			*(u8 __force *)to = *(u8 *)from;
to                 68 arch/c6x/include/asm/uaccess.h 				      : "a"(to), "a"(from)
to                 76 arch/c6x/include/asm/uaccess.h 				      : "a"(to), "a"(from)
to                 84 arch/c6x/include/asm/uaccess.h 	memcpy((void __force *)to, from, n);
to                 22 arch/csky/abiv1/inc/abi/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
to                 25 arch/csky/abiv1/inc/abi/page.h 	copy_page(to, from);
to                 26 arch/csky/abiv1/inc/abi/page.h 	if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))
to                 10 arch/csky/abiv2/inc/abi/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
to                 13 arch/csky/abiv2/inc/abi/page.h 	copy_page(to, from);
to                 45 arch/csky/include/asm/page.h extern void *memcpy(void *to, const void *from, size_t l);
to                 48 arch/csky/include/asm/page.h #define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
to                257 arch/csky/include/asm/uaccess.h #define ___copy_to_user(to, from, n)			\
to                313 arch/csky/include/asm/uaccess.h 	: "=r"(n), "=r"(to), "=r"(from), "=r"(w0),	\
to                315 arch/csky/include/asm/uaccess.h 	: "0"(n), "1"(to), "2"(from)			\
to                319 arch/csky/include/asm/uaccess.h #define ___copy_from_user(to, from, n)			\
to                380 arch/csky/include/asm/uaccess.h 	: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),	\
to                382 arch/csky/include/asm/uaccess.h 	: "0"(n), "1"(to), "2"(from)			\
to                386 arch/csky/include/asm/uaccess.h unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
to                387 arch/csky/include/asm/uaccess.h unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
to                389 arch/csky/include/asm/uaccess.h unsigned long clear_user(void *to, unsigned long n);
to                390 arch/csky/include/asm/uaccess.h unsigned long __clear_user(void __user *to, unsigned long n);
to                  7 arch/csky/lib/usercopy.c unsigned long raw_copy_from_user(void *to, const void *from,
to                 10 arch/csky/lib/usercopy.c 	___copy_from_user(to, from, n);
to                 15 arch/csky/lib/usercopy.c unsigned long raw_copy_to_user(void *to, const void *from,
to                 18 arch/csky/lib/usercopy.c 	___copy_to_user(to, from, n);
to                233 arch/csky/lib/usercopy.c clear_user(void __user *to, unsigned long n)
to                235 arch/csky/lib/usercopy.c 	if (access_ok(to, n))
to                236 arch/csky/lib/usercopy.c 		__do_clear_user(to, n);
to                253 arch/csky/lib/usercopy.c __clear_user(void __user *to, unsigned long n)
to                255 arch/csky/lib/usercopy.c 	__do_clear_user(to, n);
to                 22 arch/h8300/kernel/h8300_ksyms.c asmlinkage long strncpy_from_user(void *to, void *from, size_t n);
to                116 arch/hexagon/include/asm/page.h #define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
to                122 arch/hexagon/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                 51 arch/hexagon/include/asm/uaccess.h unsigned long raw_copy_from_user(void *to, const void __user *from,
to                 53 arch/hexagon/include/asm/uaccess.h unsigned long raw_copy_to_user(void __user *to, const void *from,
to                 62 arch/ia64/include/asm/numa.h #define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
to                 63 arch/ia64/include/asm/numa.h extern int __node_distance(int from, int to);
to                 64 arch/ia64/include/asm/numa.h #define node_distance(from,to) __node_distance(from, to)
to                 66 arch/ia64/include/asm/page.h extern void copy_page (void *to, void *from);
to                 78 arch/ia64/include/asm/page.h #define copy_user_page(to, from, vaddr, page)	\
to                 80 arch/ia64/include/asm/page.h 	copy_page((to), (from));		\
to                198 arch/ia64/include/asm/uaccess.h extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
to                202 arch/ia64/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long count)
to                204 arch/ia64/include/asm/uaccess.h 	return __copy_user(to, (__force void __user *) from, count);
to                208 arch/ia64/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long count)
to                210 arch/ia64/include/asm/uaccess.h 	return __copy_user((__force void __user *) to, from, count);
to                218 arch/ia64/include/asm/uaccess.h #define __clear_user(to, n)		__do_clear_user(to, n)
to                220 arch/ia64/include/asm/uaccess.h #define clear_user(to, n)					\
to                223 arch/ia64/include/asm/uaccess.h 	if (__access_ok(to, __cu_len))				\
to                224 arch/ia64/include/asm/uaccess.h 		__cu_len = __do_clear_user(to, __cu_len);	\
to                233 arch/ia64/include/asm/uaccess.h extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
to                235 arch/ia64/include/asm/uaccess.h #define strncpy_from_user(to, from, n)					\
to                240 arch/ia64/include/asm/uaccess.h 		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
to                 67 arch/ia64/kernel/kprobes.c static void __kprobes set_brl_inst(void *from, void *to)
to                 69 arch/ia64/kernel/kprobes.c 	s64 rel = ((s64) to - (s64) from) >> 4;
to                 11 arch/ia64/lib/io.c void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
to                 13 arch/ia64/lib/io.c 	char *dst = to;
to                 26 arch/ia64/lib/io.c void memcpy_toio(volatile void __iomem *to, const void *from, long count)
to                 32 arch/ia64/lib/io.c 		writeb(*src++, to++);
to                 39 arch/ia64/mm/numa.c int __node_distance(int from, int to)
to                 41 arch/ia64/mm/numa.c 	return slit_distance(from, to);
to                 13 arch/m68k/fpsp040/fpsp.h |	These equates are used to access the exception frame, the fsave
to                 44 arch/m68k/fpsp040/fpsp.h |	Positive offsets from A6 refer to the exception frame.  Negative
to                 45 arch/m68k/fpsp040/fpsp.h |	offsets refer to the Local Variable area and the fsave area.
to                 58 arch/m68k/fpsp040/fpsp.h |	label to a routine that will process a real exception of the
to                 63 arch/m68k/fpsp040/fpsp.h |	because it needs to report an exception back to the user.  This
to                 71 arch/m68k/fpsp040/fpsp.h |	temporaries.  If a routine needs to change any
to                118 arch/m68k/fpsp040/fpsp.h |NEXT		equ	LV+192		;need to increase LOCAL_SIZE
to                135 arch/m68k/fpsp040/fpsp.h 	.set	WBTEMP_SGN,WBTEMP+2	| used to store sign
to                204 arch/m68k/fpsp040/fpsp.h 	.set	FPTEMP_SGN,FPTEMP+2	| used to store sign
to                211 arch/m68k/fpsp040/fpsp.h 	.set	ETEMP_SGN,ETEMP+2		| used to store sign
to                289 arch/m68k/fpsp040/fpsp.h 	.set	x_mode,0x00	|  round to extended
to                290 arch/m68k/fpsp040/fpsp.h 	.set	s_mode,0x40	|  round to single
to                291 arch/m68k/fpsp040/fpsp.h 	.set	d_mode,0x80	|  round to double
to                294 arch/m68k/fpsp040/fpsp.h 	.set	rz_mode,0x10	|  round to zero
to                295 arch/m68k/fpsp040/fpsp.h 	.set	rm_mode,0x20	|  round to minus infinity
to                296 arch/m68k/fpsp040/fpsp.h 	.set	rp_mode,0x30	|  round to plus infinity
to               8321 arch/m68k/ifpsp060/src/fplsp.S # if the input is exactly equal to one, then exit through ld_pzero.
to               8427 arch/m68k/ifpsp060/src/fpsp.S # if the input is exactly equal to one, then exit through ld_pzero.
to               11815 arch/m68k/ifpsp060/src/fpsp.S # if the rnd mode is anything but RZ, then we have to re-do the above
to               19789 arch/m68k/ifpsp060/src/fpsp.S #		      if the ea is -() or ()+, need to know # of bytes.	#
to               24765 arch/m68k/ifpsp060/src/fpsp.S # if it's a fmove out instruction, we don't have to fix a7
to                578 arch/m68k/ifpsp060/src/isp.S # if exception occurred in user mode, then we have to restore a7 in case it
to               8246 arch/m68k/ifpsp060/src/pfpsp.S # if the rnd mode is anything but RZ, then we have to re-do the above
to               14725 arch/m68k/ifpsp060/src/pfpsp.S # if it's a fmove out instruction, we don't have to fix a7
to                 14 arch/m68k/include/asm/page_mm.h static inline void copy_page(void *to, void *from)
to                 24 arch/m68k/include/asm/page_mm.h 		       : "=a" (to), "=a" (from), "=d" (tmp)
to                 25 arch/m68k/include/asm/page_mm.h 		       : "0" (to), "1" (from) , "2" (PAGE_SIZE / 32 - 1)
to                 53 arch/m68k/include/asm/page_mm.h #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
to                 60 arch/m68k/include/asm/page_mm.h #define copy_user_page(to, from, vaddr, page)	\
to                 61 arch/m68k/include/asm/page_mm.h 	do {	copy_page(to, from);		\
to                 11 arch/m68k/include/asm/page_no.h #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
to                 14 arch/m68k/include/asm/page_no.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                182 arch/m68k/include/asm/uaccess_mm.h unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
to                183 arch/m68k/include/asm/uaccess_mm.h unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
to                190 arch/m68k/include/asm/uaccess_mm.h #define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
to                225 arch/m68k/include/asm/uaccess_mm.h 		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
to                228 arch/m68k/include/asm/uaccess_mm.h #define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
to                229 arch/m68k/include/asm/uaccess_mm.h 	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
to                230 arch/m68k/include/asm/uaccess_mm.h #define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
to                231 arch/m68k/include/asm/uaccess_mm.h 	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
to                235 arch/m68k/include/asm/uaccess_mm.h __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
to                241 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
to                244 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
to                247 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
to                250 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
to                253 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
to                256 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
to                259 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
to                262 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
to                265 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
to                268 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
to                271 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
to                275 arch/m68k/include/asm/uaccess_mm.h 		return __generic_copy_from_user(to, from, n);
to                281 arch/m68k/include/asm/uaccess_mm.h #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
to                312 arch/m68k/include/asm/uaccess_mm.h 		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
to                316 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
to                322 arch/m68k/include/asm/uaccess_mm.h 		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
to                325 arch/m68k/include/asm/uaccess_mm.h 		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
to                328 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
to                331 arch/m68k/include/asm/uaccess_mm.h 		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
to                334 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
to                337 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
to                340 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
to                343 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
to                346 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
to                349 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
to                352 arch/m68k/include/asm/uaccess_mm.h 		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
to                356 arch/m68k/include/asm/uaccess_mm.h 		return __generic_copy_to_user(to, from, n);
to                363 arch/m68k/include/asm/uaccess_mm.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                366 arch/m68k/include/asm/uaccess_mm.h 		return __constant_copy_from_user(to, from, n);
to                367 arch/m68k/include/asm/uaccess_mm.h 	return __generic_copy_from_user(to, from, n);
to                371 arch/m68k/include/asm/uaccess_mm.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                374 arch/m68k/include/asm/uaccess_mm.h 		return __constant_copy_to_user(to, from, n);
to                375 arch/m68k/include/asm/uaccess_mm.h 	return __generic_copy_to_user(to, from, n);
to                386 arch/m68k/include/asm/uaccess_mm.h unsigned long __clear_user(void __user *to, unsigned long n);
to                106 arch/m68k/include/asm/uaccess_no.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                108 arch/m68k/include/asm/uaccess_no.h 	memcpy(to, (__force const void *)from, n);
to                113 arch/m68k/include/asm/uaccess_no.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                115 arch/m68k/include/asm/uaccess_no.h 	memcpy((__force void *)to, from, n);
to                150 arch/m68k/include/asm/uaccess_no.h __clear_user(void *to, unsigned long n)
to                152 arch/m68k/include/asm/uaccess_no.h 	memset(to, 0, n);
to                156 arch/m68k/include/asm/uaccess_no.h #define	clear_user(to,n)	__clear_user(to,n)
to                 10 arch/m68k/lib/memcpy.c void *memcpy(void *to, const void *from, size_t n)
to                 12 arch/m68k/lib/memcpy.c 	void *xto = to;
to                 17 arch/m68k/lib/memcpy.c 	if ((long)to & 1) {
to                 18 arch/m68k/lib/memcpy.c 		char *cto = to;
to                 21 arch/m68k/lib/memcpy.c 		to = cto;
to                 27 arch/m68k/lib/memcpy.c 		char *cto = to;
to                 34 arch/m68k/lib/memcpy.c 	if (n > 2 && (long)to & 2) {
to                 35 arch/m68k/lib/memcpy.c 		short *sto = to;
to                 38 arch/m68k/lib/memcpy.c 		to = sto;
to                 44 arch/m68k/lib/memcpy.c 		long *lto = to;
to                 72 arch/m68k/lib/memcpy.c 		to = lto;
to                 76 arch/m68k/lib/memcpy.c 		short *sto = to;
to                 79 arch/m68k/lib/memcpy.c 		to = sto;
to                 83 arch/m68k/lib/memcpy.c 		char *cto = to;
to                 10 arch/m68k/lib/uaccess.c unsigned long __generic_copy_from_user(void *to, const void __user *from,
to                 49 arch/m68k/lib/uaccess.c 		: "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
to                 56 arch/m68k/lib/uaccess.c unsigned long __generic_copy_to_user(void __user *to, const void *from,
to                 93 arch/m68k/lib/uaccess.c 		: "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
to                104 arch/m68k/lib/uaccess.c unsigned long __clear_user(void __user *to, unsigned long n)
to                137 arch/m68k/lib/uaccess.c 		: "=d" (res), "+a" (to)
to                227 arch/m68k/math-emu/fp_decode.h | adjust stack for byte moves from/to stack
to                271 arch/m68k/math-emu/fp_decode.h 	lea	(-12,%a0),%a1		| setup to addr of 1st reg to move
to                 78 arch/microblaze/include/asm/page.h # define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
to                 94 arch/microblaze/include/asm/uaccess.h extern unsigned long __copy_tofrom_user(void __user *to,
to                 98 arch/microblaze/include/asm/uaccess.h static inline unsigned long __must_check __clear_user(void __user *to,
to                111 arch/microblaze/include/asm/uaccess.h 		: "=r"(n), "=r"(to)			\
to                112 arch/microblaze/include/asm/uaccess.h 		: "0"(n), "1"(to)
to                117 arch/microblaze/include/asm/uaccess.h static inline unsigned long __must_check clear_user(void __user *to,
to                121 arch/microblaze/include/asm/uaccess.h 	if (unlikely(!access_ok(to, n)))
to                124 arch/microblaze/include/asm/uaccess.h 	return __clear_user(to, n);
to                314 arch/microblaze/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                316 arch/microblaze/include/asm/uaccess.h 	return __copy_tofrom_user((__force void __user *)to, from, n);
to                320 arch/microblaze/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                322 arch/microblaze/include/asm/uaccess.h 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
to                330 arch/microblaze/include/asm/uaccess.h extern int __strncpy_user(char *to, const char __user *from, int len);
to                 49 arch/mips/cavium-octeon/flash_setup.c static void octeon_flash_map_copy_from(struct map_info *map, void *to,
to                 53 arch/mips/cavium-octeon/flash_setup.c 	inline_map_copy_from(map, to, from, len);
to                 57 arch/mips/cavium-octeon/flash_setup.c static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
to                 61 arch/mips/cavium-octeon/flash_setup.c 	inline_map_copy_to(map, to, from, len);
to                659 arch/mips/include/asm/io.h void __ioread64_copy(void *to, const void __iomem *from, size_t count);
to                 21 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# addresses, and need to have the appropriate memory region set
to                 41 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# counters interrupt to IRQ 6
to                 72 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Zero all of CVMSEG to make sure parity is correct
to                 91 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Jump the master to kernel_entry
to                 98 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# All cores other than the master need to wait here for SMP bootstrap
to                114 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# This is the variable where the next core to boot is stored
to                116 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Get the core id of the next to be booted
to                127 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Set the SP global variable to zero so the master knows we've started
to                135 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Jump to the normal Linux SMP entry point
to                141 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Someone tried to boot SMP with a non SMP kernel. All extra cores
to                 92 arch/mips/include/asm/mach-ip27/kernel-entry-init.h 	ld	t0, 0(t0)			# t0 points to kern_vars struct
to                 28 arch/mips/include/asm/mach-ip27/topology.h #define node_distance(from, to) (__node_distances[(from)][(to)])
to                 17 arch/mips/include/asm/mach-loongson64/topology.h #define node_distance(from, to)	(__node_distances[(from)][(to)])
to                 44 arch/mips/include/asm/mach-paravirt/kernel-entry-init.h 	beqz	sp, 3b			# Spin until told to proceed.
to                 19 arch/mips/include/asm/msa.h extern void read_msa_wr_b(unsigned idx, union fpureg *to);
to                 20 arch/mips/include/asm/msa.h extern void read_msa_wr_h(unsigned idx, union fpureg *to);
to                 21 arch/mips/include/asm/msa.h extern void read_msa_wr_w(unsigned idx, union fpureg *to);
to                 22 arch/mips/include/asm/msa.h extern void read_msa_wr_d(unsigned idx, union fpureg *to);
to                 33 arch/mips/include/asm/msa.h static inline void read_msa_wr(unsigned idx, union fpureg *to,
to                 38 arch/mips/include/asm/msa.h 		read_msa_wr_b(idx, to);
to                 42 arch/mips/include/asm/msa.h 		read_msa_wr_h(idx, to);
to                 46 arch/mips/include/asm/msa.h 		read_msa_wr_w(idx, to);
to                 50 arch/mips/include/asm/msa.h 		read_msa_wr_d(idx, to);
to                 91 arch/mips/include/asm/page.h extern void copy_page(void * to, void * from);
to                114 arch/mips/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
to                519 arch/mips/include/asm/uaccess.h #define __invoke_copy_from(func, to, from, n)				\
to                525 arch/mips/include/asm/uaccess.h 	__cu_to_r = (to);						\
to                542 arch/mips/include/asm/uaccess.h #define __invoke_copy_to(func, to, from, n)				\
to                548 arch/mips/include/asm/uaccess.h 	__cu_to_r = (to);						\
to                560 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_kernel(to, from, n)				\
to                561 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_user, to, from, n)
to                563 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_kernel(to, from, n)				\
to                564 arch/mips/include/asm/uaccess.h 	__invoke_copy_to(__copy_user, to, from, n)
to                566 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_kernel(to, from, n)				\
to                567 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_user, to, from, n)
to                570 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_user(to, from, n)				\
to                571 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_user, to, from, n)
to                573 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_user(to, from, n)				\
to                574 arch/mips/include/asm/uaccess.h 	__invoke_copy_to(__copy_user, to, from, n)
to                576 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_user(to, from, n)				\
to                577 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_user, to, from, n)
to                593 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_user(to, from, n)				\
to                594 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_from_user_eva, to, from, n)
to                596 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_user(to, from, n)				\
to                597 arch/mips/include/asm/uaccess.h 	__invoke_copy_to(__copy_to_user_eva, to, from, n)
to                599 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_user(to, from, n)				\
to                600 arch/mips/include/asm/uaccess.h 	__invoke_copy_from(__copy_in_user_eva, to, from, n)
to                605 arch/mips/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                608 arch/mips/include/asm/uaccess.h 		return __invoke_copy_to_kernel(to, from, n);
to                610 arch/mips/include/asm/uaccess.h 		return __invoke_copy_to_user(to, from, n);
to                614 arch/mips/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                617 arch/mips/include/asm/uaccess.h 		return __invoke_copy_from_kernel(to, from, n);
to                619 arch/mips/include/asm/uaccess.h 		return __invoke_copy_from_user(to, from, n);
to                626 arch/mips/include/asm/uaccess.h raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
to                629 arch/mips/include/asm/uaccess.h 		return ___invoke_copy_in_kernel(to, from, n);
to                631 arch/mips/include/asm/uaccess.h 		return ___invoke_copy_in_user(to, from,	n);
to                 16 arch/mips/lib/iomap_copy.c void __ioread64_copy(void *to, const void __iomem *from, size_t count)
to                 19 arch/mips/lib/iomap_copy.c 	u64 *dst = to;
to                 26 arch/mips/lib/iomap_copy.c 	__ioread32_copy(to, from, count * 2);
to                171 arch/mips/mm/init.c void copy_user_highpage(struct page *to, struct page *from,
to                176 arch/mips/mm/init.c 	vto = kmap_atomic(to);
to                613 arch/mips/mm/page.c extern void copy_page_cpu(void *to, void *from);
to                651 arch/mips/mm/page.c void copy_page(void *to, void *from)
to                654 arch/mips/mm/page.c 	u64 to_phys = CPHYSADDR((unsigned long)to);
to                658 arch/mips/mm/page.c 	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
to                660 arch/mips/mm/page.c 		return copy_page_cpu(to, from);
to                326 arch/mips/txx9/rbtx4939/setup.c static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
to                340 arch/mips/txx9/rbtx4939/setup.c 			memcpy(to,
to                346 arch/mips/txx9/rbtx4939/setup.c 			to += curlen;
to                356 arch/mips/txx9/rbtx4939/setup.c 			memcpy(to, (void *)(from ^ 0x400000), curlen);
to                359 arch/mips/txx9/rbtx4939/setup.c 			to += curlen;
to                364 arch/mips/txx9/rbtx4939/setup.c 	memcpy(to, (void *)from, len);
to                 26 arch/nds32/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
to                 31 arch/nds32/include/asm/page.h 		    struct page *to);
to                 37 arch/nds32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
to                 41 arch/nds32/include/asm/page.h void copy_page(void *to, void *from);
to                266 arch/nds32/include/asm/uaccess.h extern unsigned long __arch_copy_from_user(void *to, const void __user * from,
to                268 arch/nds32/include/asm/uaccess.h extern unsigned long __arch_copy_to_user(void __user * to, const void *from,
to                276 arch/nds32/include/asm/uaccess.h static inline unsigned long clear_user(void __user * to, unsigned long n)
to                278 arch/nds32/include/asm/uaccess.h 	if (access_ok(to, n))
to                279 arch/nds32/include/asm/uaccess.h 		n = __arch_clear_user(to, n);
to                283 arch/nds32/include/asm/uaccess.h static inline unsigned long __clear_user(void __user * to, unsigned long n)
to                285 arch/nds32/include/asm/uaccess.h 	return __arch_clear_user(to, n);
to                179 arch/nds32/mm/cacheflush.c 		    struct page *to)
to                197 arch/nds32/mm/cacheflush.c void copy_user_highpage(struct page *to, struct page *from,
to                201 arch/nds32/mm/cacheflush.c 	kto = ((unsigned long)page_address(to) & PAGE_MASK);
to                203 arch/nds32/mm/cacheflush.c 	pto = page_to_phys(to);
to                 49 arch/nios2/include/asm/page.h #define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
to                 55 arch/nios2/include/asm/page.h 				struct page *to);
to                 50 arch/nios2/include/asm/uaccess.h static inline unsigned long __must_check __clear_user(void __user *to,
to                 62 arch/nios2/include/asm/uaccess.h 		: "=r" (n), "=r" (to)
to                 63 arch/nios2/include/asm/uaccess.h 		: "0" (n), "1" (to)
to                 69 arch/nios2/include/asm/uaccess.h static inline unsigned long __must_check clear_user(void __user *to,
to                 72 arch/nios2/include/asm/uaccess.h 	if (!access_ok(to, n))
to                 74 arch/nios2/include/asm/uaccess.h 	return __clear_user(to, n);
to                 78 arch/nios2/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n);
to                 80 arch/nios2/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n);
to                234 arch/nios2/mm/cacheflush.c 		    struct page *to)
to                 40 arch/openrisc/include/asm/page.h #define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
to                 43 arch/openrisc/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
to                237 arch/openrisc/include/asm/uaccess.h __copy_tofrom_user(void *to, const void *from, unsigned long size);
to                239 arch/openrisc/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long size)
to                241 arch/openrisc/include/asm/uaccess.h 	return __copy_tofrom_user(to, (__force const void *)from, size);
to                244 arch/openrisc/include/asm/uaccess.h raw_copy_to_user(void *to, const void __user *from, unsigned long size)
to                246 arch/openrisc/include/asm/uaccess.h 	return __copy_tofrom_user((__force void *)to, from, size);
to                 45 arch/parisc/include/asm/alternative.h #define ALTERNATIVE(from, to, cond, replacement)\
to                 47 arch/parisc/include/asm/alternative.h 	.word (from - .), (to - from)/4	!	\
to                 26 arch/parisc/include/asm/page.h #define copy_page(to, from)	copy_page_asm((void *)(to), (void *)(from))
to                 31 arch/parisc/include/asm/page.h void copy_page_asm(void *to, void *from);
to                105 arch/powerpc/include/asm/book3s/32/kup.h static __always_inline void allow_user_access(void __user *to, const void __user *from,
to                114 arch/powerpc/include/asm/book3s/32/kup.h 	addr = (__force u32)to;
to                124 arch/powerpc/include/asm/book3s/32/kup.h static __always_inline void prevent_user_access(void __user *to, const void __user *from,
to                127 arch/powerpc/include/asm/book3s/32/kup.h 	u32 addr = (__force u32)to;
to                 80 arch/powerpc/include/asm/book3s/64/kup-radix.h static __always_inline void allow_user_access(void __user *to, const void __user *from,
to                 93 arch/powerpc/include/asm/book3s/64/kup-radix.h static inline void prevent_user_access(void __user *to, const void __user *from,
to                 48 arch/powerpc/include/asm/kup.h static inline void allow_user_access(void __user *to, const void __user *from,
to                 50 arch/powerpc/include/asm/kup.h static inline void prevent_user_access(void __user *to, const void __user *from,
to                 64 arch/powerpc/include/asm/kup.h static inline void allow_write_to_user(void __user *to, unsigned long size)
to                 66 arch/powerpc/include/asm/kup.h 	allow_user_access(to, NULL, size, KUAP_WRITE);
to                 69 arch/powerpc/include/asm/kup.h static inline void allow_read_write_user(void __user *to, const void __user *from,
to                 72 arch/powerpc/include/asm/kup.h 	allow_user_access(to, from, size, KUAP_READ_WRITE);
to                 80 arch/powerpc/include/asm/kup.h static inline void prevent_write_to_user(void __user *to, unsigned long size)
to                 82 arch/powerpc/include/asm/kup.h 	prevent_user_access(to, NULL, size, KUAP_WRITE);
to                 85 arch/powerpc/include/asm/kup.h static inline void prevent_read_write_user(void __user *to, const void __user *from,
to                 88 arch/powerpc/include/asm/kup.h 	prevent_user_access(to, from, size, KUAP_READ_WRITE);
to                181 arch/powerpc/include/asm/kvm_book3s.h 					gva_t eaddr, void *to, void *from,
to                184 arch/powerpc/include/asm/kvm_book3s.h 					void *to, unsigned long n);
to                 74 arch/powerpc/include/asm/kvm_fpu.h extern void kvm_cvt_fd(u32 *from, u64 *to);
to                 75 arch/powerpc/include/asm/kvm_fpu.h extern void kvm_cvt_df(u64 *from, u32 *to);
to                 36 arch/powerpc/include/asm/nohash/32/kup-8xx.h static inline void allow_user_access(void __user *to, const void __user *from,
to                 42 arch/powerpc/include/asm/nohash/32/kup-8xx.h static inline void prevent_user_access(void __user *to, const void __user *from,
to                380 arch/powerpc/include/asm/opal.h ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
to                322 arch/powerpc/include/asm/page.h extern void copy_user_page(void *to, void *from, unsigned long vaddr,
to                 59 arch/powerpc/include/asm/page_32.h extern void copy_page(void *to, void *from);
to                 80 arch/powerpc/include/asm/page_64.h extern void copy_page(void *to, void *from);
to                429 arch/powerpc/include/asm/processor.h extern void cvt_fd(float *from, double *to);
to                430 arch/powerpc/include/asm/processor.h extern void cvt_df(double *from, float *to);
to                 34 arch/powerpc/include/asm/string.h void *__memcpy(void *to, const void *from, __kernel_size_t n);
to                 35 arch/powerpc/include/asm/string.h void *__memmove(void *to, const void *from, __kernel_size_t n);
to                306 arch/powerpc/include/asm/uaccess.h extern unsigned long __copy_tofrom_user(void __user *to,
to                311 arch/powerpc/include/asm/uaccess.h raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
to                316 arch/powerpc/include/asm/uaccess.h 	allow_read_write_user(to, from, n);
to                317 arch/powerpc/include/asm/uaccess.h 	ret = __copy_tofrom_user(to, from, n);
to                318 arch/powerpc/include/asm/uaccess.h 	prevent_read_write_user(to, from, n);
to                323 arch/powerpc/include/asm/uaccess.h static inline unsigned long raw_copy_from_user(void *to,
to                333 arch/powerpc/include/asm/uaccess.h 			__get_user_size(*(u8 *)to, from, 1, ret);
to                337 arch/powerpc/include/asm/uaccess.h 			__get_user_size(*(u16 *)to, from, 2, ret);
to                341 arch/powerpc/include/asm/uaccess.h 			__get_user_size(*(u32 *)to, from, 4, ret);
to                345 arch/powerpc/include/asm/uaccess.h 			__get_user_size(*(u64 *)to, from, 8, ret);
to                354 arch/powerpc/include/asm/uaccess.h 	ret = __copy_tofrom_user((__force void __user *)to, from, n);
to                359 arch/powerpc/include/asm/uaccess.h static inline unsigned long raw_copy_to_user(void __user *to,
to                368 arch/powerpc/include/asm/uaccess.h 			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
to                371 arch/powerpc/include/asm/uaccess.h 			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
to                374 arch/powerpc/include/asm/uaccess.h 			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
to                377 arch/powerpc/include/asm/uaccess.h 			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
to                384 arch/powerpc/include/asm/uaccess.h 	allow_write_to_user(to, n);
to                385 arch/powerpc/include/asm/uaccess.h 	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
to                386 arch/powerpc/include/asm/uaccess.h 	prevent_write_to_user(to, n);
to                391 arch/powerpc/include/asm/uaccess.h copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
to                394 arch/powerpc/include/asm/uaccess.h 		if (access_ok(to, n)) {
to                395 arch/powerpc/include/asm/uaccess.h 			allow_write_to_user(to, n);
to                396 arch/powerpc/include/asm/uaccess.h 			n = memcpy_mcsafe((void *)to, from, n);
to                397 arch/powerpc/include/asm/uaccess.h 			prevent_write_to_user(to, n);
to                428 arch/powerpc/include/asm/uaccess.h extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
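The powerpc raw_copy_to_user() fragments above show the KUAP (kernel userspace access prevention) bracketing: the user-access window is opened only around the low-level copy and closed immediately afterwards. A minimal sketch of that shape, using the helpers declared in the lines above but with a hypothetical wrapper name:

static inline unsigned long
kuap_copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	/* Open the user-access window only for the duration of the copy. */
	allow_write_to_user(to, n);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);

	return ret;
}
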
to                773 arch/powerpc/kernel/setup_64.c static int pcpu_cpu_distance(unsigned int from, unsigned int to)
to                775 arch/powerpc/kernel/setup_64.c 	if (early_cpu_to_node(from) == early_cpu_to_node(to))
to                 24 arch/powerpc/kernel/signal.h extern unsigned long copy_fpr_to_user(void __user *to,
to                 26 arch/powerpc/kernel/signal.h extern unsigned long copy_ckfpr_to_user(void __user *to,
to                 35 arch/powerpc/kernel/signal.h extern unsigned long copy_vsx_to_user(void __user *to,
to                 37 arch/powerpc/kernel/signal.h extern unsigned long copy_ckvsx_to_user(void __user *to,
to                239 arch/powerpc/kernel/signal_32.c unsigned long copy_fpr_to_user(void __user *to,
to                249 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
to                267 arch/powerpc/kernel/signal_32.c unsigned long copy_vsx_to_user(void __user *to,
to                276 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
to                293 arch/powerpc/kernel/signal_32.c unsigned long copy_ckfpr_to_user(void __user *to,
to                303 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
to                321 arch/powerpc/kernel/signal_32.c unsigned long copy_ckvsx_to_user(void __user *to,
to                330 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
to                347 arch/powerpc/kernel/signal_32.c inline unsigned long copy_fpr_to_user(void __user *to,
to                350 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, task->thread.fp_state.fpr,
to                362 arch/powerpc/kernel/signal_32.c inline unsigned long copy_ckfpr_to_user(void __user *to,
to                365 arch/powerpc/kernel/signal_32.c 	return __copy_to_user(to, task->thread.ckfp_state.fpr,
to                 31 arch/powerpc/kvm/book3s_64_mmu_radix.c 					      gva_t eaddr, void *to, void *from,
to                 36 arch/powerpc/kvm/book3s_64_mmu_radix.c 	bool is_load = !!to;
to                 41 arch/powerpc/kvm/book3s_64_mmu_radix.c 					  __pa(to), __pa(from), n);
to                 49 arch/powerpc/kvm/book3s_64_mmu_radix.c 		to = (void *) (eaddr | (quadrant << 62));
to                 66 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ret = raw_copy_from_user(to, from, n);
to                 68 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ret = raw_copy_to_user(to, from, n);
to                 85 arch/powerpc/kvm/book3s_64_mmu_radix.c 					  void *to, void *from, unsigned long n)
to                104 arch/powerpc/kvm/book3s_64_mmu_radix.c 	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
to                107 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
to                112 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
to                114 arch/powerpc/kvm/book3s_64_mmu_radix.c 		memset(to + (n - ret), 0, ret);
to                801 arch/powerpc/kvm/book3s_hv.c static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
to                822 arch/powerpc/kvm/book3s_hv.c 	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
to                825 arch/powerpc/kvm/book3s_hv.c 	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
to                828 arch/powerpc/kvm/book3s_hv.c 	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
to                831 arch/powerpc/kvm/book3s_hv.c 	to_addr |= (to & (PAGE_SIZE - 1));
to                838 arch/powerpc/kvm/book3s_hv.c 	mark_page_dirty(kvm, to >> PAGE_SHIFT);
to                 54 arch/powerpc/lib/pmem.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
to                 57 arch/powerpc/lib/pmem.c 	memcpy_flushcache(to, page_to_virt(page) + offset, len);
to                501 arch/powerpc/perf/core-book3s.c 				cpuhw->bhrb_entries[u_index].to = addr;
to                519 arch/powerpc/perf/core-book3s.c 				cpuhw->bhrb_entries[u_index].to =
to                161 arch/powerpc/platforms/powernv/opal-core.c 			     struct bin_attribute *bin_attr, char *to,
to                182 arch/powerpc/platforms/powernv/opal-core.c 		memcpy(to, oc_conf->opalcorebuf + tpos, tsz);
to                183 arch/powerpc/platforms/powernv/opal-core.c 		to += tsz;
to                198 arch/powerpc/platforms/powernv/opal-core.c 			memcpy(to, __va(addr), tsz);
to                199 arch/powerpc/platforms/powernv/opal-core.c 			to += tsz;
to                 32 arch/powerpc/platforms/powernv/opal-msglog.c ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count)
to                 58 arch/powerpc/platforms/powernv/opal-msglog.c 		ret = memory_read_from_buffer(to, count, &pos,
to                 65 arch/powerpc/platforms/powernv/opal-msglog.c 		to += first_read;
to                 79 arch/powerpc/platforms/powernv/opal-msglog.c 	ret = memory_read_from_buffer(to, count, &pos, conbuf, out_pos);
to                 89 arch/powerpc/platforms/powernv/opal-msglog.c ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
to                 91 arch/powerpc/platforms/powernv/opal-msglog.c 	return memcons_copy(opal_memcons, to, pos, count);
to                 95 arch/powerpc/platforms/powernv/opal-msglog.c 				struct bin_attribute *bin_attr, char *to,
to                 98 arch/powerpc/platforms/powernv/opal-msglog.c 	return opal_msglog_copy(to, pos, count);
to                 34 arch/powerpc/platforms/powernv/powernv.h ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
to                 35 arch/powerpc/platforms/powernv/ultravisor.c 			      struct bin_attribute *bin_attr, char *to,
to                 38 arch/powerpc/platforms/powernv/ultravisor.c 	return memcons_copy(uv_memcons, to, pos, count);
to               2305 arch/powerpc/xmon/ppc-opc.c #define OPTO(x,to) (OP (x) | ((((unsigned long)(to)) & 0x1f) << 21))
to               2749 arch/powerpc/xmon/ppc-opc.c #define XTO(op, xop, to) (X ((op), (xop)) | ((((unsigned long)(to)) & 0x1f) << 21))
to                 51 arch/riscv/include/asm/page.h #define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
to                370 arch/riscv/include/asm/uaccess.h extern unsigned long __must_check __asm_copy_to_user(void __user *to,
to                372 arch/riscv/include/asm/uaccess.h extern unsigned long __must_check __asm_copy_from_user(void *to,
to                376 arch/riscv/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                378 arch/riscv/include/asm/uaccess.h 	return __asm_copy_from_user(to, from, n);
to                382 arch/riscv/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                384 arch/riscv/include/asm/uaccess.h 	return __asm_copy_to_user(to, from, n);
to                396 arch/riscv/include/asm/uaccess.h unsigned long __must_check clear_user(void __user *to, unsigned long n)
to                399 arch/riscv/include/asm/uaccess.h 	return access_ok(to, n) ?
to                400 arch/riscv/include/asm/uaccess.h 		__clear_user(to, n) : n;
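clear_user() above returns the number of bytes it could not zero (all n of them when access_ok() fails). A minimal caller sketch, assuming a kernel-module context; the helper name is hypothetical:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: zero a user-supplied buffer, failing cleanly. */
static int zero_user_buf(void __user *ubuf, unsigned long len)
{
	/* clear_user() returns the number of bytes left uncleared. */
	if (clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}
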
to                555 arch/riscv/net/bpf_jit_comp.c 	int from = ctx->offset[bpf_from] - 1, to = ctx->offset[bpf_to];
to                557 arch/riscv/net/bpf_jit_comp.c 	return (to - from) << 2;
to                562 arch/riscv/net/bpf_jit_comp.c 	int to = ctx->epilogue_offset, from = ctx->ninsns;
to                564 arch/riscv/net/bpf_jit_comp.c 	return (to - from) << 2;
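Both helpers above turn a difference in emitted instruction counts into a byte displacement with "<< 2", since the emitted RISC-V instructions are 4 bytes each. A tiny self-contained sketch of that arithmetic (names and the example values are illustrative, not the JIT's):

#include <stdio.h>

/* Byte displacement between two positions given as 4-byte instruction counts. */
static int insn_offset_bytes(int from, int to)
{
	return (to - from) << 2;
}

int main(void)
{
	/* e.g. a branch target 7 instructions ahead of the current position */
	printf("%d\n", insn_offset_bytes(3, 10));	/* prints 28 */
	return 0;
}
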
to                144 arch/s390/hypfs/inode.c static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                154 arch/s390/hypfs/inode.c 	if (pos >= available || !iov_iter_count(to))
to                156 arch/s390/hypfs/inode.c 	count = copy_to_iter(data + pos, available - pos, to);
to                110 arch/s390/include/asm/gmap.h 		     unsigned long to, unsigned long len);
to                111 arch/s390/include/asm/gmap.h int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
to                116 arch/s390/include/asm/gmap.h void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
to                196 arch/s390/include/asm/idals.h idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
to                203 arch/s390/include/asm/idals.h 		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
to                206 arch/s390/include/asm/idals.h 		to = (void __user *) to + IDA_BLOCK_SIZE;
to                209 arch/s390/include/asm/idals.h 	return copy_to_user(to, ib->data[i], count);
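idal_buffer_to_user() above copies a list of fixed-size blocks: full blocks in a loop, then the tail. A userspace sketch of the same chunking, with memcpy() standing in for copy_to_user() and an illustrative block size:

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 4096		/* stand-in for IDA_BLOCK_SIZE */

/* Copy count bytes spread over BLOCK_SIZE-sized source blocks into one buffer. */
static void blocks_to_buf(char *to, char * const *blocks, size_t count)
{
	size_t i;

	for (i = 0; count > BLOCK_SIZE; i++) {
		memcpy(to, blocks[i], BLOCK_SIZE);
		to += BLOCK_SIZE;
		count -= BLOCK_SIZE;
	}
	memcpy(to, blocks[i], count);	/* tail: the last, partial block */
}
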
to                110 arch/s390/include/asm/nospec-insn.h 	# Be very careful when adding instructions to this macro!
to                 56 arch/s390/include/asm/page.h static inline void copy_page(void *to, void *from)
to                 58 arch/s390/include/asm/page.h 	register void *reg2 asm ("2") = to;
to                 69 arch/s390/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                 53 arch/s390/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n);
to                 56 arch/s390/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n);
to                 65 arch/s390/include/asm/uaccess.h #define __put_get_user_asm(to, from, size, spec)		\
to                 79 arch/s390/include/asm/uaccess.h 		: "=d" (__rc), "+Q" (*(to))			\
to                244 arch/s390/include/asm/uaccess.h raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
to                270 arch/s390/include/asm/uaccess.h unsigned long __must_check __clear_user(void __user *to, unsigned long size);
to                272 arch/s390/include/asm/uaccess.h static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
to                275 arch/s390/include/asm/uaccess.h 	return __clear_user(to, n);
to                368 arch/s390/include/asm/vx-insn.h 	VX_NUM	v3, \vto
to                378 arch/s390/include/asm/vx-insn.h 	VX_NUM	v3, \vto
to                288 arch/s390/kvm/trace-s390.h 	    TP_PROTO(__u8 isc, __u16 from, __u16 to),
to                289 arch/s390/kvm/trace-s390.h 	    TP_ARGS(isc, from, to),
to                294 arch/s390/kvm/trace-s390.h 		    __field(__u16, to)
to                300 arch/s390/kvm/trace-s390.h 		    __entry->to = to;
to                309 arch/s390/kvm/trace-s390.h 		      (__entry->to == KVM_S390_AIS_MODE_ALL) ?
to                311 arch/s390/kvm/trace-s390.h 		      (__entry->to == KVM_S390_AIS_MODE_SINGLE) ?
to                171 arch/s390/lib/uaccess.c unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                174 arch/s390/lib/uaccess.c 		return copy_from_user_mvcos(to, from, n);
to                175 arch/s390/lib/uaccess.c 	return copy_from_user_mvcp(to, from, n);
to                246 arch/s390/lib/uaccess.c unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                249 arch/s390/lib/uaccess.c 		return copy_to_user_mvcos(to, from, n);
to                250 arch/s390/lib/uaccess.c 	return copy_to_user_mvcs(to, from, n);
to                254 arch/s390/lib/uaccess.c static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
to                272 arch/s390/lib/uaccess.c 		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
to                277 arch/s390/lib/uaccess.c static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
to                305 arch/s390/lib/uaccess.c 		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
to                311 arch/s390/lib/uaccess.c unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
to                314 arch/s390/lib/uaccess.c 		return copy_in_user_mvcos(to, from, n);
to                315 arch/s390/lib/uaccess.c 	return copy_in_user_mvc(to, from, n);
to                319 arch/s390/lib/uaccess.c static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
to                342 arch/s390/lib/uaccess.c 		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
to                347 arch/s390/lib/uaccess.c static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
to                379 arch/s390/lib/uaccess.c 		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
to                385 arch/s390/lib/uaccess.c unsigned long __clear_user(void __user *to, unsigned long size)
to                388 arch/s390/lib/uaccess.c 			return clear_user_mvcos(to, size);
to                389 arch/s390/lib/uaccess.c 	return clear_user_xc(to, size);
to                396 arch/s390/mm/gmap.c int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
to                402 arch/s390/mm/gmap.c 	if ((to | len) & (PMD_SIZE - 1))
to                404 arch/s390/mm/gmap.c 	if (len == 0 || to + len < to)
to                410 arch/s390/mm/gmap.c 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
to                428 arch/s390/mm/gmap.c 		     unsigned long to, unsigned long len)
to                434 arch/s390/mm/gmap.c 	if ((from | to | len) & (PMD_SIZE - 1))
to                436 arch/s390/mm/gmap.c 	if (len == 0 || from + len < from || to + len < to ||
to                437 arch/s390/mm/gmap.c 	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
to                444 arch/s390/mm/gmap.c 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
to                447 arch/s390/mm/gmap.c 				      (to + off) >> PMD_SHIFT,
to                456 arch/s390/mm/gmap.c 	gmap_unmap_segment(gmap, to, len);
to                694 arch/s390/mm/gmap.c void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
to                700 arch/s390/mm/gmap.c 	for (gaddr = from; gaddr < to;
to                719 arch/s390/mm/gmap.c 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
to                224 arch/s390/pci/pci.c void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
to                226 arch/s390/pci/pci.c        zpci_memcpy_toio(to, from, count);
to                283 arch/sh/drivers/dma/dma-api.c 	     unsigned long to, size_t size, unsigned int mode)
to                289 arch/sh/drivers/dma/dma-api.c 	channel->dar	= to;
to                110 arch/sh/include/asm/dma.h 		    unsigned long to, size_t size, unsigned int mode);
to                112 arch/sh/include/asm/dma.h #define dma_write(chan, from, to, size)	\
to                113 arch/sh/include/asm/dma.h 	dma_xfer(chan, from, to, size, DMA_MODE_WRITE)
to                114 arch/sh/include/asm/dma.h #define dma_write_page(chan, from, to)	\
to                115 arch/sh/include/asm/dma.h 	dma_write(chan, from, to, PAGE_SIZE)
to                117 arch/sh/include/asm/dma.h #define dma_read(chan, from, to, size)	\
to                118 arch/sh/include/asm/dma.h 	dma_xfer(chan, from, to, size, DMA_MODE_READ)
to                119 arch/sh/include/asm/dma.h #define dma_read_page(chan, from, to)	\
to                120 arch/sh/include/asm/dma.h 	dma_read(chan, from, to, PAGE_SIZE)
to                 62 arch/sh/include/asm/page.h extern void copy_page(void *to, void *from);
to                 63 arch/sh/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
to                 68 arch/sh/include/asm/page.h extern void copy_user_highpage(struct page *to, struct page *from,
to                111 arch/sh/include/asm/uaccess.h __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
to                114 arch/sh/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                116 arch/sh/include/asm/uaccess.h 	return __copy_user(to, (__force void *)from, n);
to                120 arch/sh/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                122 arch/sh/include/asm/uaccess.h 	return __copy_user((__force void *)to, from, n);
to                153 arch/sh/include/asm/uaccess.h 	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
to                 16 arch/sh/kernel/io.c void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
to                 24 arch/sh/kernel/io.c 	     (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
to                 50 arch/sh/kernel/io.c 			: "=&r" (to), "=&r" (count),
to                 53 arch/sh/kernel/io.c 			: "7"(from), "0" (to), "1" (count)
to                 58 arch/sh/kernel/io.c 	if ((((u32)to | (u32)from) & 0x3) == 0) {
to                 60 arch/sh/kernel/io.c 			*(u32 *)to = *(volatile u32 *)from;
to                 61 arch/sh/kernel/io.c 			to += 4;
to                 67 arch/sh/kernel/io.c 		*(u8 *)to = *(volatile u8 *)from;
to                 68 arch/sh/kernel/io.c 		to++;
to                 79 arch/sh/kernel/io.c void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
to                 81 arch/sh/kernel/io.c 	if ((((u32)to | (u32)from) & 0x3) == 0) {
to                 83 arch/sh/kernel/io.c 			*(volatile u32 *)to = *(u32 *)from;
to                 84 arch/sh/kernel/io.c 			to += 4;
to                 90 arch/sh/kernel/io.c 		*(volatile u8 *)to = *(u8 *)from;
to                 91 arch/sh/kernel/io.c 		to++;
to                134 arch/sh/kernel/traps_32.c 			if (ma->to(dstu, src, count))
to                145 arch/sh/kernel/traps_32.c 		if (ma->to(dstu, src, 4))
to                158 arch/sh/kernel/traps_32.c 		if (ma->to(dstu, src, count))
to                200 arch/sh/kernel/traps_32.c 			if (ma->to(dstu, src, 2))
to                 95 arch/sh/mm/cache.c void copy_user_highpage(struct page *to, struct page *from,
to                100 arch/sh/mm/cache.c 	vto = kmap_atomic(to);
to                 21 arch/sh/mm/nommu.c void copy_page(void *to, void *from)
to                 23 arch/sh/mm/nommu.c 	memcpy(to, from, PAGE_SIZE);
to                 26 arch/sh/mm/nommu.c __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
to                 28 arch/sh/mm/nommu.c 	memcpy(to, from, n);
to                 32 arch/sh/mm/nommu.c __kernel_size_t __clear_user(void *to, __kernel_size_t n)
to                 34 arch/sh/mm/nommu.c 	memset(to, 0, n);
to                 21 arch/sparc/include/asm/page_32.h #define copy_page(to,from) 	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
to                 26 arch/sparc/include/asm/page_32.h #define copy_user_page(to, from, vaddr, page)	\
to                 27 arch/sparc/include/asm/page_32.h 	do {	copy_page(to, from);		\
to                 50 arch/sparc/include/asm/page_64.h void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
to                 53 arch/sparc/include/asm/page_64.h void copy_user_highpage(struct page *to, struct page *from,
to                 56 arch/sparc/include/asm/page_64.h void copy_highpage(struct page *to, struct page *from);
to                235 arch/sparc/include/asm/uaccess_32.h unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
to                237 arch/sparc/include/asm/uaccess_32.h static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                239 arch/sparc/include/asm/uaccess_32.h 	return __copy_user(to, (__force void __user *) from, n);
to                242 arch/sparc/include/asm/uaccess_32.h static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                244 arch/sparc/include/asm/uaccess_32.h 	return __copy_user((__force void __user *) to, from, n);
to                178 arch/sparc/include/asm/uaccess_64.h unsigned long __must_check raw_copy_from_user(void *to,
to                182 arch/sparc/include/asm/uaccess_64.h unsigned long __must_check raw_copy_to_user(void __user *to,
to                188 arch/sparc/include/asm/uaccess_64.h unsigned long __must_check raw_copy_in_user(void __user *to,
to               1613 arch/sparc/kernel/smp_64.c static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
to               1615 arch/sparc/kernel/smp_64.c 	if (cpu_to_node(from) == cpu_to_node(to))
to               1374 arch/sparc/mm/init_64.c int __node_distance(int from, int to)
to               1376 arch/sparc/mm/init_64.c 	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
to               1378 arch/sparc/mm/init_64.c 			from, to);
to               1379 arch/sparc/mm/init_64.c 		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
to               1381 arch/sparc/mm/init_64.c 	return numa_latency[from][to];
to               3128 arch/sparc/mm/init_64.c void copy_user_highpage(struct page *to, struct page *from,
to               3134 arch/sparc/mm/init_64.c 	vto = kmap_atomic(to);
to               3135 arch/sparc/mm/init_64.c 	copy_user_page(vto, vfrom, vaddr, to);
to               3146 arch/sparc/mm/init_64.c 		pto = page_to_phys(to);
to               3163 arch/sparc/mm/init_64.c void copy_highpage(struct page *to, struct page *from)
to               3168 arch/sparc/mm/init_64.c 	vto = kmap_atomic(to);
to               3180 arch/sparc/mm/init_64.c 		pto = page_to_phys(to);
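The sh and sparc copy_user_highpage() fragments above share the same shape: temporarily map both pages, copy, then unmap. A generic sketch of that bracketing, without the architecture-specific cache and alias handling the real implementations add around it:

#include <linux/highmem.h>
#include <linux/mm.h>

static void copy_user_highpage_sketch(struct page *to, struct page *from,
				      unsigned long vaddr)
{
	void *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
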
to                261 arch/sparc/net/bpf_jit_comp_64.c static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx)
to                263 arch/sparc/net/bpf_jit_comp_64.c 	emit(OR | RS1(G0) | RS2(from) | RD(to), ctx);
to                145 arch/um/drivers/cow_user.c static int absolutize(char *to, int size, char *from)
to                165 arch/um/drivers/cow_user.c 		if (getcwd(to, size) == NULL) {
to                170 arch/um/drivers/cow_user.c 		remaining = size - strlen(to);
to                176 arch/um/drivers/cow_user.c 		strcat(to, slash);
to                184 arch/um/drivers/cow_user.c 		strcpy(to, save_cwd);
to                185 arch/um/drivers/cow_user.c 		strcat(to, "/");
to                186 arch/um/drivers/cow_user.c 		strcat(to, from);
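absolutize() above builds an absolute path into "to" by combining the (current or saved) working directory with "from", checking the remaining space first. A simplified userspace sketch of the cwd-plus-relative-name branch; the bounds handling here is illustrative, not the original's:

#include <string.h>
#include <unistd.h>

/* Make "from" absolute by prefixing the current working directory. */
static int make_absolute(char *to, size_t size, const char *from)
{
	if (from[0] == '/') {			/* already absolute */
		if (strlen(from) + 1 > size)
			return -1;
		strcpy(to, from);
		return 0;
	}
	if (getcwd(to, size) == NULL)
		return -1;
	if (strlen(to) + 1 + strlen(from) + 1 > size)
		return -1;
	strcat(to, "/");
	strcat(to, from);
	return 0;
}
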
to                658 arch/um/drivers/mconsole_kern.c 	struct task_struct *to = NULL;
to                678 arch/um/drivers/mconsole_kern.c 	to = find_task_by_pid_ns(pid_requested, &init_pid_ns);
to                679 arch/um/drivers/mconsole_kern.c 	if ((to == NULL) || (pid_requested == 0)) {
to                683 arch/um/drivers/mconsole_kern.c 	with_console(req, stack_proc, to);
to                151 arch/um/drivers/net_user.c int net_sendto(int fd, void *buf, int len, void *to, int sock_len)
to                155 arch/um/drivers/net_user.c 	CATCH_EINTR(n = sendto(fd, buf, len, 0, (struct sockaddr *) to,
to                 30 arch/um/include/asm/page.h #define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
to                 33 arch/um/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                 45 arch/um/include/asm/page.h #define pte_copy(to, from) ({ (to).pte = (from).pte; })
to                 72 arch/um/include/asm/page.h #define pte_copy(to, from) ((to).pte = (from).pte)
to                 24 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
to                 25 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
to                 54 arch/um/include/shared/kern_util.h extern int copy_from_user_proc(void *to, void *from, int size);
to                 44 arch/um/include/shared/net_user.h extern int net_sendto(int fd, void *buf, int len, void *to, int sock_len);
to                584 arch/um/kernel/irq.c 	struct thread_info *ti, *to;
to                591 arch/um/kernel/irq.c 	to = ti->real_thread;
to                592 arch/um/kernel/irq.c 	current->stack = to;
to                594 arch/um/kernel/irq.c 	*to = *ti;
to                 84 arch/um/kernel/process.c extern void arch_switch_to(struct task_struct *to);
to                 86 arch/um/kernel/process.c void *__switch_to(struct task_struct *from, struct task_struct *to)
to                 88 arch/um/kernel/process.c 	to->thread.prev_sched = from;
to                 89 arch/um/kernel/process.c 	set_current(to);
to                 91 arch/um/kernel/process.c 	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
to                291 arch/um/kernel/process.c int copy_to_user_proc(void __user *to, void *from, int size)
to                293 arch/um/kernel/process.c 	return copy_to_user(to, from, size);
to                296 arch/um/kernel/process.c int copy_from_user_proc(void *to, void __user *from, int size)
to                298 arch/um/kernel/process.c 	return copy_from_user(to, from, size);
to                135 arch/um/kernel/skas/uaccess.c 	unsigned long *to_ptr = arg, to = *to_ptr;
to                137 arch/um/kernel/skas/uaccess.c 	memcpy((void *) to, (void *) from, len);
to                142 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                145 arch/um/kernel/skas/uaccess.c 		memcpy(to, (__force void*)from, n);
to                149 arch/um/kernel/skas/uaccess.c 	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
to                153 arch/um/kernel/skas/uaccess.c static int copy_chunk_to_user(unsigned long to, int len, void *arg)
to                157 arch/um/kernel/skas/uaccess.c 	memcpy((void *) to, (void *) from, len);
to                162 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                165 arch/um/kernel/skas/uaccess.c 		memcpy((__force void *) to, from, n);
to                169 arch/um/kernel/skas/uaccess.c 	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
to                175 arch/um/kernel/skas/uaccess.c 	char **to_ptr = arg, *to = *to_ptr;
to                178 arch/um/kernel/skas/uaccess.c 	strncpy(to, (void *) from, len);
to                179 arch/um/kernel/skas/uaccess.c 	n = strnlen(to, len);
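The skas copy helpers above hand buffer_op() a callback plus a pointer to the "other side" cursor (&to or &from); each callback memcpy()s one chunk and, as the double-pointer arguments suggest, advances that cursor. A userspace sketch of the walker-plus-callback pattern, with a fixed illustrative chunk size:

#include <stddef.h>
#include <string.h>

#define CHUNK 4096	/* stand-in for the page-sized pieces the walker uses */

/* Callback: copy one chunk to "to" and advance the caller-owned source cursor. */
static int copy_chunk_sketch(unsigned long to, int len, void *arg)
{
	const char **from_ptr = arg, *from = *from_ptr;

	memcpy((void *)to, from, len);
	*from_ptr = from + len;
	return 0;
}

/* Walk [addr, addr + len) in CHUNK-aligned pieces, invoking op on each. */
static int buffer_op_sketch(unsigned long addr, int len,
			    int (*op)(unsigned long, int, void *), void *arg)
{
	while (len > 0) {
		int chunk = CHUNK - (int)(addr & (CHUNK - 1));

		if (chunk > len)
			chunk = len;
		if (op(addr, chunk, arg))
			return len;	/* bytes not processed */
		addr += chunk;
		len -= chunk;
	}
	return 0;
}
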
to                 23 arch/unicore32/include/asm/page.h extern void copy_page(void *to, const void *from);
to                 26 arch/unicore32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                 24 arch/unicore32/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n);
to                 26 arch/unicore32/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n);
to                 30 arch/unicore32/include/asm/uaccess.h __strncpy_from_user(char *to, const char __user *from, unsigned long count);
to                  5 arch/x86/boot/code16gcc.h # This file is added to the assembler via -Wa when compiling 16-bit C code.
to                  6 arch/x86/boot/code16gcc.h # This is done this way instead via asm() to make sure gcc does not reorder
to                586 arch/x86/events/intel/ds.c 		u64	to;
to                632 arch/x86/events/intel/ds.c 		    (kernel_ip(at->from) || kernel_ip(at->to)))
to                651 arch/x86/events/intel/ds.c 		    (kernel_ip(at->from) || kernel_ip(at->to)))
to                655 arch/x86/events/intel/ds.c 		data.addr	= at->to;
to               1203 arch/x86/events/intel/ds.c 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
to               1218 arch/x86/events/intel/ds.c 	if (!cpuc->lbr_stack.nr || !from || !to)
to               1224 arch/x86/events/intel/ds.c 	if (kernel_ip(ip) != kernel_ip(to))
to               1231 arch/x86/events/intel/ds.c 	if ((ip - to) > PEBS_FIXUP_SIZE)
to               1237 arch/x86/events/intel/ds.c 	if (ip == to) {
to               1242 arch/x86/events/intel/ds.c 	size = ip - to;
to               1248 arch/x86/events/intel/ds.c 		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
to               1254 arch/x86/events/intel/ds.c 		kaddr = (void *)to;
to               1260 arch/x86/events/intel/ds.c 		old_to = to;
to               1263 arch/x86/events/intel/ds.c 		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
to               1276 arch/x86/events/intel/ds.c 		to += insn.length;
to               1279 arch/x86/events/intel/ds.c 	} while (to < ip);
to               1281 arch/x86/events/intel/ds.c 	if (to == ip) {
to                547 arch/x86/events/intel/lbr.c 				u32 to;
to                555 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
to                590 arch/x86/events/intel/lbr.c 		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
to                596 arch/x86/events/intel/lbr.c 		to   = rdlbr_to(lbr_idx);
to                620 arch/x86/events/intel/lbr.c 			cycles = ((to >> 48) & LBR_INFO_CYCLES);
to                622 arch/x86/events/intel/lbr.c 			to = (u64)((((s64)to) << 16) >> 16);
to                649 arch/x86/events/intel/lbr.c 		cpuc->lbr_entries[out].to	 = to;
to                832 arch/x86/events/intel/lbr.c static int branch_type(unsigned long from, unsigned long to, int abort)
to                842 arch/x86/events/intel/lbr.c 	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
to                849 arch/x86/events/intel/lbr.c 	if (from == 0 || to == 0)
to               1044 arch/x86/events/intel/lbr.c 	u64 from, to;
to               1057 arch/x86/events/intel/lbr.c 		to = cpuc->lbr_entries[i].to;
to               1059 arch/x86/events/intel/lbr.c 		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
to               1105 arch/x86/events/intel/lbr.c 		e->to		= lbr->lbr[i].to;
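The LBR lines above split one 64-bit "to" word into a cycle count in bits 63:48 and a 48-bit branch target that must be sign-extended back to a canonical pointer. A self-contained sketch of exactly that bit manipulation, on a toy value (two's-complement shift behaviour is assumed, as the kernel code above also assumes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 0x00c7ffffffff1234ULL;		/* toy: cycles=0x00c7, 48-bit target */
	uint16_t cycles = (uint16_t)(raw >> 48);	/* cycle field, bits 63:48 */
	int64_t to = ((int64_t)(raw << 16)) >> 16;	/* sign-extend bit 47 */

	printf("cycles=%u to=%#llx\n", cycles, (unsigned long long)to);
	return 0;
}
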
to                122 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
to                126 arch/x86/include/asm/asm.h 	.long (to) - . ;					\
to                130 arch/x86/include/asm/asm.h # define _ASM_EXTABLE(from, to)					\
to                131 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
to                133 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_UA(from, to)				\
to                134 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
to                136 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_FAULT(from, to)				\
to                137 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
to                139 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_EX(from, to)				\
to                140 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
to                142 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_REFCOUNT(from, to)			\
to                143 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
to                153 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
to                157 arch/x86/include/asm/asm.h 	" .long (" #to ") - .\n"				\
to                161 arch/x86/include/asm/asm.h # define _ASM_EXTABLE(from, to)					\
to                162 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
to                164 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_UA(from, to)				\
to                165 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
to                167 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_FAULT(from, to)				\
to                168 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
to                170 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_EX(from, to)				\
to                171 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
to                173 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_REFCOUNT(from, to)			\
to                174 arch/x86/include/asm/asm.h 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
to                234 arch/x86/include/asm/compat.h int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
to                 11 arch/x86/include/asm/mmx.h extern void *_mmx_memcpy(void *to, const void *from, size_t size);
to                 13 arch/x86/include/asm/mmx.h extern void mmx_copy_page(void *to, void *from);
to                 35 arch/x86/include/asm/numa.h extern void __init numa_set_distance(int from, int to, int distance);
to                 31 arch/x86/include/asm/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
to                 34 arch/x86/include/asm/page.h 	copy_page(to, from);
to                 30 arch/x86/include/asm/page_32.h static inline void copy_page(void *to, void *from)
to                 32 arch/x86/include/asm/page_32.h 	mmx_copy_page(to, from);
to                 42 arch/x86/include/asm/page_32.h static inline void copy_page(void *to, void *from)
to                 44 arch/x86/include/asm/page_32.h 	memcpy(to, from, PAGE_SIZE);
to                 57 arch/x86/include/asm/page_64.h void copy_page(void *to, void *from);
to                215 arch/x86/include/asm/perf_event.h 	u64 from, to, info;
to                 33 arch/x86/include/asm/string_32.h static __always_inline void *__memcpy(void *to, const void *from, size_t n)
to                 43 arch/x86/include/asm/string_32.h 		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
to                 45 arch/x86/include/asm/string_32.h 	return to;
to                 52 arch/x86/include/asm/string_32.h static __always_inline void *__constant_memcpy(void *to, const void *from,
to                 57 arch/x86/include/asm/string_32.h 		return to;
to                 61 arch/x86/include/asm/string_32.h 		*(char *)to = *(char *)from;
to                 62 arch/x86/include/asm/string_32.h 		return to;
to                 64 arch/x86/include/asm/string_32.h 		*(short *)to = *(short *)from;
to                 65 arch/x86/include/asm/string_32.h 		return to;
to                 67 arch/x86/include/asm/string_32.h 		*(int *)to = *(int *)from;
to                 68 arch/x86/include/asm/string_32.h 		return to;
to                 70 arch/x86/include/asm/string_32.h 		*(short *)to = *(short *)from;
to                 71 arch/x86/include/asm/string_32.h 		*((char *)to + 2) = *((char *)from + 2);
to                 72 arch/x86/include/asm/string_32.h 		return to;
to                 74 arch/x86/include/asm/string_32.h 		*(int *)to = *(int *)from;
to                 75 arch/x86/include/asm/string_32.h 		*((char *)to + 4) = *((char *)from + 4);
to                 76 arch/x86/include/asm/string_32.h 		return to;
to                 78 arch/x86/include/asm/string_32.h 		*(int *)to = *(int *)from;
to                 79 arch/x86/include/asm/string_32.h 		*((short *)to + 2) = *((short *)from + 2);
to                 80 arch/x86/include/asm/string_32.h 		return to;
to                 82 arch/x86/include/asm/string_32.h 		*(int *)to = *(int *)from;
to                 83 arch/x86/include/asm/string_32.h 		*((int *)to + 1) = *((int *)from + 1);
to                 84 arch/x86/include/asm/string_32.h 		return to;
to                 88 arch/x86/include/asm/string_32.h 	edi = (long)to;
to                123 arch/x86/include/asm/string_32.h 		return to;
to                129 arch/x86/include/asm/string_32.h 		return to;
to                135 arch/x86/include/asm/string_32.h 		return to;
to                141 arch/x86/include/asm/string_32.h 		return to;
to                157 arch/x86/include/asm/string_32.h static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
to                160 arch/x86/include/asm/string_32.h 		return __constant_memcpy(to, from, len);
to                161 arch/x86/include/asm/string_32.h 	return _mmx_memcpy(to, from, len);
to                164 arch/x86/include/asm/string_32.h static inline void *__memcpy3d(void *to, const void *from, size_t len)
to                167 arch/x86/include/asm/string_32.h 		return __memcpy(to, from, len);
to                168 arch/x86/include/asm/string_32.h 	return _mmx_memcpy(to, from, len);
to                 14 arch/x86/include/asm/string_64.h extern void *memcpy(void *to, const void *from, size_t len);
to                 15 arch/x86/include/asm/string_64.h extern void *__memcpy(void *to, const void *from, size_t len);
to                578 arch/x86/include/asm/uaccess.h copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
to                 13 arch/x86/include/asm/uaccess_32.h 		(void *to, const void *from, unsigned long n);
to                 15 arch/x86/include/asm/uaccess_32.h 		(void *to, const void __user *from, unsigned long n);
to                 18 arch/x86/include/asm/uaccess_32.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                 20 arch/x86/include/asm/uaccess_32.h 	return __copy_user_ll((__force void *)to, from, n);
to                 24 arch/x86/include/asm/uaccess_32.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                 33 arch/x86/include/asm/uaccess_32.h 			__get_user_asm_nozero(*(u8 *)to, from, ret,
to                 40 arch/x86/include/asm/uaccess_32.h 			__get_user_asm_nozero(*(u16 *)to, from, ret,
to                 47 arch/x86/include/asm/uaccess_32.h 			__get_user_asm_nozero(*(u32 *)to, from, ret,
to                 53 arch/x86/include/asm/uaccess_32.h 	return __copy_user_ll(to, (__force const void *)from, n);
to                 57 arch/x86/include/asm/uaccess_32.h __copy_from_user_inatomic_nocache(void *to, const void __user *from,
to                 60 arch/x86/include/asm/uaccess_32.h        return __copy_from_user_ll_nocache_nozero(to, from, n);
to                 21 arch/x86/include/asm/uaccess_64.h copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
to                 23 arch/x86/include/asm/uaccess_64.h copy_user_generic_string(void *to, const void *from, unsigned len);
to                 25 arch/x86/include/asm/uaccess_64.h copy_user_generic_unrolled(void *to, const void *from, unsigned len);
to                 28 arch/x86/include/asm/uaccess_64.h copy_user_generic(void *to, const void *from, unsigned len)
to                 42 arch/x86/include/asm/uaccess_64.h 			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
to                 44 arch/x86/include/asm/uaccess_64.h 			 "1" (to), "2" (from), "3" (len)
to                 50 arch/x86/include/asm/uaccess_64.h copy_to_user_mcsafe(void *to, const void *from, unsigned len)
to                 60 arch/x86/include/asm/uaccess_64.h 	ret = __memcpy_mcsafe(to, from, len);
to                192 arch/x86/include/asm/uaccess_64.h extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
to                211 arch/x86/include/asm/uaccess_64.h mcsafe_handle_tail(char *to, char *from, unsigned len);
to                 88 arch/x86/kernel/cpu/hypervisor.c 	const void **to = (const void **)target;
to                 92 arch/x86/kernel/cpu/hypervisor.c 			to[i] = from[i];
to               2182 arch/x86/kernel/cpu/resctrl/rdtgroup.c static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
to               2191 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			t->closid = to->closid;
to               2192 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			t->rmid = to->mon.rmid;
to                944 arch/x86/kernel/e820.c 		enum e820_type from = 0, to = 0;
to                950 arch/x86/kernel/e820.c 			to = simple_strtoull(p + 1, &p, 0);
to                953 arch/x86/kernel/e820.c 		if (from && to)
to                954 arch/x86/kernel/e820.c 			e820__range_update(start_at, mem_size, from, to);
to                955 arch/x86/kernel/e820.c 		else if (to)
to                956 arch/x86/kernel/e820.c 			e820__range_add(start_at, mem_size, to);
to                235 arch/x86/kernel/fpu/regset.c 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
to                265 arch/x86/kernel/fpu/regset.c 		memcpy(&to[i], &from[i], sizeof(to[0]));
to                273 arch/x86/kernel/fpu/regset.c 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
to                292 arch/x86/kernel/fpu/regset.c 		memcpy(&to[i], &from[i], sizeof(from[0]));
to                955 arch/x86/kernel/fpu/xstate.c static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
to                957 arch/x86/kernel/fpu/xstate.c 	if (*pos < to) {
to                958 arch/x86/kernel/fpu/xstate.c 		unsigned size = to - *pos;
to                 91 arch/x86/kernel/kprobes/common.h extern void synthesize_reljump(void *dest, void *from, void *to);
to                 92 arch/x86/kernel/kprobes/common.h extern void synthesize_relcall(void *dest, void *from, void *to);
to                107 arch/x86/kernel/kprobes/core.c __synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
to                115 arch/x86/kernel/kprobes/core.c 	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
to                120 arch/x86/kernel/kprobes/core.c void synthesize_reljump(void *dest, void *from, void *to)
to                122 arch/x86/kernel/kprobes/core.c 	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
to                127 arch/x86/kernel/kprobes/core.c void synthesize_relcall(void *dest, void *from, void *to)
to                129 arch/x86/kernel/kprobes/core.c 	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
to                141 arch/x86/kernel/setup_percpu.c static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
to                144 arch/x86/kernel/setup_percpu.c 	if (early_cpu_to_node(from) == early_cpu_to_node(to))
to               1629 arch/x86/kvm/vmx/vmx.c static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
to               1633 arch/x86/kvm/vmx/vmx.c 	tmp = vmx->guest_msrs[to];
to               1634 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs[to] = vmx->guest_msrs[from];
to                  5 arch/x86/lib/iomem.c #define movs(type,to,from) \
to                  6 arch/x86/lib/iomem.c 	asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
to                  9 arch/x86/lib/iomem.c static __always_inline void rep_movs(void *to, const void *from, size_t n)
to                 21 arch/x86/lib/iomem.c 		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
to                 25 arch/x86/lib/iomem.c void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
to                 32 arch/x86/lib/iomem.c 		movs("b", to, from);
to                 36 arch/x86/lib/iomem.c 		movs("w", to, from);
to                 39 arch/x86/lib/iomem.c 	rep_movs(to, (const void *)from, n);
to                 43 arch/x86/lib/iomem.c void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
to                 49 arch/x86/lib/iomem.c 	if (unlikely(1 & (unsigned long)to)) {
to                 50 arch/x86/lib/iomem.c 		movs("b", to, from);
to                 53 arch/x86/lib/iomem.c 	if (n > 1 && unlikely(2 & (unsigned long)to)) {
to                 54 arch/x86/lib/iomem.c 		movs("w", to, from);
to                 57 arch/x86/lib/iomem.c 	rep_movs((void *)to, (const void *) from, n);
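memcpy_toio() above peels a leading byte and word until the MMIO destination is aligned and only then falls into rep_movs(). A portable sketch of the same destination-alignment peeling, with plain volatile stores standing in for the MMIO accessors and the byte/word head collapsed into a single byte loop:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void copy_align_dest(volatile unsigned char *to, const unsigned char *from, size_t n)
{
	/* Peel leading bytes until the destination is 4-byte aligned. */
	while (n && ((uintptr_t)to & 3)) {
		*to++ = *from++;
		n--;
	}
	/* Bulk copy in 32-bit stores; memcpy() avoids unaligned source reads. */
	while (n >= 4) {
		uint32_t w;

		memcpy(&w, from, sizeof(w));
		*(volatile uint32_t *)to = w;
		to += 4;
		from += 4;
		n -= 4;
	}
	/* Tail bytes. */
	while (n--)
		*to++ = *from++;
}
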
to                  8 arch/x86/lib/memcpy_32.c __visible void *memcpy(void *to, const void *from, size_t n)
to                 11 arch/x86/lib/memcpy_32.c 	return __memcpy3d(to, from, n);
to                 13 arch/x86/lib/memcpy_32.c 	return __memcpy(to, from, n);
to                 29 arch/x86/lib/mmx_32.c void *_mmx_memcpy(void *to, const void *from, size_t len)
to                 35 arch/x86/lib/mmx_32.c 		return __memcpy(to, from, len);
to                 37 arch/x86/lib/mmx_32.c 	p = to;
to                 80 arch/x86/lib/mmx_32.c 			: : "r" (from), "r" (to) : "memory");
to                 83 arch/x86/lib/mmx_32.c 		to += 64;
to                104 arch/x86/lib/mmx_32.c 			: : "r" (from), "r" (to) : "memory");
to                107 arch/x86/lib/mmx_32.c 		to += 64;
to                112 arch/x86/lib/mmx_32.c 	__memcpy(to, from, len & 63);
to                159 arch/x86/lib/mmx_32.c static void fast_copy_page(void *to, void *from)
to                205 arch/x86/lib/mmx_32.c 		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
to                208 arch/x86/lib/mmx_32.c 		to += 64;
to                229 arch/x86/lib/mmx_32.c 			: : "r" (from), "r" (to) : "memory");
to                231 arch/x86/lib/mmx_32.c 		to += 64;
to                281 arch/x86/lib/mmx_32.c static void fast_copy_page(void *to, void *from)
to                324 arch/x86/lib/mmx_32.c 			: : "r" (from), "r" (to) : "memory");
to                327 arch/x86/lib/mmx_32.c 		to += 64;
to                359 arch/x86/lib/mmx_32.c static void slow_copy_page(void *to, void *from)
to                367 arch/x86/lib/mmx_32.c 		: "0" (1024), "1" ((long) to), "2" ((long) from)
to                371 arch/x86/lib/mmx_32.c void mmx_copy_page(void *to, void *from)
to                374 arch/x86/lib/mmx_32.c 		slow_copy_page(to, from);
to                376 arch/x86/lib/mmx_32.c 		fast_copy_page(to, from);
to                 17 arch/x86/lib/usercopy.c copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
to                 33 arch/x86/lib/usercopy.c 	ret = __copy_from_user_inatomic(to, from, n);
to                 67 arch/x86/lib/usercopy_32.c clear_user(void __user *to, unsigned long n)
to                 70 arch/x86/lib/usercopy_32.c 	if (access_ok(to, n))
to                 71 arch/x86/lib/usercopy_32.c 		__do_clear_user(to, n);
to                 88 arch/x86/lib/usercopy_32.c __clear_user(void __user *to, unsigned long n)
to                 90 arch/x86/lib/usercopy_32.c 	__do_clear_user(to, n);
to                 97 arch/x86/lib/usercopy_32.c __copy_user_intel(void __user *to, const void *from, unsigned long size)
to                195 arch/x86/lib/usercopy_32.c 		       :  "1"(to), "2"(from), "0"(size)
to                200 arch/x86/lib/usercopy_32.c static unsigned long __copy_user_intel_nocache(void *to,
to                283 arch/x86/lib/usercopy_32.c 	       :  "1"(to), "2"(from), "0"(size)
to                294 arch/x86/lib/usercopy_32.c unsigned long __copy_user_intel(void __user *to, const void *from,
to                299 arch/x86/lib/usercopy_32.c #define __copy_user(to, from, size)					\
to                328 arch/x86/lib/usercopy_32.c 		: "3"(size), "0"(size), "1"(to), "2"(from)		\
to                332 arch/x86/lib/usercopy_32.c unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
to                335 arch/x86/lib/usercopy_32.c 	if (movsl_is_ok(to, from, n))
to                336 arch/x86/lib/usercopy_32.c 		__copy_user(to, from, n);
to                338 arch/x86/lib/usercopy_32.c 		n = __copy_user_intel(to, from, n);
to                344 arch/x86/lib/usercopy_32.c unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
to                350 arch/x86/lib/usercopy_32.c 		n = __copy_user_intel_nocache(to, from, n);
to                352 arch/x86/lib/usercopy_32.c 		__copy_user(to, from, n);
to                354 arch/x86/lib/usercopy_32.c 	__copy_user(to, from, n);
to                 50 arch/x86/lib/usercopy_64.c unsigned long clear_user(void __user *to, unsigned long n)
to                 52 arch/x86/lib/usercopy_64.c 	if (access_ok(to, n))
to                 53 arch/x86/lib/usercopy_64.c 		return __clear_user(to, n);
to                 64 arch/x86/lib/usercopy_64.c mcsafe_handle_tail(char *to, char *from, unsigned len)
to                 66 arch/x86/lib/usercopy_64.c 	for (; len; --len, to++, from++) {
to                 71 arch/x86/lib/usercopy_64.c 		unsigned long rem = __memcpy_mcsafe(to, from, 1);
to                202 arch/x86/lib/usercopy_64.c void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
to                207 arch/x86/lib/usercopy_64.c 	memcpy_flushcache(to, from + offset, len);
to                110 arch/x86/math-emu/fpu_system.h #define FPU_copy_from_user(to, from, n)	\
to                111 arch/x86/math-emu/fpu_system.h 		do { if (copy_from_user(to, from, n)) FPU_abort; } while (0)
to                402 arch/x86/mm/numa.c void __init numa_set_distance(int from, int to, int distance)
to                407 arch/x86/mm/numa.c 	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
to                408 arch/x86/mm/numa.c 			from < 0 || to < 0) {
to                410 arch/x86/mm/numa.c 			     from, to, distance);
to                415 arch/x86/mm/numa.c 	    (from == to && distance != LOCAL_DISTANCE)) {
to                417 arch/x86/mm/numa.c 			     from, to, distance);
to                421 arch/x86/mm/numa.c 	numa_distance[from * numa_distance_cnt + to] = distance;
to                424 arch/x86/mm/numa.c int __node_distance(int from, int to)
to                426 arch/x86/mm/numa.c 	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
to                427 arch/x86/mm/numa.c 		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
to                428 arch/x86/mm/numa.c 	return numa_distance[from * numa_distance_cnt + to];
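numa_set_distance() and __node_distance() above keep the node distance table as a flat numa_distance_cnt x numa_distance_cnt array indexed as from * cnt + to, falling back to LOCAL_DISTANCE/REMOTE_DISTANCE when the table is absent or an index is out of range. A self-contained sketch of that layout and lookup (the extra negative-index guard mirrors the setter's check above):

#include <stdio.h>
#include <stdlib.h>

#define LOCAL_DISTANCE	10	/* conventional ACPI SLIT values */
#define REMOTE_DISTANCE	20

static int *numa_distance;	/* flat N*N table, indexed from * N + to */
static int numa_distance_cnt;

static int node_distance_sketch(int from, int to)
{
	if (from < 0 || to < 0 ||
	    from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}

int main(void)
{
	numa_distance_cnt = 2;
	numa_distance = calloc(4, sizeof(*numa_distance));
	numa_distance[0 * 2 + 0] = LOCAL_DISTANCE;
	numa_distance[1 * 2 + 1] = LOCAL_DISTANCE;
	numa_distance[0 * 2 + 1] = 21;
	numa_distance[1 * 2 + 0] = 21;

	/* prints "21 10": table hit, then out-of-range same-node fallback */
	printf("%d %d\n", node_distance_sketch(0, 1), node_distance_sketch(7, 7));
	free(numa_distance);
	return 0;
}
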
to                809 arch/x86/mm/pat.c 	u64 to = from + size;
to                815 arch/x86/mm/pat.c 	while (cursor < to) {
to                 13 arch/x86/realmode/rm/realmode.h #define LJMPW_RM(to)	.byte 0xea ; .word (to), real_mode_seg
to                 45 arch/x86/um/asm/processor_32.h                                     struct arch_thread *to)
to                 47 arch/x86/um/asm/processor_32.h         memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
to                 29 arch/x86/um/asm/processor_64.h                                     struct arch_thread *to)
to                 31 arch/x86/um/asm/processor_64.h 	to->fs = from->fs;
to                 12 arch/x86/um/ptrace_32.c extern int arch_switch_tls(struct task_struct *to);
to                 14 arch/x86/um/ptrace_32.c void arch_switch_to(struct task_struct *to)
to                 16 arch/x86/um/ptrace_32.c 	int err = arch_switch_tls(to);
to                 86 arch/x86/um/signal.c 	struct _fpreg __user *to;
to                101 arch/x86/um/signal.c 	to = &buf->_st[0];
to                103 arch/x86/um/signal.c 	for (i = 0; i < 8; i++, to++, from++) {
to                104 arch/x86/um/signal.c 		unsigned long __user *t = (unsigned long __user *)to;
to                109 arch/x86/um/signal.c 				__put_user(from->exponent, &to->exponent))
to                119 arch/x86/um/signal.c 	struct _fpxreg *to;
to                135 arch/x86/um/signal.c 	to = (struct _fpxreg *) &fxsave->st_space[0];
to                137 arch/x86/um/signal.c 	for (i = 0; i < 8; i++, to++, from++) {
to                138 arch/x86/um/signal.c 		unsigned long *t = (unsigned long *)to;
to                143 arch/x86/um/signal.c 		    __get_user(to->exponent, &from->exponent))
to                236 arch/x86/um/signal.c static int copy_sc_to_user(struct sigcontext __user *to,
to                286 arch/x86/um/signal.c 	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
to                 83 arch/x86/um/syscalls_64.c void arch_switch_to(struct task_struct *to)
to                 85 arch/x86/um/syscalls_64.c 	if ((to->thread.arch.fs == 0) || (to->mm == NULL))
to                 88 arch/x86/um/syscalls_64.c 	arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
to                 92 arch/x86/um/tls_32.c static int load_TLS(int flags, struct task_struct *to)
to                 99 arch/x86/um/tls_32.c 			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
to                187 arch/x86/um/tls_32.c int arch_switch_tls(struct task_struct *to)
to                197 arch/x86/um/tls_32.c 	if (likely(to->mm))
to                198 arch/x86/um/tls_32.c 		return load_TLS(O_FORCE, to);
to                681 arch/x86/xen/setup.c 	void *from, *to;
to                693 arch/x86/xen/setup.c 		to = early_memremap(dest - dest_off, dest_len + dest_off);
to                695 arch/x86/xen/setup.c 		memcpy(to, from, len);
to                696 arch/x86/xen/setup.c 		early_memunmap(to, dest_len + dest_off);
to                133 arch/xtensa/include/asm/page.h extern void copy_page(void *to, void *from);
to                142 arch/xtensa/include/asm/page.h extern void copy_page_alias(void *to, void *from,
to                148 arch/xtensa/include/asm/page.h void copy_user_highpage(struct page *to, struct page *from,
to                152 arch/xtensa/include/asm/page.h # define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                248 arch/xtensa/include/asm/uaccess.h extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
to                251 arch/xtensa/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n)
to                253 arch/xtensa/include/asm/uaccess.h 	prefetchw(to);
to                254 arch/xtensa/include/asm/uaccess.h 	return __xtensa_copy_user(to, (__force const void *)from, n);
to                257 arch/xtensa/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                260 arch/xtensa/include/asm/uaccess.h 	return __xtensa_copy_user((__force void *)to, from, n);
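The xtensa uaccess hits show the raw_copy_{from,to}_user() wrappers: prefetch the destination on the from-user side, then pass both pointers to a single low-level copier, dropping the __user qualifier with a __force cast. A kernel-style sketch of that wrapper shape; example_low_level_copy() is a hypothetical stand-in for the arch copier:

/* Hypothetical arch copier: returns the number of bytes NOT copied,
 * the same contract __xtensa_copy_user() follows above. */
unsigned long example_low_level_copy(void *to, const void *from,
				     unsigned long n);

static inline unsigned long
example_raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);	/* destination is about to be written */
	return example_low_level_copy(to, (__force const void *)from, n);
}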
to                 75 block/bfq-cgroup.c static inline void bfq_stat_add_aux(struct bfq_stat *to,
to                 79 block/bfq-cgroup.c 		     &to->aux_cnt);
to                369 block/bfq-cgroup.c static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
to                371 block/bfq-cgroup.c 	if (!to || !from)
to                376 block/bfq-cgroup.c 	blkg_rwstat_add_aux(&to->merged, &from->merged);
to                377 block/bfq-cgroup.c 	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
to                378 block/bfq-cgroup.c 	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
to                380 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
to                381 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->avg_queue_size_samples,
to                383 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
to                384 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
to                385 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
to                386 block/bfq-cgroup.c 	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
to                 76 block/bounce.c static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
to                 80 block/bounce.c 	vto = kmap_atomic(to->bv_page);
to                 81 block/bounce.c 	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
to                 87 block/bounce.c #define bounce_copy_vec(to, vfrom)	\
to                 88 block/bounce.c 	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
to                132 block/bounce.c static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
to                144 block/bounce.c 	bio_for_each_segment(tovec, to, iter) {
to                291 block/bounce.c 	struct bio_vec *to, from;
to                321 block/bounce.c 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
to                322 block/bounce.c 		struct page *page = to->bv_page;
to                327 block/bounce.c 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
to                328 block/bounce.c 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
to                335 block/bounce.c 			vto = page_address(to->bv_page) + to->bv_offset;
to                336 block/bounce.c 			vfrom = kmap_atomic(page) + to->bv_offset;
to                337 block/bounce.c 			memcpy(vto, vfrom, to->bv_len);
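The block/bounce.c hits show the highmem bounce copy: map the target page with kmap_atomic(), memcpy() at the bio_vec's offset and length, then drop the mapping. A kernel-style sketch of that helper, patterned on the bounce_copy_vec() lines above (the kunmap_atomic() pairing is the standard completion of the listed kmap_atomic() call):

/* Copy vfrom into the (possibly highmem) page behind *to. */
static void example_bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned char *vto;

	vto = kmap_atomic(to->bv_page);			/* temporary mapping */
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);				/* drop the mapping */
}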
to                859 drivers/atm/firestream.c 	int to;
to                900 drivers/atm/firestream.c 			for (to=33;to;to--, dev->channo++) {
to                913 drivers/atm/firestream.c 			if (!to) {
to               1660 drivers/atm/firestream.c 	int isr, to;
to               1700 drivers/atm/firestream.c 	to = 100;
to               1701 drivers/atm/firestream.c 	while (--to) {
to               1718 drivers/atm/firestream.c 	if (!to) {
to                599 drivers/atm/iphase.h #define TABLE_ADDRESS(db, dn, to) \
to                600 drivers/atm/iphase.h 	(((unsigned long)(db & 0x04)) << 16) | (dn << 5) | (to << 1)  
to                217 drivers/base/regmap/regmap-debugfs.c 				   unsigned int to, char __user *user_buf,
to                239 drivers/base/regmap/regmap-debugfs.c 	for (i = start_reg; i >= 0 && i <= to;
to                229 drivers/base/regmap/trace.h 		 unsigned int to),
to                231 drivers/base/regmap/trace.h 	TP_ARGS(map, from, to),
to                236 drivers/base/regmap/trace.h 		__field(	unsigned int,	to			)
to                242 drivers/base/regmap/trace.h 		__entry->to = to;
to                246 drivers/base/regmap/trace.h 		  (unsigned int)__entry->to)
to                647 drivers/block/ataflop.c static inline void copy_buffer(void *from, void *to)
to                649 drivers/block/ataflop.c 	ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
to                902 drivers/block/mtip32xx/mtip32xx.c 	unsigned long to;
to                907 drivers/block/mtip32xx/mtip32xx.c 	to = jiffies + msecs_to_jiffies(timeout);
to                923 drivers/block/mtip32xx/mtip32xx.c 	} while (time_before(jiffies, to));
to               1491 drivers/block/mtip32xx/mtip32xx.c 	unsigned int to;
to               1505 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(port->dd, &fis, &to, 0);
to               1524 drivers/block/mtip32xx/mtip32xx.c 				 to) < 0) {
to               1564 drivers/block/mtip32xx/mtip32xx.c 	unsigned int to;
to               1595 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(port->dd, &fis, &to, 0);
to               1618 drivers/block/mtip32xx/mtip32xx.c 				 to)
to               2633 drivers/block/mtip32xx/mtip32xx.c 	unsigned long slot, slot_start, slot_wrap, to;
to               2671 drivers/block/mtip32xx/mtip32xx.c 			to = jiffies + msecs_to_jiffies(5000);
to               2676 drivers/block/mtip32xx/mtip32xx.c 				time_before(jiffies, to));
to               3386 drivers/block/mtip32xx/mtip32xx.c 			unsigned long to = dd->port->ic_pause_timer +
to               3388 drivers/block/mtip32xx/mtip32xx.c 			if (time_after(jiffies, to)) {
to               4189 drivers/block/mtip32xx/mtip32xx.c 	unsigned long flags, to;
to               4202 drivers/block/mtip32xx/mtip32xx.c 	to = jiffies + msecs_to_jiffies(4000);
to               4206 drivers/block/mtip32xx/mtip32xx.c 		time_before(jiffies, to));
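Several mtip32xx hits use the standard jiffies deadline idiom: compute an absolute deadline with msecs_to_jiffies() and poll until time_before(jiffies, to) fails. A kernel-style sketch of that loop; example_ready() is a hypothetical condition and the 4000 ms deadline simply mirrors the listed lines:

bool example_ready(void);	/* hypothetical: hardware-ready predicate */

static int example_wait_ready(void)
{
	unsigned long to = jiffies + msecs_to_jiffies(4000);

	do {
		if (example_ready())
			return 0;
		msleep(20);		/* back off instead of spinning */
	} while (time_before(jiffies, to));

	return -ETIMEDOUT;		/* deadline passed */
}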
to                673 drivers/block/nbd.c 	struct iov_iter to;
to                677 drivers/block/nbd.c 	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
to                678 drivers/block/nbd.c 	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
to                738 drivers/block/nbd.c 			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
to                739 drivers/block/nbd.c 			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
to                290 drivers/block/paride/pg.c 	int j, r, e, s, p, to;
to                303 drivers/block/paride/pg.c 	to = time_after_eq(jiffies, tmo);
to                305 drivers/block/paride/pg.c 	if ((r & (STAT_ERR & stop)) || to) {
to                311 drivers/block/paride/pg.c 			       dev->name, msg, s, e, p, to ? " timeout" : "");
to                312 drivers/block/paride/pg.c 		if (to)
to                475 drivers/block/ps3vram.c static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
to                481 drivers/block/ps3vram.c 	if (to >= priv->size)
to                484 drivers/block/ps3vram.c 	if (len > priv->size - to)
to                485 drivers/block/ps3vram.c 		len = priv->size - to;
to                493 drivers/block/ps3vram.c 		offset = (unsigned int) (to & (priv->cache.page_size - 1));
to                496 drivers/block/ps3vram.c 		entry = ps3vram_cache_match(dev, to);
to                500 drivers/block/ps3vram.c 			"avail=%08x count=%08x\n", __func__, (unsigned int)to,
to                511 drivers/block/ps3vram.c 		to += avail;
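The ps3vram_write() hits clamp the request against the device size before copying: reject a write that starts past the end, then trim len by comparing len > size - to rather than to + len > size, which avoids overflowing the addition. A small self-contained sketch of that clamp; the names are hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Clamp a (to, len) request against dev_size, ps3vram_write() style. */
static int example_clamp_request(uint64_t dev_size, uint64_t to, size_t *len)
{
	if (to >= dev_size)
		return -1;		/* starts past the end of the device */
	if (*len > dev_size - to)	/* subtract first: no overflow */
		*len = dev_size - to;
	return 0;
}

int main(void)
{
	size_t len = 4096;

	if (example_clamp_request(1024, 512, &len) == 0)
		printf("clamped len = %zu\n", len);	/* prints 512 */
	return 0;
}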
to                467 drivers/char/applicom.c 		void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
to                471 drivers/char/applicom.c 			writeb(*(from++), to++);
to                489 drivers/char/applicom.c 	unsigned char *to = (unsigned char *)mailbox;
to                502 drivers/char/applicom.c 			*(to++) = readb(from++);
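The applicom.c hits copy between ordinary memory and the card's RAM window one byte at a time through writeb()/readb() on an __iomem pointer. A kernel-style sketch of both directions; the loop bodies mirror the listed lines, the wrapper functions are illustrative:

/* RAM -> device window, byte by byte. */
static void example_copy_to_io(void __iomem *to, const unsigned char *from,
			       unsigned int count)
{
	while (count--)
		writeb(*from++, to++);
}

/* Device window -> RAM, byte by byte. */
static void example_copy_from_io(unsigned char *to, const void __iomem *from,
				 unsigned int count)
{
	while (count--)
		*to++ = readb(from++);
}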
to                201 drivers/char/ipmi/ipmi_devintf.c 			void __user *to)
to                268 drivers/char/ipmi/ipmi_devintf.c 	rv = copyout(rsp, to);
to                287 drivers/char/ipmi/ipmi_devintf.c static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
to                289 drivers/char/ipmi/ipmi_devintf.c 	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
to                700 drivers/char/ipmi/ipmi_devintf.c static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
to                712 drivers/char/ipmi/ipmi_devintf.c 	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
to                 71 drivers/char/mem.c 	u64 to = from + size;
to                 74 drivers/char/mem.c 	while (cursor < to) {
to                681 drivers/char/mem.c static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
to                125 drivers/clocksource/timer-atcpit100.c 	struct timer_of *to = to_timer_of(clkevt);
to                127 drivers/clocksource/timer-atcpit100.c 	val = readl(timer_of_base(to) + CH_EN);
to                128 drivers/clocksource/timer-atcpit100.c 	writel(val & ~CH0TMR0EN, timer_of_base(to) + CH_EN);
to                129 drivers/clocksource/timer-atcpit100.c 	writel(evt, timer_of_base(to) + CH0_REL);
to                130 drivers/clocksource/timer-atcpit100.c 	writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
to                137 drivers/clocksource/timer-atcpit100.c 	struct timer_of *to = to_timer_of(evt);
to                139 drivers/clocksource/timer-atcpit100.c 	atcpit100_clkevt_time_setup(timer_of_base(to), timer_of_period(to));
to                140 drivers/clocksource/timer-atcpit100.c 	atcpit100_clkevt_time_start(timer_of_base(to));
to                146 drivers/clocksource/timer-atcpit100.c 	struct timer_of *to = to_timer_of(evt);
to                148 drivers/clocksource/timer-atcpit100.c 	atcpit100_clkevt_time_stop(timer_of_base(to));
to                154 drivers/clocksource/timer-atcpit100.c 	struct timer_of *to = to_timer_of(evt);
to                157 drivers/clocksource/timer-atcpit100.c 	writel(~0x0, timer_of_base(to) + CH0_REL);
to                158 drivers/clocksource/timer-atcpit100.c 	val = readl(timer_of_base(to) + CH_EN);
to                159 drivers/clocksource/timer-atcpit100.c 	writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
to                167 drivers/clocksource/timer-atcpit100.c 	struct timer_of *to = to_timer_of(evt);
to                169 drivers/clocksource/timer-atcpit100.c 	atcpit100_timer_clear_interrupt(timer_of_base(to));
to                176 drivers/clocksource/timer-atcpit100.c static struct timer_of to = {
to                207 drivers/clocksource/timer-atcpit100.c 	return ~readl(timer_of_base(&to) + CH1_CNT);
to                227 drivers/clocksource/timer-atcpit100.c 	ret = timer_of_init(node, &to);
to                231 drivers/clocksource/timer-atcpit100.c 	base = timer_of_base(&to);
to                234 drivers/clocksource/timer-atcpit100.c 		timer_of_rate(&to));
to                237 drivers/clocksource/timer-atcpit100.c 		node->name, timer_of_rate(&to), 300, 32,
to                248 drivers/clocksource/timer-atcpit100.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
to                 76 drivers/clocksource/timer-gx6605s.c static struct timer_of to = {
to                 97 drivers/clocksource/timer-gx6605s.c 	base = timer_of_base(&to) + CLKSRC_OFFSET;
to                107 drivers/clocksource/timer-gx6605s.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 2,
to                122 drivers/clocksource/timer-gx6605s.c 	sched_clock_register(gx6605s_sched_clock_read, 32, timer_of_rate(&to));
to                125 drivers/clocksource/timer-gx6605s.c 			timer_of_rate(&to), 200, 32, clocksource_mmio_readl_up);
to                146 drivers/clocksource/timer-gx6605s.c 	ret = timer_of_init(np, &to);
to                150 drivers/clocksource/timer-gx6605s.c 	gx6605s_clkevt_init(timer_of_base(&to));
to                152 drivers/clocksource/timer-gx6605s.c 	return gx6605s_clksrc_init(timer_of_base(&to) + CLKSRC_OFFSET);
to                 56 drivers/clocksource/timer-mediatek.c #define SYST_CON_REG(to)        (timer_of_base(to) + SYST_CON)
to                 57 drivers/clocksource/timer-mediatek.c #define SYST_VAL_REG(to)        (timer_of_base(to) + SYST_VAL)
to                 75 drivers/clocksource/timer-mediatek.c static void mtk_syst_ack_irq(struct timer_of *to)
to                 78 drivers/clocksource/timer-mediatek.c 	writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
to                 84 drivers/clocksource/timer-mediatek.c 	struct timer_of *to = to_timer_of(clkevt);
to                 86 drivers/clocksource/timer-mediatek.c 	mtk_syst_ack_irq(to);
to                 95 drivers/clocksource/timer-mediatek.c 	struct timer_of *to = to_timer_of(clkevt);
to                 98 drivers/clocksource/timer-mediatek.c 	writel(SYST_CON_EN, SYST_CON_REG(to));
to                104 drivers/clocksource/timer-mediatek.c 	writel(ticks, SYST_VAL_REG(to));
to                107 drivers/clocksource/timer-mediatek.c 	writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
to                135 drivers/clocksource/timer-mediatek.c static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
to                139 drivers/clocksource/timer-mediatek.c 	val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
to                140 drivers/clocksource/timer-mediatek.c 	writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
to                144 drivers/clocksource/timer-mediatek.c static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
to                147 drivers/clocksource/timer-mediatek.c 	writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
to                150 drivers/clocksource/timer-mediatek.c static void mtk_gpt_clkevt_time_start(struct timer_of *to,
to                156 drivers/clocksource/timer-mediatek.c 	writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
to                158 drivers/clocksource/timer-mediatek.c 	val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
to                169 drivers/clocksource/timer-mediatek.c 	       timer_of_base(to) + GPT_CTRL_REG(timer));
to                181 drivers/clocksource/timer-mediatek.c 	struct timer_of *to = to_timer_of(clk);
to                183 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
to                184 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
to                185 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
to                193 drivers/clocksource/timer-mediatek.c 	struct timer_of *to = to_timer_of(clk);
to                195 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
to                196 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
to                197 drivers/clocksource/timer-mediatek.c 	mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
to                205 drivers/clocksource/timer-mediatek.c 	struct timer_of *to = to_timer_of(clkevt);
to                208 drivers/clocksource/timer-mediatek.c 	writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
to                215 drivers/clocksource/timer-mediatek.c __init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
to                218 drivers/clocksource/timer-mediatek.c 	       timer_of_base(to) + GPT_CTRL_REG(timer));
to                221 drivers/clocksource/timer-mediatek.c 	       timer_of_base(to) + GPT_CLK_REG(timer));
to                223 drivers/clocksource/timer-mediatek.c 	writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
to                226 drivers/clocksource/timer-mediatek.c 	       timer_of_base(to) + GPT_CTRL_REG(timer));
to                229 drivers/clocksource/timer-mediatek.c static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
to                234 drivers/clocksource/timer-mediatek.c 	writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
to                237 drivers/clocksource/timer-mediatek.c 	writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
to                239 drivers/clocksource/timer-mediatek.c 	val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
to                241 drivers/clocksource/timer-mediatek.c 	       timer_of_base(to) + GPT_IRQ_EN_REG);
to                244 drivers/clocksource/timer-mediatek.c static struct timer_of to = {
to                262 drivers/clocksource/timer-mediatek.c 	to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
to                263 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
to                264 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
to                265 drivers/clocksource/timer-mediatek.c 	to.clkevt.tick_resume = mtk_syst_clkevt_resume;
to                266 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
to                267 drivers/clocksource/timer-mediatek.c 	to.of_irq.handler = mtk_syst_handler;
to                269 drivers/clocksource/timer-mediatek.c 	ret = timer_of_init(node, &to);
to                273 drivers/clocksource/timer-mediatek.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
to                283 drivers/clocksource/timer-mediatek.c 	to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
to                284 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
to                285 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
to                286 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
to                287 drivers/clocksource/timer-mediatek.c 	to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
to                288 drivers/clocksource/timer-mediatek.c 	to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
to                289 drivers/clocksource/timer-mediatek.c 	to.of_irq.handler = mtk_gpt_interrupt;
to                291 drivers/clocksource/timer-mediatek.c 	ret = timer_of_init(node, &to);
to                296 drivers/clocksource/timer-mediatek.c 	mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
to                297 drivers/clocksource/timer-mediatek.c 	clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
to                298 drivers/clocksource/timer-mediatek.c 			      node->name, timer_of_rate(&to), 300, 32,
to                300 drivers/clocksource/timer-mediatek.c 	gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
to                301 drivers/clocksource/timer-mediatek.c 	sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
to                304 drivers/clocksource/timer-mediatek.c 	mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
to                305 drivers/clocksource/timer-mediatek.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
to                308 drivers/clocksource/timer-mediatek.c 	mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
to                 52 drivers/clocksource/timer-milbeaut.c 	struct timer_of *to = to_timer_of(clk);
to                 55 drivers/clocksource/timer-milbeaut.c 	val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                 57 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                 64 drivers/clocksource/timer-milbeaut.c static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
to                 71 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                 74 drivers/clocksource/timer-milbeaut.c static void mlb_evt_timer_stop(struct timer_of *to)
to                 76 drivers/clocksource/timer-milbeaut.c 	u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                 79 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                 82 drivers/clocksource/timer-milbeaut.c static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
to                 84 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
to                 89 drivers/clocksource/timer-milbeaut.c 	struct timer_of *to = to_timer_of(clk);
to                 91 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_stop(to);
to                 92 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_register_count(to, to->of_clk.period);
to                 93 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
to                 99 drivers/clocksource/timer-milbeaut.c 	struct timer_of *to = to_timer_of(clk);
to                101 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_stop(to);
to                102 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
to                108 drivers/clocksource/timer-milbeaut.c 	struct timer_of *to = to_timer_of(clk);
to                110 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_stop(to);
to                117 drivers/clocksource/timer-milbeaut.c 	struct timer_of *to = to_timer_of(clk);
to                119 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_stop(to);
to                120 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_register_count(to, event);
to                121 drivers/clocksource/timer-milbeaut.c 	mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
to                125 drivers/clocksource/timer-milbeaut.c static int mlb_config_clock_source(struct timer_of *to)
to                129 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
to                130 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
to                131 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
to                133 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
to                137 drivers/clocksource/timer-milbeaut.c static int mlb_config_clock_event(struct timer_of *to)
to                139 drivers/clocksource/timer-milbeaut.c 	writel_relaxed(0, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
to                143 drivers/clocksource/timer-milbeaut.c static struct timer_of to = {
to                165 drivers/clocksource/timer-milbeaut.c 	return ~readl_relaxed(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS);
to                173 drivers/clocksource/timer-milbeaut.c 	ret = timer_of_init(node, &to);
to                177 drivers/clocksource/timer-milbeaut.c 	rate = timer_of_rate(&to) / MLB_TMR_DIV_CNT;
to                178 drivers/clocksource/timer-milbeaut.c 	mlb_config_clock_source(&to);
to                179 drivers/clocksource/timer-milbeaut.c 	clocksource_mmio_init(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS,
to                183 drivers/clocksource/timer-milbeaut.c 	mlb_config_clock_event(&to);
to                184 drivers/clocksource/timer-milbeaut.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 15,
to                 64 drivers/clocksource/timer-mp-csky.c 	struct timer_of *to = this_cpu_ptr(&csky_to);
to                 68 drivers/clocksource/timer-mp-csky.c 	to->clkevt.event_handler(&to->clkevt);
to                 78 drivers/clocksource/timer-mp-csky.c 	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
to                 80 drivers/clocksource/timer-mp-csky.c 	to->clkevt.cpumask = cpumask_of(cpu);
to                 84 drivers/clocksource/timer-mp-csky.c 	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
to                121 drivers/clocksource/timer-mp-csky.c 	struct timer_of *to = NULL;
to                145 drivers/clocksource/timer-mp-csky.c 		to = per_cpu_ptr(&csky_to, cpu);
to                146 drivers/clocksource/timer-mp-csky.c 		ret = timer_of_init(np, to);
to                151 drivers/clocksource/timer-mp-csky.c 	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
to                152 drivers/clocksource/timer-mp-csky.c 	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));
to                168 drivers/clocksource/timer-mp-csky.c 		to = per_cpu_ptr(&csky_to, cpu_rollback);
to                169 drivers/clocksource/timer-mp-csky.c 		timer_of_cleanup(to);
to                 58 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(evt);
to                 61 drivers/clocksource/timer-npcm7xx.c 	val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 63 drivers/clocksource/timer-npcm7xx.c 	writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 70 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(evt);
to                 73 drivers/clocksource/timer-npcm7xx.c 	val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 75 drivers/clocksource/timer-npcm7xx.c 	writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 82 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(evt);
to                 85 drivers/clocksource/timer-npcm7xx.c 	val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 88 drivers/clocksource/timer-npcm7xx.c 	writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                 95 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(evt);
to                 98 drivers/clocksource/timer-npcm7xx.c 	writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
to                100 drivers/clocksource/timer-npcm7xx.c 	val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                103 drivers/clocksource/timer-npcm7xx.c 	writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                111 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(clk);
to                114 drivers/clocksource/timer-npcm7xx.c 	writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
to                115 drivers/clocksource/timer-npcm7xx.c 	val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                117 drivers/clocksource/timer-npcm7xx.c 	writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
to                125 drivers/clocksource/timer-npcm7xx.c 	struct timer_of *to = to_timer_of(evt);
to                127 drivers/clocksource/timer-npcm7xx.c 	writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
to                 24 drivers/clocksource/timer-of.c 	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
to                 26 drivers/clocksource/timer-of.c 	struct clock_event_device *clkevt = &to->clkevt;
to                 54 drivers/clocksource/timer-of.c 	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
to                 55 drivers/clocksource/timer-of.c 	struct clock_event_device *clkevt = &to->clkevt;
to                168 drivers/clocksource/timer-of.c int __init timer_of_init(struct device_node *np, struct timer_of *to)
to                173 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_BASE) {
to                174 drivers/clocksource/timer-of.c 		ret = timer_of_base_init(np, &to->of_base);
to                180 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_CLOCK) {
to                181 drivers/clocksource/timer-of.c 		ret = timer_of_clk_init(np, &to->of_clk);
to                187 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_IRQ) {
to                188 drivers/clocksource/timer-of.c 		ret = timer_of_irq_init(np, &to->of_irq);
to                194 drivers/clocksource/timer-of.c 	if (!to->clkevt.name)
to                195 drivers/clocksource/timer-of.c 		to->clkevt.name = np->full_name;
to                197 drivers/clocksource/timer-of.c 	to->np = np;
to                203 drivers/clocksource/timer-of.c 		timer_of_irq_exit(&to->of_irq);
to                206 drivers/clocksource/timer-of.c 		timer_of_clk_exit(&to->of_clk);
to                209 drivers/clocksource/timer-of.c 		timer_of_base_exit(&to->of_base);
to                220 drivers/clocksource/timer-of.c void __init timer_of_cleanup(struct timer_of *to)
to                222 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_IRQ)
to                223 drivers/clocksource/timer-of.c 		timer_of_irq_exit(&to->of_irq);
to                225 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_CLOCK)
to                226 drivers/clocksource/timer-of.c 		timer_of_clk_exit(&to->of_clk);
to                228 drivers/clocksource/timer-of.c 	if (to->flags & TIMER_OF_BASE)
to                229 drivers/clocksource/timer-of.c 		timer_of_base_exit(&to->of_base);
to                 49 drivers/clocksource/timer-of.h static inline void __iomem *timer_of_base(struct timer_of *to)
to                 51 drivers/clocksource/timer-of.h 	return to->of_base.base;
to                 54 drivers/clocksource/timer-of.h static inline int timer_of_irq(struct timer_of *to)
to                 56 drivers/clocksource/timer-of.h 	return to->of_irq.irq;
to                 59 drivers/clocksource/timer-of.h static inline unsigned long timer_of_rate(struct timer_of *to)
to                 61 drivers/clocksource/timer-of.h 	return to->of_clk.rate;
to                 64 drivers/clocksource/timer-of.h static inline unsigned long timer_of_period(struct timer_of *to)
to                 66 drivers/clocksource/timer-of.h 	return to->of_clk.period;
to                 70 drivers/clocksource/timer-of.h 				struct timer_of *to);
to                 72 drivers/clocksource/timer-of.h extern void __init timer_of_cleanup(struct timer_of *to);
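The timer-of.h hits define the small accessors (timer_of_base/irq/rate/period) that the clocksource drivers above lean on; a typical clockevent callback recovers the timer_of with to_timer_of() and programs registers off its base, as the atcpit100 and npcm7xx hits show. A kernel-style sketch of such a callback; EXAMPLE_LOAD, EXAMPLE_CTRL and EXAMPLE_EN are hypothetical register offsets and bits, while the accessor calls are the real API:

#define EXAMPLE_LOAD	0x00	/* hypothetical reload register */
#define EXAMPLE_CTRL	0x04	/* hypothetical control register */
#define EXAMPLE_EN	BIT(0)	/* hypothetical enable bit */

static int example_set_periodic(struct clock_event_device *clkevt)
{
	struct timer_of *to = to_timer_of(clkevt);

	/* Reload value for one period, then (re)enable the timer. */
	writel(timer_of_period(to), timer_of_base(to) + EXAMPLE_LOAD);
	writel(EXAMPLE_EN, timer_of_base(to) + EXAMPLE_CTRL);

	return 0;
}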
to                 69 drivers/clocksource/timer-rda.c 	struct timer_of *to = to_timer_of(evt);
to                 71 drivers/clocksource/timer-rda.c 	rda_ostimer_stop(timer_of_base(to));
to                 78 drivers/clocksource/timer-rda.c 	struct timer_of *to = to_timer_of(evt);
to                 80 drivers/clocksource/timer-rda.c 	rda_ostimer_stop(timer_of_base(to));
to                 87 drivers/clocksource/timer-rda.c 	struct timer_of *to = to_timer_of(evt);
to                 90 drivers/clocksource/timer-rda.c 	rda_ostimer_stop(timer_of_base(to));
to                 94 drivers/clocksource/timer-rda.c 	rda_ostimer_start(timer_of_base(to), true, cycles_per_jiffy);
to                107 drivers/clocksource/timer-rda.c 	struct timer_of *to = to_timer_of(ev);
to                109 drivers/clocksource/timer-rda.c 	rda_ostimer_start(timer_of_base(to), false, evt);
to                117 drivers/clocksource/timer-rda.c 	struct timer_of *to = to_timer_of(evt);
to                121 drivers/clocksource/timer-rda.c 		       timer_of_base(to) + RDA_TIMER_IRQ_CLR);
to                 82 drivers/clocksource/timer-sprd.c 	struct timer_of *to = to_timer_of(ce);
to                 84 drivers/clocksource/timer-sprd.c 	sprd_timer_disable(timer_of_base(to));
to                 85 drivers/clocksource/timer-sprd.c 	sprd_timer_update_counter(timer_of_base(to), cycles);
to                 86 drivers/clocksource/timer-sprd.c 	sprd_timer_enable(timer_of_base(to), 0);
to                 93 drivers/clocksource/timer-sprd.c 	struct timer_of *to = to_timer_of(ce);
to                 95 drivers/clocksource/timer-sprd.c 	sprd_timer_disable(timer_of_base(to));
to                 96 drivers/clocksource/timer-sprd.c 	sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
to                 97 drivers/clocksource/timer-sprd.c 	sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
to                104 drivers/clocksource/timer-sprd.c 	struct timer_of *to = to_timer_of(ce);
to                106 drivers/clocksource/timer-sprd.c 	sprd_timer_disable(timer_of_base(to));
to                113 drivers/clocksource/timer-sprd.c 	struct timer_of *to = to_timer_of(ce);
to                115 drivers/clocksource/timer-sprd.c 	sprd_timer_clear_interrupt(timer_of_base(to));
to                118 drivers/clocksource/timer-sprd.c 		sprd_timer_disable(timer_of_base(to));
to                124 drivers/clocksource/timer-sprd.c static struct timer_of to = {
to                148 drivers/clocksource/timer-sprd.c 	ret = timer_of_init(np, &to);
to                152 drivers/clocksource/timer-sprd.c 	sprd_timer_enable_interrupt(timer_of_base(&to));
to                153 drivers/clocksource/timer-sprd.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
to                 62 drivers/clocksource/timer-stm32.c static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
to                 64 drivers/clocksource/timer-stm32.c 	struct stm32_timer_private *pd = to->private_data;
to                 78 drivers/clocksource/timer-stm32.c static int stm32_timer_of_bits_get(struct timer_of *to)
to                 80 drivers/clocksource/timer-stm32.c 	struct stm32_timer_private *pd = to->private_data;
to                 99 drivers/clocksource/timer-stm32.c static void stm32_clock_event_disable(struct timer_of *to)
to                101 drivers/clocksource/timer-stm32.c 	writel_relaxed(0, timer_of_base(to) + TIM_DIER);
to                112 drivers/clocksource/timer-stm32.c static void stm32_timer_start(struct timer_of *to)
to                114 drivers/clocksource/timer-stm32.c 	writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
to                119 drivers/clocksource/timer-stm32.c 	struct timer_of *to = to_timer_of(clkevt);
to                121 drivers/clocksource/timer-stm32.c 	stm32_clock_event_disable(to);
to                129 drivers/clocksource/timer-stm32.c 	struct timer_of *to = to_timer_of(clkevt);
to                132 drivers/clocksource/timer-stm32.c 	next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
to                133 drivers/clocksource/timer-stm32.c 	writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
to                134 drivers/clocksource/timer-stm32.c 	now = readl_relaxed(timer_of_base(to) + TIM_CNT);
to                139 drivers/clocksource/timer-stm32.c 	writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
to                146 drivers/clocksource/timer-stm32.c 	struct timer_of *to = to_timer_of(clkevt);
to                148 drivers/clocksource/timer-stm32.c 	stm32_timer_start(to);
to                150 drivers/clocksource/timer-stm32.c 	return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
to                155 drivers/clocksource/timer-stm32.c 	struct timer_of *to = to_timer_of(clkevt);
to                157 drivers/clocksource/timer-stm32.c 	stm32_timer_start(to);
to                165 drivers/clocksource/timer-stm32.c 	struct timer_of *to = to_timer_of(clkevt);
to                167 drivers/clocksource/timer-stm32.c 	writel_relaxed(0, timer_of_base(to) + TIM_SR);
to                188 drivers/clocksource/timer-stm32.c static void __init stm32_timer_set_width(struct timer_of *to)
to                192 drivers/clocksource/timer-stm32.c 	writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
to                194 drivers/clocksource/timer-stm32.c 	width = readl_relaxed(timer_of_base(to) + TIM_ARR);
to                196 drivers/clocksource/timer-stm32.c 	stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
to                207 drivers/clocksource/timer-stm32.c static void __init stm32_timer_set_prescaler(struct timer_of *to)
to                211 drivers/clocksource/timer-stm32.c 	if (stm32_timer_of_bits_get(to) != 32) {
to                212 drivers/clocksource/timer-stm32.c 		prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
to                222 drivers/clocksource/timer-stm32.c 	writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
to                223 drivers/clocksource/timer-stm32.c 	writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
to                224 drivers/clocksource/timer-stm32.c 	writel_relaxed(0, timer_of_base(to) + TIM_SR);
to                227 drivers/clocksource/timer-stm32.c 	to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
to                228 drivers/clocksource/timer-stm32.c 	to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
to                231 drivers/clocksource/timer-stm32.c static int __init stm32_clocksource_init(struct timer_of *to)
to                233 drivers/clocksource/timer-stm32.c         u32 bits = stm32_timer_of_bits_get(to);
to                234 drivers/clocksource/timer-stm32.c 	const char *name = to->np->full_name;
to                251 drivers/clocksource/timer-stm32.c 		stm32_timer_start(to);
to                253 drivers/clocksource/timer-stm32.c 		stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
to                254 drivers/clocksource/timer-stm32.c 		sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
to                258 drivers/clocksource/timer-stm32.c 		stm32_timer_delay.freq = timer_of_rate(to);
to                263 drivers/clocksource/timer-stm32.c 	return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
to                264 drivers/clocksource/timer-stm32.c 				     timer_of_rate(to), bits == 32 ? 250 : 100,
to                268 drivers/clocksource/timer-stm32.c static void __init stm32_clockevent_init(struct timer_of *to)
to                270 drivers/clocksource/timer-stm32.c 	u32 bits = stm32_timer_of_bits_get(to);
to                272 drivers/clocksource/timer-stm32.c 	to->clkevt.name = to->np->full_name;
to                273 drivers/clocksource/timer-stm32.c 	to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
to                274 drivers/clocksource/timer-stm32.c 	to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
to                275 drivers/clocksource/timer-stm32.c 	to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
to                276 drivers/clocksource/timer-stm32.c 	to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
to                277 drivers/clocksource/timer-stm32.c 	to->clkevt.tick_resume = stm32_clock_event_shutdown;
to                278 drivers/clocksource/timer-stm32.c 	to->clkevt.set_next_event = stm32_clock_event_set_next_event;
to                279 drivers/clocksource/timer-stm32.c 	to->clkevt.rating = bits == 32 ? 250 : 100;
to                281 drivers/clocksource/timer-stm32.c 	clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
to                285 drivers/clocksource/timer-stm32.c 		to->np, bits);
to                291 drivers/clocksource/timer-stm32.c 	struct timer_of *to;
to                294 drivers/clocksource/timer-stm32.c 	to = kzalloc(sizeof(*to), GFP_KERNEL);
to                295 drivers/clocksource/timer-stm32.c 	if (!to)
to                298 drivers/clocksource/timer-stm32.c 	to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
to                299 drivers/clocksource/timer-stm32.c 	to->of_irq.handler = stm32_clock_event_handler;
to                301 drivers/clocksource/timer-stm32.c 	ret = timer_of_init(node, to);
to                305 drivers/clocksource/timer-stm32.c 	to->private_data = kzalloc(sizeof(struct stm32_timer_private),
to                307 drivers/clocksource/timer-stm32.c 	if (!to->private_data) {
to                318 drivers/clocksource/timer-stm32.c 	stm32_timer_set_width(to);
to                320 drivers/clocksource/timer-stm32.c 	stm32_timer_set_prescaler(to);
to                322 drivers/clocksource/timer-stm32.c 	ret = stm32_clocksource_init(to);
to                326 drivers/clocksource/timer-stm32.c 	stm32_clockevent_init(to);
to                330 drivers/clocksource/timer-stm32.c 	timer_of_cleanup(to);
to                332 drivers/clocksource/timer-stm32.c 	kfree(to);
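The tail of the timer-stm32.c run shows the common timer_of probe shape: allocate a struct timer_of, select resources through the TIMER_OF_* flags, install the IRQ handler, call timer_of_init(), and unwind with kfree()/timer_of_cleanup() on failure. A kernel-style sketch of that shape; example_timer_init() and example_irq_handler() are hypothetical, while the timer_of calls and flags are the ones listed above:

irqreturn_t example_irq_handler(int irq, void *dev_id);	/* hypothetical */

static int __init example_timer_init(struct device_node *node)
{
	struct timer_of *to;
	int ret;

	to = kzalloc(sizeof(*to), GFP_KERNEL);
	if (!to)
		return -ENOMEM;

	to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
	to->of_irq.handler = example_irq_handler;

	ret = timer_of_init(node, to);
	if (ret) {
		kfree(to);		/* nothing else was set up yet */
		return ret;
	}

	/* Fill in to->clkevt callbacks and register it here. */
	return 0;
}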
to                 87 drivers/clocksource/timer-sun4i.c 	struct timer_of *to = to_timer_of(evt);
to                 89 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_stop(timer_of_base(to), 0);
to                 96 drivers/clocksource/timer-sun4i.c 	struct timer_of *to = to_timer_of(evt);
to                 98 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_stop(timer_of_base(to), 0);
to                 99 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_start(timer_of_base(to), 0, false);
to                106 drivers/clocksource/timer-sun4i.c 	struct timer_of *to = to_timer_of(evt);
to                108 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_stop(timer_of_base(to), 0);
to                109 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_setup(timer_of_base(to), 0, timer_of_period(to));
to                110 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_start(timer_of_base(to), 0, true);
to                118 drivers/clocksource/timer-sun4i.c 	struct timer_of *to = to_timer_of(clkevt);
to                120 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_stop(timer_of_base(to), 0);
to                121 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_setup(timer_of_base(to), 0, evt - TIMER_SYNC_TICKS);
to                122 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_start(timer_of_base(to), 0, false);
to                135 drivers/clocksource/timer-sun4i.c 	struct timer_of *to = to_timer_of(evt);
to                137 drivers/clocksource/timer-sun4i.c 	sun4i_timer_clear_interrupt(timer_of_base(to));
to                143 drivers/clocksource/timer-sun4i.c static struct timer_of to = {
to                166 drivers/clocksource/timer-sun4i.c 	return ~readl(timer_of_base(&to) + TIMER_CNTVAL_REG(1));
to                174 drivers/clocksource/timer-sun4i.c 	ret = timer_of_init(node, &to);
to                178 drivers/clocksource/timer-sun4i.c 	writel(~0, timer_of_base(&to) + TIMER_INTVAL_REG(1));
to                181 drivers/clocksource/timer-sun4i.c 	       timer_of_base(&to) + TIMER_CTL_REG(1));
to                192 drivers/clocksource/timer-sun4i.c 				     timer_of_rate(&to));
to                194 drivers/clocksource/timer-sun4i.c 	ret = clocksource_mmio_init(timer_of_base(&to) + TIMER_CNTVAL_REG(1),
to                195 drivers/clocksource/timer-sun4i.c 				    node->name, timer_of_rate(&to), 350, 32,
to                203 drivers/clocksource/timer-sun4i.c 	       timer_of_base(&to) + TIMER_CTL_REG(0));
to                206 drivers/clocksource/timer-sun4i.c 	sun4i_clkevt_time_stop(timer_of_base(&to), 0);
to                209 drivers/clocksource/timer-sun4i.c 	sun4i_timer_clear_interrupt(timer_of_base(&to));
to                211 drivers/clocksource/timer-sun4i.c 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
to                215 drivers/clocksource/timer-sun4i.c 	val = readl(timer_of_base(&to) + TIMER_IRQ_EN_REG);
to                216 drivers/clocksource/timer-sun4i.c 	writel(val | TIMER_IRQ_EN(0), timer_of_base(&to) + TIMER_IRQ_EN_REG);
to                134 drivers/clocksource/timer-tegra.c 	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
to                136 drivers/clocksource/timer-tegra.c 	writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
to                137 drivers/clocksource/timer-tegra.c 	writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);
to                139 drivers/clocksource/timer-tegra.c 	irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
to                140 drivers/clocksource/timer-tegra.c 	enable_irq(to->clkevt.irq);
to                150 drivers/clocksource/timer-tegra.c 	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
to                159 drivers/clocksource/timer-tegra.c 	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
to                161 drivers/clocksource/timer-tegra.c 	to->clkevt.set_state_shutdown(&to->clkevt);
to                162 drivers/clocksource/timer-tegra.c 	disable_irq_nosync(to->clkevt.irq);
to                238 drivers/clocksource/timer-tegra.c static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
to                248 drivers/clocksource/timer-tegra.c 	return timer_of_rate(to);
to                254 drivers/clocksource/timer-tegra.c 	struct timer_of *to;
to                257 drivers/clocksource/timer-tegra.c 	to = this_cpu_ptr(&tegra_to);
to                258 drivers/clocksource/timer-tegra.c 	ret = timer_of_init(np, to);
to                262 drivers/clocksource/timer-tegra.c 	timer_reg_base = timer_of_base(to);
to                269 drivers/clocksource/timer-tegra.c 	switch (timer_of_rate(to)) {
to                304 drivers/clocksource/timer-tegra.c 		unsigned long rate = tegra_rate_for_timer(to, tegra20);
to                366 drivers/clocksource/timer-tegra.c 	to->of_base.base = timer_reg_base;
to                368 drivers/clocksource/timer-tegra.c 	timer_of_cleanup(to);
to                 80 drivers/cpufreq/maple-cpufreq.c 	int to;
to                 93 drivers/cpufreq/maple-cpufreq.c 	for (to = 0; to < 10; to++) {
to                150 drivers/cpufreq/pmac64-cpufreq.c 	int to;
to                167 drivers/cpufreq/pmac64-cpufreq.c 	for (to = 0; to < 10; to++) {
to                138 drivers/crypto/bcm/util.c 	struct scatterlist *to = *to_sg;
to                156 drivers/crypto/bcm/util.c 			sg_set_page(to++, sg_page(sg), frag_len, offset);
to                165 drivers/crypto/bcm/util.c 	*to_sg = to;
to                385 drivers/crypto/chelsio/chcr_algo.c 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
to                419 drivers/crypto/chelsio/chcr_algo.c 	walk->to->len[j % 8] = htons(size);
to                420 drivers/crypto/chelsio/chcr_algo.c 	walk->to->addr[j % 8] = cpu_to_be64(addr);
to                423 drivers/crypto/chelsio/chcr_algo.c 		walk->to++;
to                455 drivers/crypto/chelsio/chcr_algo.c 			walk->to->len[j % 8] = htons(ent_len);
to                456 drivers/crypto/chelsio/chcr_algo.c 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
to                462 drivers/crypto/chelsio/chcr_algo.c 				walk->to++;
to                209 drivers/crypto/chelsio/chcr_crypto.h 	struct phys_sge_pairs *to;
to                551 drivers/dma/ti/edma.c static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
to                553 drivers/dma/ti/edma.c 	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
to                557 drivers/dma/ti/edma.c 	to = EDMA_CHAN_SLOT(to);
to                558 drivers/dma/ti/edma.c 	if (from >= ecc->num_slots || to >= ecc->num_slots)
to                562 drivers/dma/ti/edma.c 			  PARM_OFFSET(to));
to                340 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		uint64_t from = src_node_start, to = dst_node_start;
to                372 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 					&to);
to                375 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			to += dst_page_offset;
to                378 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		r = amdgpu_copy_buffer(ring, from, to, cur_size,
to                 33 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from)
to                 35 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->line_buf_depth           = from->line_buf_depth;
to                 36 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->bits_per_component       = from->bits_per_component;
to                 37 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->convert_rgb              = from->convert_rgb;
to                 38 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->slice_width              = from->slice_width;
to                 39 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->slice_height             = from->slice_height;
to                 40 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->simple_422               = from->simple_422;
to                 41 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->native_422               = from->native_422;
to                 42 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->native_420               = from->native_420;
to                 43 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->pic_width                = from->pic_width;
to                 44 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->pic_height               = from->pic_height;
to                 45 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_tgt_offset_high       = from->rc_tgt_offset_high;
to                 46 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_tgt_offset_low        = from->rc_tgt_offset_low;
to                 47 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->bits_per_pixel           = from->bits_per_pixel;
to                 48 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_edge_factor           = from->rc_edge_factor;
to                 49 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_quant_incr_limit1     = from->rc_quant_incr_limit1;
to                 50 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_quant_incr_limit0     = from->rc_quant_incr_limit0;
to                 51 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->initial_xmit_delay       = from->initial_xmit_delay;
to                 52 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->initial_dec_delay        = from->initial_dec_delay;
to                 53 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->block_pred_enable        = from->block_pred_enable;
to                 54 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->first_line_bpg_offset    = from->first_line_bpg_offset;
to                 55 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->second_line_bpg_offset   = from->second_line_bpg_offset;
to                 56 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->initial_offset           = from->initial_offset;
to                 57 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	memcpy(&to->rc_buf_thresh, &from->rc_buf_thresh, sizeof(from->rc_buf_thresh));
to                 58 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	memcpy(&to->rc_range_params, &from->rc_range_params, sizeof(from->rc_range_params));
to                 59 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->rc_model_size            = from->rc_model_size;
to                 60 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->flatness_min_qp          = from->flatness_min_qp;
to                 61 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->flatness_max_qp          = from->flatness_max_qp;
to                 62 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->initial_scale_value      = from->initial_scale_value;
to                 63 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->scale_decrement_interval = from->scale_decrement_interval;
to                 64 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->scale_increment_interval = from->scale_increment_interval;
to                 65 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->nfl_bpg_offset           = from->nfl_bpg_offset;
to                 66 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->nsl_bpg_offset           = from->nsl_bpg_offset;
to                 67 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->slice_bpg_offset         = from->slice_bpg_offset;
to                 68 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->final_offset             = from->final_offset;
to                 69 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->vbr_enable               = from->vbr_enable;
to                 70 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->slice_chunk_size         = from->slice_chunk_size;
to                 71 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->second_line_offset_adj   = from->second_line_offset_adj;
to                 72 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c 	to->dsc_version_minor        = from->dsc_version_minor;
to               1349 drivers/gpu/drm/drm_bufs.c 	struct drm_buf_desc __user *to = &request->list[count];
to               1355 drivers/gpu/drm/drm_bufs.c 	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
to                146 drivers/gpu/drm/drm_dp_aux_dev.c static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                155 drivers/gpu/drm/drm_dp_aux_dev.c 	iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
to                157 drivers/gpu/drm/drm_dp_aux_dev.c 	while (iov_iter_count(to)) {
to                159 drivers/gpu/drm/drm_dp_aux_dev.c 		ssize_t todo = min(iov_iter_count(to), sizeof(buf));
to                175 drivers/gpu/drm/drm_dp_aux_dev.c 		if (copy_to_iter(buf, res, to) != res) {
to                376 drivers/gpu/drm/drm_ioc32.c 	drm_buf_desc32_t __user *to = compat_ptr(request->list);
to                382 drivers/gpu/drm/drm_ioc32.c 	if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
to                434 drivers/gpu/drm/drm_ioc32.c 	drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
to                441 drivers/gpu/drm/drm_ioc32.c 	if (copy_to_user(to, &v, sizeof(v)))
to                 73 drivers/gpu/drm/etnaviv/etnaviv_buffer.c 	u32 from, u32 to)
to                 78 drivers/gpu/drm/etnaviv/etnaviv_buffer.c 	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
to                 81 drivers/gpu/drm/etnaviv/etnaviv_buffer.c static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
to                 85 drivers/gpu/drm/etnaviv/etnaviv_buffer.c 		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
to                112 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	struct timespec64 ts, to;
to                114 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	to = timespec_to_timespec64(*timeout);
to                119 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	if (timespec64_compare(&to, &ts) <= 0)
to                122 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	ts = timespec64_sub(to, ts);
to                500 drivers/gpu/drm/i915/gt/intel_timeline.c 			     struct i915_request *to,
to                507 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(to->timeline == tl);
to                512 drivers/gpu/drm/i915/gt/intel_timeline.c 		err = cacheline_ref(cl, to);
to                772 drivers/gpu/drm/i915/i915_gem_gtt.c 	       struct i915_page_dma * const to,
to                779 drivers/gpu/drm/i915/i915_gem_gtt.c 	pd->entry[idx] = to;
to                780 drivers/gpu/drm/i915/i915_gem_gtt.c 	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
to                783 drivers/gpu/drm/i915/i915_gem_gtt.c #define set_pd_entry(pd, idx, to) \
to                784 drivers/gpu/drm/i915/i915_gem_gtt.c 	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
to                832 drivers/gpu/drm/i915/i915_request.c emit_semaphore_wait(struct i915_request *to,
to                841 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
to                844 drivers/gpu/drm/i915/i915_request.c 	if (already_busywaiting(to) & from->engine->mask)
to                845 drivers/gpu/drm/i915/i915_request.c 		return i915_sw_fence_await_dma_fence(&to->submit,
to                849 drivers/gpu/drm/i915/i915_request.c 	err = i915_request_await_start(to, from);
to                854 drivers/gpu/drm/i915/i915_request.c 	err = __i915_request_await_execution(to, from, NULL, gfp);
to                859 drivers/gpu/drm/i915/i915_request.c 	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
to                863 drivers/gpu/drm/i915/i915_request.c 	cs = intel_ring_begin(to, 4);
to                883 drivers/gpu/drm/i915/i915_request.c 	intel_ring_advance(to, cs);
to                884 drivers/gpu/drm/i915/i915_request.c 	to->sched.semaphores |= from->engine->mask;
to                885 drivers/gpu/drm/i915/i915_request.c 	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
to                890 drivers/gpu/drm/i915/i915_request.c i915_request_await_request(struct i915_request *to, struct i915_request *from)
to                894 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(to == from);
to                895 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(to->timeline == from->timeline);
to                898 drivers/gpu/drm/i915/i915_request.c 		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
to                902 drivers/gpu/drm/i915/i915_request.c 	if (to->engine->schedule) {
to                903 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
to                908 drivers/gpu/drm/i915/i915_request.c 	if (to->engine == from->engine) {
to                909 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
to                912 drivers/gpu/drm/i915/i915_request.c 	} else if (intel_engine_has_semaphores(to->engine) &&
to                913 drivers/gpu/drm/i915/i915_request.c 		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
to                914 drivers/gpu/drm/i915/i915_request.c 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
to                916 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sw_fence_await_dma_fence(&to->submit,
to                923 drivers/gpu/drm/i915/i915_request.c 	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
to                924 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
to               1059 drivers/gpu/drm/i915/i915_request.c i915_request_await_object(struct i915_request *to,
to               1076 drivers/gpu/drm/i915/i915_request.c 			ret = i915_request_await_dma_fence(to, shared[i]);
to               1092 drivers/gpu/drm/i915/i915_request.c 			ret = i915_request_await_dma_fence(to, excl);
to                285 drivers/gpu/drm/i915/i915_request.h int i915_request_await_object(struct i915_request *to,
to                 72 drivers/gpu/drm/mediatek/mtk_hdmi_regs.h #define CH_SWITCH(from, to)		((from) << ((to) * 3))
to                751 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
to                783 drivers/gpu/drm/ttm/ttm_bo_util.c 		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
to                821 drivers/gpu/drm/ttm/ttm_bo_util.c 		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
to                444 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
to                450 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c 		vmw_binding_transfer(to, from, entry);
to                196 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
to                743 drivers/gpu/drm/xen/xen_drm_front.c 	int to = 100;
to                760 drivers/gpu/drm/xen/xen_drm_front.c 				     --to)
to                763 drivers/gpu/drm/xen/xen_drm_front.c 	if (!to) {
to                370 drivers/gpu/ipu-v3/ipu-cpmem.c 	int bpp = 0, npb = 0, ro, go, bo, to;
to                375 drivers/gpu/ipu-v3/ipu-cpmem.c 	to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
to                387 drivers/gpu/ipu-v3/ipu-cpmem.c 		ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
to                 62 drivers/hid/hid-apple.c 	u16 to;
to                206 drivers/hid/hid-apple.c 			else if (test_bit(trans->to, input->key))
to                207 drivers/hid/hid-apple.c 				code = trans->to;
to                226 drivers/hid/hid-apple.c 				code = do_translate ? trans->to : trans->from;
to                247 drivers/hid/hid-apple.c 				input_event(input, usage->type, trans->to,
to                259 drivers/hid/hid-apple.c 				input_event(input, usage->type, trans->to, value);
to                268 drivers/hid/hid-apple.c 			input_event(input, usage->type, trans->to, value);
to                326 drivers/hid/hid-apple.c 		set_bit(trans->to, input->keybit);
to                329 drivers/hid/hid-apple.c 		set_bit(trans->to, input->keybit);
to                332 drivers/hid/hid-apple.c 		set_bit(trans->to, input->keybit);
to                335 drivers/hid/hid-apple.c 		set_bit(trans->to, input->keybit);
to                126 drivers/hid/hid-icade.c 	u16 to;
to                183 drivers/hid/hid-icade.c 			trans->to, trans->press);
to                200 drivers/hid/hid-icade.c 		hid_map_usage(hi, usage, bit, max, EV_KEY, trans->to);
to                201 drivers/hid/hid-icade.c 		set_bit(trans->to, hi->input->keybit);
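The hid-apple and hid-icade entries above (and applespi further down) share one pattern: a table of { from, to } key codes is walked until a zero terminator and trans->to is reported instead of the incoming code. A minimal stand-alone sketch of that lookup; the codes, table contents and names below are made-up examples, not the drivers' tables:

/* from -> to keycode translation, modelled on the trans tables above;
 * all values here are arbitrary example codes */
#include <stdio.h>
#include <stdint.h>

struct translation {
	uint16_t from;
	uint16_t to;
};

static const struct translation table[] = {
	{ 0x3a, 0xc4 },
	{ 0x3b, 0xc5 },
	{ 0, 0 }			/* zero terminator ends the walk */
};

static uint16_t translate(const struct translation *t, uint16_t code)
{
	for (; t->from; t++)
		if (t->from == code)
			return t->to;	/* mapped code */
	return code;			/* unmapped codes pass through */
}

int main(void)
{
	printf("0x%02x -> 0x%02x\n", 0x3a, translate(table, 0x3a));
	return 0;
}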
to                186 drivers/hwmon/adm1026.c #define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
to                124 drivers/hwmon/lm85.c #define SCALE(val, from, to)	(((val) * (to) + ((from) / 2)) / (from))
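SCALE() in adm1026.c and lm85.c above is integer rescaling with round-to-nearest: the (from)/2 term adds half a divisor before the division truncates. A stand-alone check of that arithmetic with arbitrary values:

/* same form as the hwmon SCALE() macro above; values are examples */
#include <stdio.h>

#define SCALE(val, from, to)	(((val) * (to) + ((from) / 2)) / (from))

int main(void)
{
	/* rescale a reading of 128 on a 0..255 range to a 0..1000 range */
	printf("%d\n", SCALE(128, 255, 1000));	/* 502, not truncated 501 */
	return 0;
}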
to                269 drivers/hwmon/pmbus/pmbus_core.c 	u8 to;
to                276 drivers/hwmon/pmbus/pmbus_core.c 	to = (from & ~mask) | (config & mask);
to                277 drivers/hwmon/pmbus/pmbus_core.c 	if (to != from) {
to                279 drivers/hwmon/pmbus/pmbus_core.c 					   pmbus_fan_config_registers[id], to);
to                206 drivers/hwspinlock/hwspinlock_core.c int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
to                212 drivers/hwspinlock/hwspinlock_core.c 	expire = msecs_to_jiffies(to) + jiffies;
to                227 drivers/hwspinlock/hwspinlock_core.c 			if (atomic_delay > to * 1000)
to                 54 drivers/iio/adc/dln2-adc.c 	unsigned int to;
to                 91 drivers/iio/adc/dln2-adc.c 		p->to + p->length == out_loc) {
to                 96 drivers/iio/adc/dln2-adc.c 		p->to = out_loc;
to                503 drivers/iio/adc/dln2-adc.c 		memcpy((void *)data.values + t->to,
to                787 drivers/iio/industrialio-buffer.c 	unsigned to;
to                807 drivers/iio/industrialio-buffer.c 		(*p)->to + (*p)->length == out_loc) {
to                814 drivers/iio/industrialio-buffer.c 		(*p)->to = out_loc;
to               1377 drivers/iio/industrialio-buffer.c 		memcpy(buffer->demux_bounce + t->to,
to                649 drivers/infiniband/core/uverbs_ioctl.c int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                658 drivers/infiniband/core/uverbs_ioctl.c 		*to = 0;
to                677 drivers/infiniband/core/uverbs_ioctl.c 	*to = flags;
to                682 drivers/infiniband/core/uverbs_ioctl.c int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                694 drivers/infiniband/core/uverbs_ioctl.c 	*to = flags;
to                770 drivers/infiniband/core/uverbs_ioctl.c int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                781 drivers/infiniband/core/uverbs_ioctl.c 		*to = *def_val;
to                783 drivers/infiniband/core/uverbs_ioctl.c 		*to = attr->ptr_attr.data;
to                786 drivers/infiniband/core/uverbs_ioctl.c 	if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
to                675 drivers/infiniband/hw/cxgb3/cxio_hal.c 			 u32 zbva, u64 to, u32 len, u8 page_size,
to                715 drivers/infiniband/hw/cxgb3/cxio_hal.c 		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
to                716 drivers/infiniband/hw/cxgb3/cxio_hal.c 		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
to                766 drivers/infiniband/hw/cxgb3/cxio_hal.c 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
to                771 drivers/infiniband/hw/cxgb3/cxio_hal.c 			     zbva, to, len, page_size, pbl_size, pbl_addr);
to                775 drivers/infiniband/hw/cxgb3/cxio_hal.c 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
to                779 drivers/infiniband/hw/cxgb3/cxio_hal.c 			     zbva, to, len, page_size, pbl_size, pbl_addr);
to                172 drivers/infiniband/hw/cxgb3/cxio_hal.h 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
to                175 drivers/infiniband/hw/cxgb3/cxio_hal.h 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
to                163 drivers/infiniband/hw/cxgb3/cxio_wr.h 	__be64 to;
to                 79 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
to                118 drivers/infiniband/hw/cxgb3/iwch_qp.c 			wqe->write.sgl[i].to =
to                269 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
to                278 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe->recv.sgl[i].to = 0;
to                333 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
to                341 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe->recv.sgl[i].to = 0;
to                273 drivers/infiniband/hw/cxgb4/mem.c 			   int bind_enabled, u32 zbva, u64 to,
to                327 drivers/infiniband/hw/cxgb4/mem.c 		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
to                328 drivers/infiniband/hw/cxgb4/mem.c 		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
to                133 drivers/infiniband/hw/cxgb4/t4fw_ri_api.h 	__be64 to;
to               9285 drivers/infiniband/hw/hfi1/chip.c 		u16 to;
to               9295 drivers/infiniband/hw/hfi1/chip.c 			result |= opa_link_xlate[i].to;
to               1145 drivers/infiniband/hw/qib/qib.h void qib_pio_copy(void __iomem *to, const void *from, size_t count);
to                 45 drivers/infiniband/hw/qib/qib_pio_copy.c void qib_pio_copy(void __iomem *to, const void *from, size_t count)
to                 48 drivers/infiniband/hw/qib/qib_pio_copy.c 	u64 __iomem *dst = to;
to                 57 drivers/infiniband/hw/qib/qib_pio_copy.c 	u32 __iomem *dst = to;
to               2591 drivers/infiniband/sw/rdmavt/qp.c 	u32 to;
to               2595 drivers/infiniband/sw/rdmavt/qp.c 	to = rvt_aeth_to_usec(aeth);
to               2596 drivers/infiniband/sw/rdmavt/qp.c 	trace_rvt_rnrnak_add(qp, to);
to               2598 drivers/infiniband/sw/rdmavt/qp.c 		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
to                 90 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 to),
to                 91 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, to),
to                 97 drivers/infiniband/sw/rdmavt/trace_qp.h 		__field(u32, to)
to                104 drivers/infiniband/sw/rdmavt/trace_qp.h 		__entry->to = to;
to                112 drivers/infiniband/sw/rdmavt/trace_qp.h 		__entry->to
to                118 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 to),
to                119 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, to));
to                123 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 to),
to                124 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, to));
to                128 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 to),
to                129 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, to));
to                462 drivers/input/keyboard/applespi.c 	u16 to;
to               1076 drivers/input/keyboard/applespi.c 			key = trans->to;
to               1088 drivers/input/keyboard/applespi.c 		key = trans->to;
to               1726 drivers/input/keyboard/applespi.c 		if (applespi_fn_codes[i].to)
to               1728 drivers/input/keyboard/applespi.c 					     EV_KEY, applespi_fn_codes[i].to);
to                448 drivers/input/serio/hil_mlc.c #define EXPECT(comp, to, got, got_wrong, timed_out) \
to                449 drivers/input/serio/hil_mlc.c { HILSE_EXPECT,		{ .packet = comp }, to, got, got_wrong, timed_out },
to                450 drivers/input/serio/hil_mlc.c #define EXPECT_LAST(comp, to, got, got_wrong, timed_out) \
to                451 drivers/input/serio/hil_mlc.c { HILSE_EXPECT_LAST,	{ .packet = comp }, to, got, got_wrong, timed_out },
to                452 drivers/input/serio/hil_mlc.c #define EXPECT_DISC(comp, to, got, got_wrong, timed_out) \
to                453 drivers/input/serio/hil_mlc.c { HILSE_EXPECT_DISC,	{ .packet = comp }, to, got, got_wrong, timed_out },
to                454 drivers/input/serio/hil_mlc.c #define IN(to, got, got_error, timed_out) \
to                455 drivers/input/serio/hil_mlc.c { HILSE_IN,		{ .packet = 0    }, to, got, got_error, timed_out },
to                706 drivers/iommu/iova.c copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
to                719 drivers/iommu/iova.c 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
to                753 drivers/irqchip/irq-gic-v3-its.c 					 struct its_cmd_block *to)
to                759 drivers/irqchip/irq-gic-v3-its.c 	to_idx = its_cmd_ptr_to_offset(its, to);
to               2765 drivers/irqchip/irq-gic-v3-its.c static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
to               2785 drivers/irqchip/irq-gic-v3-its.c 	target_col = &vpe_proxy.dev->its->collections[to];
to               2787 drivers/irqchip/irq-gic-v3-its.c 	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
to                875 drivers/isdn/hardware/mISDN/mISDNipac.c 	u8 starb, to = 50;
to                877 drivers/isdn/hardware/mISDN/mISDNipac.c 	while (to) {
to                882 drivers/isdn/hardware/mISDN/mISDNipac.c 		to--;
to                884 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (to < 50)
to                886 drivers/isdn/hardware/mISDN/mISDNipac.c 			 50 - to);
to                887 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (!to)
to                895 drivers/isdn/hardware/mISDN/mISDNipac.c 	u8 starb, to = 50;
to                897 drivers/isdn/hardware/mISDN/mISDNipac.c 	while (to) {
to                902 drivers/isdn/hardware/mISDN/mISDNipac.c 		to--;
to                904 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (to < 50)
to                906 drivers/isdn/hardware/mISDN/mISDNipac.c 			 50 - to);
to                907 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (!to)
to                798 drivers/md/dm-clone-target.c 	struct dm_io_region from, to;
to                828 drivers/md/dm-clone-target.c 	to.bdev = clone->dest_dev->bdev;
to                829 drivers/md/dm-clone-target.c 	to.sector = from.sector;
to                830 drivers/md/dm-clone-target.c 	to.count = from.count;
to                834 drivers/md/dm-clone-target.c 	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
to                332 drivers/md/dm-raid1.c 	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
to                354 drivers/md/dm-raid1.c 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
to                369 drivers/md/dm-raid1.c 	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
to               1294 drivers/md/dm-thin.c 	struct dm_io_region to;
to               1296 drivers/md/dm-thin.c 	to.bdev = tc->pool_dev->bdev;
to               1297 drivers/md/dm-thin.c 	to.sector = begin;
to               1298 drivers/md/dm-thin.c 	to.count = end - begin;
to               1300 drivers/md/dm-thin.c 	dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
to               1354 drivers/md/dm-thin.c 		struct dm_io_region from, to;
to               1360 drivers/md/dm-thin.c 		to.bdev = tc->pool_dev->bdev;
to               1361 drivers/md/dm-thin.c 		to.sector = data_dest * pool->sectors_per_block;
to               1362 drivers/md/dm-thin.c 		to.count = len;
to               1364 drivers/md/dm-thin.c 		dm_kcopyd_copy(pool->copier, &from, 1, &to,
to               1561 drivers/md/dm-writecache.c 	struct dm_io_region from, to;
to               1576 drivers/md/dm-writecache.c 		to.bdev = wc->dev->bdev;
to               1577 drivers/md/dm-writecache.c 		to.sector = read_original_sector(wc, e);
to               1578 drivers/md/dm-writecache.c 		to.count = n_sectors;
to               1593 drivers/md/dm-writecache.c 		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
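The dm-clone, dm-raid1, dm-thin and dm-writecache entries above all fill a struct dm_io_region source and destination and pass them to dm_kcopyd_copy() for an asynchronous copy. A kernel-context sketch of that setup (not buildable stand-alone); it mirrors only the calls visible above, the example_ names are placeholders, and the single-destination case is chosen for brevity (dm-raid1 passes an array of mirror regions):

/* illustrative dm-kcopyd setup; example_copy_region() is not a symbol
 * from any of the targets listed above */
#include <linux/dm-kcopyd.h>

static void example_copy_region(struct dm_kcopyd_client *kc,
				struct block_device *src_bdev,
				struct block_device *dst_bdev,
				sector_t sector, sector_t nr_sectors,
				dm_kcopyd_notify_fn fn, void *context)
{
	struct dm_io_region from, to;

	from.bdev = src_bdev;		/* where the data lives now */
	from.sector = sector;
	from.count = nr_sectors;

	to.bdev = dst_bdev;		/* single destination region */
	to.sector = sector;
	to.count = nr_sectors;

	/* one destination, no flags; fn(read_err, write_err, context)
	 * runs when the asynchronous copy completes */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, fn, context);
}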
to                 90 drivers/media/firewire/firedtv-avc.c static inline void clear_operands(struct avc_command_frame *c, int from, int to)
to                 92 drivers/media/firewire/firedtv-avc.c 	memset(&c->operand[from], 0, to - from + 1);
to                 97 drivers/media/firewire/firedtv-avc.c 	int to = ALIGN(from, 4);
to                 99 drivers/media/firewire/firedtv-avc.c 	if (from <= to && to <= LAST_OPERAND)
to                100 drivers/media/firewire/firedtv-avc.c 		clear_operands(c, from, to);
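pad_operands() above rounds the next operand index up to a four-byte boundary with ALIGN() and has clear_operands() zero the gap; the range is inclusive, hence the +1 in the memset on line 92 of firedtv-avc.c. A stand-alone illustration with example values:

/* ALIGN() padding as in pad_operands()/clear_operands() above;
 * the buffer and indices are example values */
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned char operand[16] = { 0 };
	int from = 5;
	int to = ALIGN(from, 4);		/* 5 rounds up to 8 */

	/* clear operand[from]..operand[to] inclusive, as clear_operands() does */
	memset(&operand[from], 0, to - from + 1);
	printf("from=%d to=%d cleared=%d bytes\n", from, to, to - from + 1);
	return 0;
}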
to                 46 drivers/media/pci/cobalt/cobalt-flash.c static void flash_copy_from(struct map_info *map, void *to,
to                 50 drivers/media/pci/cobalt/cobalt-flash.c 	u8 *dest = to;
to                 64 drivers/media/pci/cobalt/cobalt-flash.c static void flash_copy_to(struct map_info *map, unsigned long to,
to                 68 drivers/media/pci/cobalt/cobalt-flash.c 	u32 dest = to;
to                127 drivers/media/pci/cx18/cx18-io.h void cx18_memcpy_fromio(struct cx18 *cx, void *to,
to                130 drivers/media/pci/cx18/cx18-io.h 	memcpy_fromio(to, from, len);
to                 77 drivers/media/pci/ivtv/ivtv-queue.c 		struct ivtv_queue *to, int clear)
to                 81 drivers/media/pci/ivtv/ivtv-queue.c 	list_move_tail(from->list.next, &to->list);
to                 88 drivers/media/pci/ivtv/ivtv-queue.c 	to->buffers++;
to                 89 drivers/media/pci/ivtv/ivtv-queue.c 	to->length += s->buf_size;
to                 90 drivers/media/pci/ivtv/ivtv-queue.c 	to->bytesused += buf->bytesused - buf->readpos;
to                111 drivers/media/pci/ivtv/ivtv-queue.c 		    struct ivtv_queue *to, int needed_bytes)
to                116 drivers/media/pci/ivtv/ivtv-queue.c 	int to_free = to == &s->q_free;
to                156 drivers/media/pci/ivtv/ivtv-queue.c 		u32 old_length = to->length;
to                158 drivers/media/pci/ivtv/ivtv-queue.c 		while (to->length - old_length < needed_bytes) {
to                159 drivers/media/pci/ivtv/ivtv-queue.c 			ivtv_queue_move_buf(s, from, to, 1);
to                163 drivers/media/pci/ivtv/ivtv-queue.c 		u32 old_bytesused = to->bytesused;
to                165 drivers/media/pci/ivtv/ivtv-queue.c 		while (to->bytesused - old_bytesused < needed_bytes) {
to                166 drivers/media/pci/ivtv/ivtv-queue.c 			ivtv_queue_move_buf(s, from, to, to_free);
to                 63 drivers/media/pci/ivtv/ivtv-queue.h 		    struct ivtv_queue *to, int needed_bytes);
to                892 drivers/media/platform/omap3isp/isppreview.c 			void *to = (void *)params + attr->param_offset;
to                895 drivers/media/platform/omap3isp/isppreview.c 			if (to && from && size) {
to                896 drivers/media/platform/omap3isp/isppreview.c 				if (copy_from_user(to, from, size)) {
to                595 drivers/media/platform/sti/bdisp/bdisp-hw.c static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc)
to                599 drivers/media/platform/sti/bdisp/bdisp-hw.c 	if (!to)
to                602 drivers/media/platform/sti/bdisp/bdisp-hw.c 	if (to == from) {
to                607 drivers/media/platform/sti/bdisp/bdisp-hw.c 	tmp = (from << 10) / to;
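bdisp_hw_get_inc() above computes the scaler's sampling increment as a fixed-point ratio with 10 fractional bits: 1 << 10 for the 1:1 case, otherwise (from << 10) / to. A stand-alone version of just that arithmetic; the driver's additional range checking is omitted and the resolutions below are arbitrary:

#include <stdio.h>
#include <stdint.h>

/* increment in .10 fixed point, as in bdisp_hw_get_inc() above */
static int get_inc(uint32_t from, uint32_t to, uint16_t *inc)
{
	if (!to)
		return -1;
	if (to == from) {
		*inc = 1 << 10;			/* exactly 1.0 */
		return 0;
	}
	*inc = (uint16_t)((from << 10) / to);
	return 0;
}

int main(void)
{
	uint16_t inc;

	get_inc(1920, 1280, &inc);		/* downscale 1920 -> 1280 */
	printf("inc = %u (%u.%03u)\n", (unsigned)inc,
	       (unsigned)(inc >> 10),
	       (unsigned)((inc & 0x3ff) * 1000 / 1024));	/* 1536 = 1.500 */
	return 0;
}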
to                243 drivers/media/tuners/mt20xx.c 			       unsigned int from, unsigned int to)
to                250 drivers/media/tuners/mt20xx.c 		  rfin,if1,if2,from,to);
to                257 drivers/media/tuners/mt20xx.c 	ret=mt2032_compute_freq(fe,rfin,if1,if2,from,to,&buf[1],&sel,priv->xogc);
to                302 drivers/media/tuners/mt20xx.c 	int if2,from,to;
to                308 drivers/media/tuners/mt20xx.c 		to   = 46750*1000;
to                313 drivers/media/tuners/mt20xx.c 		to   = 39900*1000;
to                318 drivers/media/tuners/mt20xx.c 			   1090*1000*1000, if2, from, to);
to                684 drivers/media/usb/cx231xx/cx231xx-video.c void cx231xx_swab(u16 *from, u16 *to, u16 len)
to                692 drivers/media/usb/cx231xx/cx231xx-video.c 		to[i] = (from[i] << 8) | (from[i] >> 8);
to                827 drivers/media/usb/cx231xx/cx231xx.h void cx231xx_swab(u16 *from, u16 *to, u16 len);
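cx231xx_swab() above byte-swaps a buffer of 16-bit words with (from[i] << 8) | (from[i] >> 8); the driver takes a byte count, while the sketch below takes a word count for clarity:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* exchange the high and low byte of each 16-bit word */
static void swab16_buf(const uint16_t *from, uint16_t *to, size_t words)
{
	for (size_t i = 0; i < words; i++)
		to[i] = (uint16_t)((from[i] << 8) | (from[i] >> 8));
}

int main(void)
{
	uint16_t in[2] = { 0x1234, 0xabcd }, out[2];

	swab16_buf(in, out, 2);
	printf("%04x %04x\n", out[0], out[1]);	/* 3412 cdab */
	return 0;
}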
to                 38 drivers/media/v4l2-core/v4l2-compat-ioctl32.c #define assign_in_user(to, from)					\
to                 42 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	get_user(__assign_tmp, from) || put_user(__assign_tmp, to);	\
to                 95 drivers/media/v4l2-core/v4l2-compat-ioctl32.c #define assign_in_user_cast(to, from)					\
to                 99 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	get_user_cast(__assign_tmp, from) || put_user(__assign_tmp, to);\
to               1954 drivers/media/v4l2-core/v4l2-ctrls.c 		       union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to)
to               1958 drivers/media/v4l2-core/v4l2-ctrls.c 	memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
to                139 drivers/media/v4l2-core/v4l2-dev.c static inline int devnode_find(struct video_device *vdev, int from, int to)
to                141 drivers/media/v4l2-core/v4l2-dev.c 	return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
to                259 drivers/mfd/sm501.c 	unsigned long to;
to                264 drivers/mfd/sm501.c 	to = (misc & ~clear) | set;
to                266 drivers/mfd/sm501.c 	if (to != misc) {
to                267 drivers/mfd/sm501.c 		smc501_writel(to, sm->regs + SM501_MISC_CONTROL);
to                274 drivers/mfd/sm501.c 	return to;
to                315 drivers/mfd/sm501.c int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
to                336 drivers/mfd/sm501.c 		sm->unit_power[unit], to);
to                338 drivers/mfd/sm501.c 	if (to == 0 && sm->unit_power[unit] == 0) {
to                343 drivers/mfd/sm501.c 	sm->unit_power[unit] += to ? 1 : -1;
to                344 drivers/mfd/sm501.c 	to = sm->unit_power[unit] ? 1 : 0;
to                346 drivers/mfd/sm501.c 	if (to) {
to                520 drivers/mfd/sm501.c 	struct sm501_clock to;
to                535 drivers/mfd/sm501.c 						     &to, 5) / 2);
to                536 drivers/mfd/sm501.c 			reg = to.shift & 0x07;/* bottom 3 bits are shift */
to                537 drivers/mfd/sm501.c 			if (to.divider == 3)
to                539 drivers/mfd/sm501.c 			else if (to.divider == 5)
to                542 drivers/mfd/sm501.c 			pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m;
to                545 drivers/mfd/sm501.c 							 &to, 5) / 2);
to                546 drivers/mfd/sm501.c 			reg = to.shift & 0x07;/* bottom 3 bits are shift */
to                547 drivers/mfd/sm501.c 			if (to.divider == 3)
to                549 drivers/mfd/sm501.c 			else if (to.divider == 5)
to                551 drivers/mfd/sm501.c 			if (to.mclk != 288000000)
to                560 drivers/mfd/sm501.c 		sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
to                561 drivers/mfd/sm501.c 		reg=to.shift & 0x07;	/* bottom 3 bits are shift */
to                562 drivers/mfd/sm501.c 		if (to.divider == 3)
to                564 drivers/mfd/sm501.c 		if (to.mclk != 288000000)
to                572 drivers/mfd/sm501.c 		sm501_freq = sm501_select_clock( req_freq, &to, 3);
to                573 drivers/mfd/sm501.c 		reg=to.shift & 0x07;	/* bottom 3 bits are shift */
to                574 drivers/mfd/sm501.c 		if (to.divider == 3)
to                576 drivers/mfd/sm501.c 		if (to.mclk != 288000000)
to                645 drivers/mfd/sm501.c 	struct sm501_clock to;
to                652 drivers/mfd/sm501.c 						     &to, 5) / 2);
to                655 drivers/mfd/sm501.c 							 &to, 5) / 2);
to                660 drivers/mfd/sm501.c 		sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
to                665 drivers/mfd/sm501.c 		sm501_freq = sm501_select_clock(req_freq, &to, 3);
to                377 drivers/misc/vmw_vmci/vmci_queue_pair.c static int qp_memcpy_from_queue_iter(struct iov_iter *to,
to                405 drivers/misc/vmw_vmci/vmci_queue_pair.c 		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
to               2589 drivers/misc/vmw_vmci/vmci_queue_pair.c 				 struct iov_iter *to,
to               2592 drivers/misc/vmw_vmci/vmci_queue_pair.c 	size_t buf_size = iov_iter_count(to);
to               2614 drivers/misc/vmw_vmci/vmci_queue_pair.c 		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
to               2620 drivers/misc/vmw_vmci/vmci_queue_pair.c 		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
to               2622 drivers/misc/vmw_vmci/vmci_queue_pair.c 			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
to               3060 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct iov_iter to;
to               3066 drivers/misc/vmw_vmci/vmci_queue_pair.c 	iov_iter_kvec(&to, READ, &v, 1, buf_size);
to               3074 drivers/misc/vmw_vmci/vmci_queue_pair.c 					   &to, true);
to               3104 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct iov_iter to;
to               3111 drivers/misc/vmw_vmci/vmci_queue_pair.c 	iov_iter_kvec(&to, READ, &v, 1, buf_size);
to               3119 drivers/misc/vmw_vmci/vmci_queue_pair.c 					   &to, false);
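vmci_qpair_peek() and vmci_qpair_dequeue() above wrap the caller's buffer in an iov_iter built from a single kvec so the shared qp_memcpy_from_queue_iter() path can use copy_to_iter(). A kernel-context sketch of that wrapping (not buildable stand-alone), using only the calls visible above; example_fill() and its payload are placeholders:

#include <linux/uio.h>

/* copy up to buf_size bytes of a placeholder payload into buf through
 * an iov_iter, the way the vmci queue-pair code feeds its common
 * dequeue helper; returns the number of bytes actually copied */
static size_t example_fill(void *buf, size_t buf_size)
{
	struct kvec v = { .iov_base = buf, .iov_len = buf_size };
	struct iov_iter to;
	static const char payload[] = "payload";
	size_t n = buf_size < sizeof(payload) ? buf_size : sizeof(payload);

	/* READ: the iterator describes the destination of the copy */
	iov_iter_kvec(&to, READ, &v, 1, buf_size);
	return copy_to_iter(payload, n, &to);
}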
to               1656 drivers/mmc/core/core.c 			unsigned int to, unsigned int arg)
to               1684 drivers/mmc/core/core.c 		qty += ((to >> card->erase_shift) -
to               1687 drivers/mmc/core/core.c 		qty += to - from + 1;
to               1689 drivers/mmc/core/core.c 		qty += ((to / card->erase_size) -
to               1694 drivers/mmc/core/core.c 		to <<= 9;
to               1716 drivers/mmc/core/core.c 	cmd.arg = to;
to               1806 drivers/mmc/core/core.c 					 unsigned int *to,
to               1846 drivers/mmc/core/core.c 	*to = from_new + nr_new;
to               1864 drivers/mmc/core/core.c 	unsigned int rem, to = from + nr;
to               1891 drivers/mmc/core/core.c 		nr = mmc_align_erase_size(card, &from, &to, nr);
to               1896 drivers/mmc/core/core.c 	if (to <= from)
to               1900 drivers/mmc/core/core.c 	to -= 1;
to               1914 drivers/mmc/core/core.c 		if ((err) || (to <= from))
to               1918 drivers/mmc/core/core.c 	return mmc_do_erase(card, from, to, arg);
to                425 drivers/mmc/host/cb710-mmc.c 	int error, to;
to                432 drivers/mmc/host/cb710-mmc.c 	to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE);
to                434 drivers/mmc/host/cb710-mmc.c 		error = to;
to               1623 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
to               1631 drivers/mtd/chips/cfi_cmdset_0001.c 	chipnum = to >> cfi->chipshift;
to               1632 drivers/mtd/chips/cfi_cmdset_0001.c 	ofs = to  - (chipnum << cfi->chipshift);
to               1869 drivers/mtd/chips/cfi_cmdset_0001.c 				unsigned long count, loff_t to, size_t *retlen)
to               1885 drivers/mtd/chips/cfi_cmdset_0001.c 	chipnum = to >> cfi->chipshift;
to               1886 drivers/mtd/chips/cfi_cmdset_0001.c 	ofs = to - (chipnum << cfi->chipshift);
to               1920 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
to               1928 drivers/mtd/chips/cfi_cmdset_0001.c 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
to                 86 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
to               1820 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
to               1830 drivers/mtd/chips/cfi_cmdset_0002.c 	chipnum = to >> cfi->chipshift;
to               1831 drivers/mtd/chips/cfi_cmdset_0002.c 	ofs = to  - (chipnum << cfi->chipshift);
to               2100 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
to               2110 drivers/mtd/chips/cfi_cmdset_0002.c 	chipnum = to >> cfi->chipshift;
to               2111 drivers/mtd/chips/cfi_cmdset_0002.c 	ofs = to  - (chipnum << cfi->chipshift);
to               2312 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
to               2321 drivers/mtd/chips/cfi_cmdset_0002.c 	chipnum = to >> cfi->chipshift;
to               2322 drivers/mtd/chips/cfi_cmdset_0002.c 	ofs = to - (chipnum << cfi->chipshift);
to                 40 drivers/mtd/chips/cfi_cmdset_0020.c 		unsigned long count, loff_t to, size_t *retlen);
to                608 drivers/mtd/chips/cfi_cmdset_0020.c static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
to                618 drivers/mtd/chips/cfi_cmdset_0020.c 	chipnum = to >> cfi->chipshift;
to                619 drivers/mtd/chips/cfi_cmdset_0020.c 	ofs = to  - (chipnum << cfi->chipshift);
to                666 drivers/mtd/chips/cfi_cmdset_0020.c 		unsigned long count, loff_t to, size_t *retlen)
to                696 drivers/mtd/chips/cfi_cmdset_0020.c 			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
to                703 drivers/mtd/chips/cfi_cmdset_0020.c 			to += ECCBUF_SIZE;
to                706 drivers/mtd/chips/cfi_cmdset_0020.c 			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
to                711 drivers/mtd/chips/cfi_cmdset_0020.c 			to += thislen;
to                721 drivers/mtd/chips/cfi_cmdset_0020.c 		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
to                 77 drivers/mtd/chips/map_absent.c static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
to                114 drivers/mtd/chips/map_ram.c static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
to                118 drivers/mtd/chips/map_ram.c 	map_copy_to(map, to, buf, len);
to                105 drivers/mtd/chips/map_rom.c static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
to                216 drivers/mtd/devices/bcm47xxsflash.c static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
to                228 drivers/mtd/devices/bcm47xxsflash.c 			written = bcm47xxsflash_write_st(mtd, to, len, buf);
to                231 drivers/mtd/devices/bcm47xxsflash.c 			written = bcm47xxsflash_write_at(mtd, to, len, buf);
to                237 drivers/mtd/devices/bcm47xxsflash.c 			pr_err("Error writing at offset 0x%llX\n", to);
to                240 drivers/mtd/devices/bcm47xxsflash.c 		to += (loff_t)written;
to                 53 drivers/mtd/devices/block2mtd.c static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
to                 57 drivers/mtd/devices/block2mtd.c 	int index = to >> PAGE_SHIFT;	// page index
to                136 drivers/mtd/devices/block2mtd.c 		loff_t to, size_t len, size_t *retlen)
to                140 drivers/mtd/devices/block2mtd.c 	int index = to >> PAGE_SHIFT;	// page index
to                141 drivers/mtd/devices/block2mtd.c 	int offset = to & ~PAGE_MASK;	// page offset
to                175 drivers/mtd/devices/block2mtd.c static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
to                182 drivers/mtd/devices/block2mtd.c 	err = _block2mtd_write(dev, buf, to, len, retlen);
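block2mtd above splits a byte offset into a backing-page index and an offset within that page with to >> PAGE_SHIFT and to & ~PAGE_MASK. A stand-alone illustration assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t to = 0x12345;			/* arbitrary byte offset */
	uint64_t index = to >> PAGE_SHIFT;	/* which backing page */
	uint64_t offset = to & ~PAGE_MASK;	/* position inside that page */

	/* prints: index=18 offset=0x345 */
	printf("index=%llu offset=0x%llx\n",
	       (unsigned long long)index, (unsigned long long)offset);
	return 0;
}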
to               1223 drivers/mtd/devices/docg3.c static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
to               1229 drivers/mtd/devices/docg3.c 	doc_dbg("doc_write_page(to=%lld)\n", to);
to               1230 drivers/mtd/devices/docg3.c 	calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
to               1336 drivers/mtd/devices/docg3.c static int doc_backup_oob(struct docg3 *docg3, loff_t to,
to               1347 drivers/mtd/devices/docg3.c 	docg3->oob_write_ofs = to;
to                500 drivers/mtd/devices/lart.c static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen,const u_char *buf)
to                506 drivers/mtd/devices/lart.c    printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
to                513 drivers/mtd/devices/lart.c    if (to & (BUSWIDTH - 1))
to                515 drivers/mtd/devices/lart.c 		__u32 aligned = to & ~(BUSWIDTH - 1);
to                516 drivers/mtd/devices/lart.c 		int gap = to - aligned;
to                526 drivers/mtd/devices/lart.c 		to += n;
to                534 drivers/mtd/devices/lart.c 		if (!write_dword (to,*((__u32 *) buf))) return (-EIO);
to                536 drivers/mtd/devices/lart.c 		to += BUSWIDTH;
to                550 drivers/mtd/devices/lart.c 		if (!write_dword (to,*((__u32 *) tmp))) return (-EIO);
to                 60 drivers/mtd/devices/mchp23k256.c static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
to                 74 drivers/mtd/devices/mchp23k256.c 	mchp23k256_addr2cmd(flash, to, command);
to                 63 drivers/mtd/devices/ms02-nv.c static int ms02nv_write(struct mtd_info *mtd, loff_t to,
to                 68 drivers/mtd/devices/ms02-nv.c 	memcpy(mp->uaddr + to, buf, len);
to                293 drivers/mtd/devices/mtd_dataflash.c static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
to                307 drivers/mtd/devices/mtd_dataflash.c 		(unsigned int)to, (unsigned int)(to + len));
to                315 drivers/mtd/devices/mtd_dataflash.c 	pageaddr = ((unsigned)to / priv->page_size);
to                316 drivers/mtd/devices/mtd_dataflash.c 	offset = ((unsigned)to % priv->page_size);
to                107 drivers/mtd/devices/mtdram.c static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
to                110 drivers/mtd/devices/mtdram.c 	memcpy((char *)mtd->priv + to, buf, len);
to                 69 drivers/mtd/devices/phram.c static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
to                 74 drivers/mtd/devices/phram.c 	memcpy(start + to, buf, len);
to                279 drivers/mtd/devices/pmc551.c static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
to                291 drivers/mtd/devices/pmc551.c 		(long)to, (long)len, (long)priv->asize);
to                294 drivers/mtd/devices/pmc551.c 	end = to + len - 1;
to                295 drivers/mtd/devices/pmc551.c 	soff_hi = to & ~(priv->asize - 1);
to                299 drivers/mtd/devices/pmc551.c 	pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL);
to                153 drivers/mtd/devices/powernv_flash.c static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
to                156 drivers/mtd/devices/powernv_flash.c 	return powernv_flash_async_op(mtd, FLASH_OP_WRITE, to,
to                117 drivers/mtd/devices/slram.c static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
to                122 drivers/mtd/devices/slram.c 	memcpy(priv->start + to, buf, len);
to                674 drivers/mtd/devices/spear_smi.c static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
to                692 drivers/mtd/devices/spear_smi.c 	dest = flash->base_addr + to;
to                695 drivers/mtd/devices/spear_smi.c 	page_offset = (u32)to % flash->page_size;
to                249 drivers/mtd/devices/sst25l.c static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
to                256 drivers/mtd/devices/sst25l.c 	if ((uint32_t)to % mtd->writesize)
to                272 drivers/mtd/devices/sst25l.c 		command[1] = (to + i) >> 16;
to                273 drivers/mtd/devices/sst25l.c 		command[2] = (to + i) >> 8;
to                274 drivers/mtd/devices/sst25l.c 		command[3] = (to + i);
to               1746 drivers/mtd/devices/st_spi_fsm.c static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
to               1756 drivers/mtd/devices/st_spi_fsm.c 	dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
to               1759 drivers/mtd/devices/st_spi_fsm.c 	page_offs = to % FLASH_PAGESIZE;
to               1767 drivers/mtd/devices/st_spi_fsm.c 		ret = stfsm_write(fsm, b, bytes, to);
to               1773 drivers/mtd/devices/st_spi_fsm.c 		to += bytes;
to                 41 drivers/mtd/hyperbus/hyperbus-core.c static void hyperbus_copy_from(struct map_info *map, void *to,
to                 47 drivers/mtd/hyperbus/hyperbus-core.c 	ctlr->ops->copy_from(hbdev, to, from, len);
to                 50 drivers/mtd/hyperbus/hyperbus-core.c static void hyperbus_copy_to(struct map_info *map, unsigned long to,
to                 56 drivers/mtd/hyperbus/hyperbus-core.c 	ctlr->ops->copy_to(hbdev, to, from, len);
to                 21 drivers/mtd/lpddr/lpddr_cmds.c static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
to                 24 drivers/mtd/lpddr/lpddr_cmds.c 				unsigned long count, loff_t to, size_t *retlen);
to                606 drivers/mtd/lpddr/lpddr_cmds.c static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
to                614 drivers/mtd/lpddr/lpddr_cmds.c 	return lpddr_writev(mtd, &vec, 1, to, retlen);
to                619 drivers/mtd/lpddr/lpddr_cmds.c 				unsigned long count, loff_t to, size_t *retlen)
to                635 drivers/mtd/lpddr/lpddr_cmds.c 	chipnum = to >> lpddr->chipshift;
to                637 drivers/mtd/lpddr/lpddr_cmds.c 	ofs = to;
to                 75 drivers/mtd/maps/dc21285.c static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                 77 drivers/mtd/maps/dc21285.c 	memcpy(to, (void*)(map->virt + from), len);
to                105 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                110 drivers/mtd/maps/dc21285.c 		dc21285_write32(map, d, to);
to                112 drivers/mtd/maps/dc21285.c 		to += 4;
to                117 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                122 drivers/mtd/maps/dc21285.c 		dc21285_write16(map, d, to);
to                124 drivers/mtd/maps/dc21285.c 		to += 2;
to                129 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                133 drivers/mtd/maps/dc21285.c 	dc21285_write8(map, d, to);
to                135 drivers/mtd/maps/dc21285.c 	to++;
to                101 drivers/mtd/maps/ixp4xx.c static void ixp4xx_copy_from(struct map_info *map, void *to,
to                104 drivers/mtd/maps/ixp4xx.c 	u8 *dest = (u8 *) to;
to                 81 drivers/mtd/maps/lantiq-flash.c ltq_copy_from(struct map_info *map, void *to,
to                 85 drivers/mtd/maps/lantiq-flash.c 	unsigned char *t = (unsigned char *)to;
to                 95 drivers/mtd/maps/lantiq-flash.c ltq_copy_to(struct map_info *map, unsigned long to,
to                 99 drivers/mtd/maps/lantiq-flash.c 	unsigned char *t = (unsigned char *)map->virt + to;
to                 23 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                 25 drivers/mtd/maps/map_funcs.c 	inline_map_copy_from(map, to, from, len);
to                 28 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                 30 drivers/mtd/maps/map_funcs.c 	inline_map_copy_to(map, to, from, len);
to                 53 drivers/mtd/maps/pci.c static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
to                 56 drivers/mtd/maps/pci.c 	memcpy_fromio(to, map->base + map->translate(map, from), len);
to                 71 drivers/mtd/maps/pci.c static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
to                 74 drivers/mtd/maps/pci.c 	memcpy_toio(map->base + map->translate(map, to), from, len);
to                 83 drivers/mtd/maps/pcmciamtd.c static void __iomem *remap_window(struct map_info *map, unsigned long to)
to                 95 drivers/mtd/maps/pcmciamtd.c 	offset = to & ~(dev->win_size-1);
to                104 drivers/mtd/maps/pcmciamtd.c 	return dev->win_base + (to & (dev->win_size-1));
to                138 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                143 drivers/mtd/maps/pcmciamtd.c 	pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
to                155 drivers/mtd/maps/pcmciamtd.c 		pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
to                156 drivers/mtd/maps/pcmciamtd.c 		memcpy_fromio(to, addr, toread);
to                158 drivers/mtd/maps/pcmciamtd.c 		to += toread;
to                187 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                192 drivers/mtd/maps/pcmciamtd.c 	pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
to                194 drivers/mtd/maps/pcmciamtd.c 		int towrite = win_size - (to & (win_size-1));
to                200 drivers/mtd/maps/pcmciamtd.c 		addr = remap_window(map, to);
to                207 drivers/mtd/maps/pcmciamtd.c 		to += towrite;
to                247 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                254 drivers/mtd/maps/pcmciamtd.c 	pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
to                255 drivers/mtd/maps/pcmciamtd.c 	memcpy_fromio(to, win_base + from, len);
to                285 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                292 drivers/mtd/maps/pcmciamtd.c 	pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
to                293 drivers/mtd/maps/pcmciamtd.c 	memcpy_toio(win_base + to, from, len);
to                108 drivers/mtd/maps/physmap-gemini.c 						void *to, unsigned long from,
to                112 drivers/mtd/maps/physmap-gemini.c 	inline_map_copy_from(map, to, from, len);
to                117 drivers/mtd/maps/physmap-gemini.c 					      unsigned long to,
to                121 drivers/mtd/maps/physmap-gemini.c 	inline_map_copy_to(map, to, from, len);
to                 54 drivers/mtd/maps/plat-ram.c static inline void platram_setrw(struct platram_info *info, int to)
to                 60 drivers/mtd/maps/plat-ram.c 		(info->pdata->set_rw)(info->dev, to);
to                112 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                121 drivers/mtd/maps/sbc_gxx.c 		memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
to                123 drivers/mtd/maps/sbc_gxx.c 		to += thislen;
to                137 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                141 drivers/mtd/maps/sbc_gxx.c 		if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
to                142 drivers/mtd/maps/sbc_gxx.c 			thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
to                145 drivers/mtd/maps/sbc_gxx.c 		sbc_gxx_page(map, to);
to                146 drivers/mtd/maps/sbc_gxx.c 		memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
to                148 drivers/mtd/maps/sbc_gxx.c 		to += thislen;
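The pcmciamtd and sbc_gxx map drivers above copy through a small banked window: each chunk is clamped so it never crosses the window boundary, the window is re-paged, and source, destination and length advance. A stand-alone sketch of that loop; the window size, select_window() and the offsets are examples, not the drivers' values:

#include <stdio.h>
#include <string.h>

#define WINDOW_LENGTH	0x2000UL		/* example 8 KiB window */
#define WINDOW_MASK	(WINDOW_LENGTH - 1)

/* stand-in for the drivers' paging-register write */
static void select_window(unsigned long to)
{
	printf("page window to 0x%lx\n", to & ~WINDOW_MASK);
}

static void windowed_copy_to(unsigned char *win_base, unsigned long to,
			     const void *from, size_t len)
{
	while (len) {
		size_t thislen = len;

		/* never let one copy cross the window boundary */
		if (thislen > WINDOW_LENGTH - (to & WINDOW_MASK))
			thislen = WINDOW_LENGTH - (to & WINDOW_MASK);
		select_window(to);
		memcpy(win_base + (to & WINDOW_MASK), from, thislen);
		from = (const unsigned char *)from + thislen;
		to += thislen;
		len -= thislen;
	}
}

int main(void)
{
	static unsigned char window[WINDOW_LENGTH];
	unsigned char data[0x300] = { 0 };

	/* crosses one window boundary, so two chunks are copied */
	windowed_copy_to(window, 0x1f00, data, sizeof(data));
	return 0;
}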
to                416 drivers/mtd/maps/vmu-flash.c static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
to                433 drivers/mtd/maps/vmu-flash.c 	if (to + len > numblocks * card->blocklen)
to                434 drivers/mtd/maps/vmu-flash.c 		len = numblocks * card->blocklen - to;
to                440 drivers/mtd/maps/vmu-flash.c 	vblock = ofs_to_block(to, mtd, partition);
to                454 drivers/mtd/mtdchar.c 			    struct nand_ecclayout_user *to)
to                459 drivers/mtd/mtdchar.c 	if (!mtd || !to)
to                462 drivers/mtd/mtdchar.c 	memset(to, 0, sizeof(*to));
to                464 drivers/mtd/mtdchar.c 	to->eccbytes = 0;
to                479 drivers/mtd/mtdchar.c 			to->eccpos[i] = eccpos++;
to                480 drivers/mtd/mtdchar.c 			to->eccbytes++;
to                493 drivers/mtd/mtdchar.c 		to->oobfree[i].offset = oobregion.offset;
to                494 drivers/mtd/mtdchar.c 		to->oobfree[i].length = oobregion.length;
to                495 drivers/mtd/mtdchar.c 		to->oobavail += to->oobfree[i].length;
to                501 drivers/mtd/mtdchar.c static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
to                506 drivers/mtd/mtdchar.c 	if (!mtd || !to)
to                509 drivers/mtd/mtdchar.c 	memset(to, 0, sizeof(*to));
to                511 drivers/mtd/mtdchar.c 	to->eccbytes = 0;
to                512 drivers/mtd/mtdchar.c 	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
to                523 drivers/mtd/mtdchar.c 		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
to                528 drivers/mtd/mtdchar.c 			to->eccpos[i] = eccpos++;
to                529 drivers/mtd/mtdchar.c 			to->eccbytes++;
to                542 drivers/mtd/mtdchar.c 		to->oobfree[i][0] = oobregion.offset;
to                543 drivers/mtd/mtdchar.c 		to->oobfree[i][1] = oobregion.length;
to                546 drivers/mtd/mtdchar.c 	to->useecc = MTD_NANDECC_AUTOPLACE;
to                106 drivers/mtd/mtdconcat.c concat_write(struct mtd_info *mtd, loff_t to, size_t len,
to                117 drivers/mtd/mtdconcat.c 		if (to >= subdev->size) {
to                119 drivers/mtd/mtdconcat.c 			to -= subdev->size;
to                122 drivers/mtd/mtdconcat.c 		if (to + len > subdev->size)
to                123 drivers/mtd/mtdconcat.c 			size = subdev->size - to;
to                127 drivers/mtd/mtdconcat.c 		err = mtd_write(subdev, to, size, &retsize, buf);
to                138 drivers/mtd/mtdconcat.c 		to = 0;
to                145 drivers/mtd/mtdconcat.c 		unsigned long count, loff_t to, size_t * retlen)
to                160 drivers/mtd/mtdconcat.c 		uint64_t __to = to;
to                175 drivers/mtd/mtdconcat.c 		if (to >= subdev->size) {
to                176 drivers/mtd/mtdconcat.c 			to -= subdev->size;
to                180 drivers/mtd/mtdconcat.c 		size = min_t(uint64_t, total_len, subdev->size - to);
to                194 drivers/mtd/mtdconcat.c 				 entry_high - entry_low + 1, to, &retsize);
to                211 drivers/mtd/mtdconcat.c 		to = 0;
to                276 drivers/mtd/mtdconcat.c concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
to                290 drivers/mtd/mtdconcat.c 		if (to >= subdev->size) {
to                291 drivers/mtd/mtdconcat.c 			to -= subdev->size;
to                296 drivers/mtd/mtdconcat.c 		if (to + devops.len > subdev->size)
to                297 drivers/mtd/mtdconcat.c 			devops.len = subdev->size - to;
to                299 drivers/mtd/mtdconcat.c 		err = mtd_write_oob(subdev, to, &devops);
to                317 drivers/mtd/mtdconcat.c 		to = 0;
to               1160 drivers/mtd/mtdcore.c int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
to               1169 drivers/mtd/mtdcore.c 	ret = mtd_write_oob(mtd, to, &ops);
to               1183 drivers/mtd/mtdcore.c int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
to               1189 drivers/mtd/mtdcore.c 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
to               1198 drivers/mtd/mtdcore.c 	return mtd->_panic_write(mtd, to, len, retlen, buf);
to               1270 drivers/mtd/mtdcore.c int mtd_write_oob(struct mtd_info *mtd, loff_t to,
to               1280 drivers/mtd/mtdcore.c 	ret = mtd_check_oob_ops(mtd, to, ops);
to               1291 drivers/mtd/mtdcore.c 		return mtd->_write_oob(mtd, to, ops);
to               1293 drivers/mtd/mtdcore.c 		return mtd->_write(mtd, to, ops->len, &ops->retlen,
to               1709 drivers/mtd/mtdcore.c int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
to               1719 drivers/mtd/mtdcore.c 	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
to               1822 drivers/mtd/mtdcore.c 			      unsigned long count, loff_t to, size_t *retlen)
to               1831 drivers/mtd/mtdcore.c 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
to               1836 drivers/mtd/mtdcore.c 		to += vecs[i].iov_len;
to               1854 drivers/mtd/mtdcore.c 	       unsigned long count, loff_t to, size_t *retlen)
to               1860 drivers/mtd/mtdcore.c 		return default_mtd_writev(mtd, vecs, count, to, retlen);
to               1861 drivers/mtd/mtdcore.c 	return mtd->_writev(mtd, vecs, count, to, retlen);
to                150 drivers/mtd/mtdpart.c static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
to                154 drivers/mtd/mtdpart.c 	return part->parent->_write(part->parent, to + part->offset, len,
to                158 drivers/mtd/mtdpart.c static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
to                162 drivers/mtd/mtdpart.c 	return part->parent->_panic_write(part->parent, to + part->offset, len,
to                166 drivers/mtd/mtdpart.c static int part_write_oob(struct mtd_info *mtd, loff_t to,
to                171 drivers/mtd/mtdpart.c 	return part->parent->_write_oob(part->parent, to + part->offset, ops);
to                190 drivers/mtd/mtdpart.c 		unsigned long count, loff_t to, size_t *retlen)
to                194 drivers/mtd/mtdpart.c 				     to + part->offset, retlen);
to               1606 drivers/mtd/nand/onenand/onenand_base.c static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
to               1614 drivers/mtd/nand/onenand/onenand_base.c 	this->command(mtd, readcmd, to, mtd->oobsize);
to               1615 drivers/mtd/nand/onenand/onenand_base.c 	onenand_update_bufferram(mtd, to, 0);
to               1700 drivers/mtd/nand/onenand/onenand_base.c static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
to               1713 drivers/mtd/nand/onenand/onenand_base.c 	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
to               1717 drivers/mtd/nand/onenand/onenand_base.c         if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
to               1723 drivers/mtd/nand/onenand/onenand_base.c 	column = to & (mtd->writesize - 1);
to               1730 drivers/mtd/nand/onenand/onenand_base.c 		this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
to               1743 drivers/mtd/nand/onenand/onenand_base.c 		this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
to               1748 drivers/mtd/nand/onenand/onenand_base.c 		onenand_update_bufferram(mtd, to, !subpage);
to               1751 drivers/mtd/nand/onenand/onenand_base.c 			onenand_update_bufferram(mtd, to + this->writesize, !subpage);
to               1760 drivers/mtd/nand/onenand/onenand_base.c 		to += thislen;
to               1790 drivers/mtd/nand/onenand/onenand_base.c static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
to               1804 drivers/mtd/nand/onenand/onenand_base.c 	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
to               1812 drivers/mtd/nand/onenand/onenand_base.c         if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
to               1822 drivers/mtd/nand/onenand/onenand_base.c 	oobcolumn = to & (mtd->oobsize - 1);
to               1824 drivers/mtd/nand/onenand/onenand_base.c 	column = to & (mtd->writesize - 1);
to               1836 drivers/mtd/nand/onenand/onenand_base.c 			this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
to               1889 drivers/mtd/nand/onenand/onenand_base.c 				ret = onenand_verify(mtd, buf - len, to - len, len);
to               1904 drivers/mtd/nand/onenand/onenand_base.c 		    likely(onenand_block(this, to) != 0) &&
to               1911 drivers/mtd/nand/onenand/onenand_base.c 		this->command(mtd, cmd, to, mtd->writesize);
to               1920 drivers/mtd/nand/onenand/onenand_base.c 			onenand_update_bufferram(mtd, to, !ret && !subpage);
to               1928 drivers/mtd/nand/onenand/onenand_base.c 			ret = onenand_verify(mtd, buf, to, thislen);
to               1945 drivers/mtd/nand/onenand/onenand_base.c 		prev = to;
to               1947 drivers/mtd/nand/onenand/onenand_base.c 		to += thislen;
to               1974 drivers/mtd/nand/onenand/onenand_base.c static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
to               1985 drivers/mtd/nand/onenand/onenand_base.c 	to += ops->ooboffs;
to               1987 drivers/mtd/nand/onenand/onenand_base.c 	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
to               1998 drivers/mtd/nand/onenand/onenand_base.c 	column = to & (mtd->oobsize - 1);
to               2023 drivers/mtd/nand/onenand/onenand_base.c 		this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
to               2041 drivers/mtd/nand/onenand/onenand_base.c 		this->command(mtd, oobcmd, to, mtd->oobsize);
to               2043 drivers/mtd/nand/onenand/onenand_base.c 		onenand_update_bufferram(mtd, to, 0);
to               2046 drivers/mtd/nand/onenand/onenand_base.c 			onenand_update_bufferram(mtd, to + this->writesize, 0);
to               2055 drivers/mtd/nand/onenand/onenand_base.c 		ret = onenand_verify_oob(mtd, oobbuf, to);
to               2066 drivers/mtd/nand/onenand/onenand_base.c 		to += mtd->writesize;
to               2082 drivers/mtd/nand/onenand/onenand_base.c static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
to               2099 drivers/mtd/nand/onenand/onenand_base.c 		ret = onenand_write_ops_nolock(mtd, to, ops);
to               2101 drivers/mtd/nand/onenand/onenand_base.c 		ret = onenand_write_oob_nolock(mtd, to, ops);
to               2774 drivers/mtd/nand/onenand/onenand_base.c static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
to               2785 drivers/mtd/nand/onenand/onenand_base.c 	to += ops->ooboffs;
to               2792 drivers/mtd/nand/onenand/onenand_base.c 	column = to & (mtd->oobsize - 1);
to               2802 drivers/mtd/nand/onenand/onenand_base.c 		block = (int) (to >> this->erase_shift);
to               2841 drivers/mtd/nand/onenand/onenand_base.c 		onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
to               2842 drivers/mtd/nand/onenand/onenand_base.c 		onenand_update_bufferram(mtd, to, 0);
to               2845 drivers/mtd/nand/onenand/onenand_base.c 			onenand_update_bufferram(mtd, to + this->writesize, 0);
to               2881 drivers/mtd/nand/onenand/onenand_base.c 		to += mtd->writesize;
to               2942 drivers/mtd/nand/onenand/onenand_base.c static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
to               2966 drivers/mtd/nand/onenand/onenand_base.c 	ret = onenand_write_ops_nolock(mtd, to, &ops);
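
The onenand_base.c hits above compute the in-page column by masking the destination offset with the page size and advance `to` by the amount written on each pass. A minimal sketch of that bookkeeping, assuming nothing beyond the arithmetic visible in the hits (the function and parameter names are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: how many bytes of 'remaining' fit into the page
 * that contains offset 'to', given a power-of-two page size. */
static size_t bytes_this_page(loff_t to, size_t remaining, u32 writesize)
{
	u32 column = to & (writesize - 1);	/* offset inside the page */

	return min_t(size_t, writesize - column, remaining);
}
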
to               2408 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned long to;
to               2514 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
to               2515 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	if (!to) {
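
In the gpmi-nand hits, `to` holds the return value of wait_for_completion_timeout(), which is 0 when the wait timed out and the remaining jiffies otherwise. A minimal sketch of that pattern, with a hypothetical completion and a one-second budget:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Illustrative only: zero from wait_for_completion_timeout() means the
 * timeout elapsed before the completion fired. */
static int wait_transfer_done(struct completion *done)
{
	unsigned long to;

	to = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	return to ? 0 : -ETIMEDOUT;
}
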
to                444 drivers/mtd/nand/raw/nand_base.c static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
to                451 drivers/mtd/nand/raw/nand_base.c 			 __func__, (unsigned int)to, (int)ops->ooblen);
to                462 drivers/mtd/nand/raw/nand_base.c 	chipnr = (int)(to >> chip->chip_shift);
to                477 drivers/mtd/nand/raw/nand_base.c 	page = (int)(to >> chip->page_shift);
to               3956 drivers/mtd/nand/raw/nand_base.c static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
to               3976 drivers/mtd/nand/raw/nand_base.c 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
to               3982 drivers/mtd/nand/raw/nand_base.c 	column = to & (mtd->writesize - 1);
to               3984 drivers/mtd/nand/raw/nand_base.c 	chipnr = (int)(to >> chip->chip_shift);
to               3993 drivers/mtd/nand/raw/nand_base.c 	realpage = (int)(to >> chip->page_shift);
to               3997 drivers/mtd/nand/raw/nand_base.c 	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
to               3998 drivers/mtd/nand/raw/nand_base.c 	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
to               4085 drivers/mtd/nand/raw/nand_base.c static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
to               4089 drivers/mtd/nand/raw/nand_base.c 	int chipnr = (int)(to >> chip->chip_shift);
to               4103 drivers/mtd/nand/raw/nand_base.c 	ret = nand_do_write_ops(chip, to, &ops);
to               4115 drivers/mtd/nand/raw/nand_base.c static int nand_write_oob(struct mtd_info *mtd, loff_t to,
to               4138 drivers/mtd/nand/raw/nand_base.c 		ret = nand_do_write_oob(chip, to, ops);
to               4140 drivers/mtd/nand/raw/nand_base.c 		ret = nand_do_write_ops(chip, to, ops);
to                688 drivers/mtd/nand/raw/nand_bbt.c 	loff_t to;
to                693 drivers/mtd/nand/raw/nand_bbt.c 	to = (loff_t)block << this->bbt_erase_shift;
to                694 drivers/mtd/nand/raw/nand_bbt.c 	res = nand_markbad_bbm(this, to);
to                725 drivers/mtd/nand/raw/nand_bbt.c 	loff_t to;
to                786 drivers/mtd/nand/raw/nand_bbt.c 		to = ((loff_t)page) << this->page_shift;
to                791 drivers/mtd/nand/raw/nand_bbt.c 			to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
to                793 drivers/mtd/nand/raw/nand_bbt.c 			res = mtd_read(mtd, to, len, &retlen, buf);
to                804 drivers/mtd/nand/raw/nand_bbt.c 			res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
to                809 drivers/mtd/nand/raw/nand_bbt.c 			pageoffs = page - (int)(to >> this->page_shift);
to                857 drivers/mtd/nand/raw/nand_bbt.c 		einfo.addr = to;
to                867 drivers/mtd/nand/raw/nand_bbt.c 		res = scan_write_bbt(this, to, len, buf,
to                878 drivers/mtd/nand/raw/nand_bbt.c 			 (unsigned long long)to, td->version[chip]);
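
The nand_bbt.c hits turn block and page numbers into byte offsets with the erase and page shifts, and align an offset down to its block with a shifted mask. A small sketch of that arithmetic, assuming only what the hits show (names are illustrative):

#include <linux/types.h>

/* Illustrative only: block index to byte offset, and offset aligned
 * down to the start of its erase block. */
static loff_t block_to_offset(unsigned int block, unsigned int erase_shift)
{
	return (loff_t)block << erase_shift;
}

static loff_t align_to_block(loff_t off, unsigned int erase_shift)
{
	return off & ~(((loff_t)1 << erase_shift) - 1);
}
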
to                532 drivers/mtd/nand/spi/core.c static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
to                546 drivers/mtd/nand/spi/core.c 	nanddev_io_for_each_page(nand, to, ops, &iter) {
to                389 drivers/mtd/spi-nor/aspeed-smc.c static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to,
to                395 drivers/mtd/spi-nor/aspeed-smc.c 	aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to);
to                929 drivers/mtd/spi-nor/cadence-quadspi.c static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
to                945 drivers/mtd/spi-nor/cadence-quadspi.c 		memcpy_toio(cqspi->ahb_base + to, buf, len);
to                948 drivers/mtd/spi-nor/cadence-quadspi.c 		ret = cqspi_indirect_write_execute(nor, to, buf, len);
to                291 drivers/mtd/spi-nor/hisi-sfc.c static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
to                304 drivers/mtd/spi-nor/hisi-sfc.c 			to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
to                677 drivers/mtd/spi-nor/intel-spi.c static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
to                692 drivers/mtd/spi-nor/intel-spi.c 		block_size = min_t(loff_t, to + block_size,
to                693 drivers/mtd/spi-nor/intel-spi.c 				   round_up(to + 1, SZ_4K)) - to;
to                695 drivers/mtd/spi-nor/intel-spi.c 		writel(to, ispi->base + FADDR);
to                726 drivers/mtd/spi-nor/intel-spi.c 			dev_err(ispi->dev, "write error: %llx: %#x\n", to,
to                732 drivers/mtd/spi-nor/intel-spi.c 		to += block_size;
to                317 drivers/mtd/spi-nor/mtk-quadspi.c static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
to                331 drivers/mtd/spi-nor/mtk-quadspi.c 		ret = mtk_nor_write_buffer(mtk_nor, to, buf);
to                336 drivers/mtd/spi-nor/mtk-quadspi.c 		to += SFLASH_WRBUF_SIZE;
to                346 drivers/mtd/spi-nor/mtk-quadspi.c 		ret = mtk_nor_write_single_byte(mtk_nor, to,
to                186 drivers/mtd/spi-nor/nxp-spifi.c static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
to                198 drivers/mtd/spi-nor/nxp-spifi.c 	writel(to, spifi->io_base + SPIFI_ADDR);
to                354 drivers/mtd/spi-nor/spi-nor.c static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
to                359 drivers/mtd/spi-nor/spi-nor.c 			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
to                382 drivers/mtd/spi-nor/spi-nor.c static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
to                386 drivers/mtd/spi-nor/spi-nor.c 		return spi_nor_spimem_write_data(nor, to, len, buf);
to                388 drivers/mtd/spi-nor/spi-nor.c 	return nor->write(nor, to, len, buf);
to               2583 drivers/mtd/spi-nor/spi-nor.c static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
to               2590 drivers/mtd/spi-nor/spi-nor.c 	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
to               2600 drivers/mtd/spi-nor/spi-nor.c 	actual = to % 2;
to               2606 drivers/mtd/spi-nor/spi-nor.c 		ret = spi_nor_write_data(nor, to, 1, buf);
to               2615 drivers/mtd/spi-nor/spi-nor.c 	to += actual;
to               2622 drivers/mtd/spi-nor/spi-nor.c 		ret = spi_nor_write_data(nor, to, 2, buf + actual);
to               2630 drivers/mtd/spi-nor/spi-nor.c 		to += 2;
to               2645 drivers/mtd/spi-nor/spi-nor.c 		ret = spi_nor_write_data(nor, to, 1, buf + actual);
to               2667 drivers/mtd/spi-nor/spi-nor.c static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
to               2674 drivers/mtd/spi-nor/spi-nor.c 	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
to               2682 drivers/mtd/spi-nor/spi-nor.c 		loff_t addr = to + i;
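
The sst_write() hits above program a single byte when `to` is odd, then whole 2-byte words, then a possible trailing byte. A sketch of just that split, assuming nothing about the SST command sequence itself (the helper name is hypothetical):

#include <linux/types.h>

/* Illustrative only: split a program request into a leading byte (to
 * reach word alignment), 2-byte words, and a trailing byte. */
static void sst_split(loff_t to, size_t len,
		      size_t *head, size_t *words, size_t *tail)
{
	*head = (to % 2) ? 1 : 0;
	if (*head > len)
		*head = len;
	*words = (len - *head) / 2;
	*tail = len - *head - *words * 2;
}
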
to               1302 drivers/mtd/ubi/eba.c int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
to               1315 drivers/mtd/ubi/eba.c 	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
to               1420 drivers/mtd/ubi/eba.c 	err = ubi_io_write_vid_hdr(ubi, to, vidb);
to               1430 drivers/mtd/ubi/eba.c 	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
to               1434 drivers/mtd/ubi/eba.c 				 err, to);
to               1443 drivers/mtd/ubi/eba.c 		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
to               1454 drivers/mtd/ubi/eba.c 	vol->eba_tbl->entries[lnum].pnum = to;
to                193 drivers/mtd/ubi/gluebi.c static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
to                200 drivers/mtd/ubi/gluebi.c 	lnum = div_u64_rem(to, mtd->erasesize, &offs);
to                897 drivers/mtd/ubi/ubi.h int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
to                403 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
to                443 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	} while (time_before(jiffies, to));
to                 40 drivers/net/dsa/sja1105/sja1105_main.c 			   int from, int to, bool allow)
to                 43 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].bc_domain  |= BIT(to);
to                 44 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].reach_port |= BIT(to);
to                 45 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].fl_domain  |= BIT(to);
to                 47 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].bc_domain  &= ~BIT(to);
to                 48 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].reach_port &= ~BIT(to);
to                 49 drivers/net/dsa/sja1105/sja1105_main.c 		l2_fwd[from].fl_domain  &= ~BIT(to);
to                143 drivers/net/ethernet/8390/mac8390.c static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
to                145 drivers/net/ethernet/8390/mac8390.c static void dayna_memcpy_tocard(struct net_device *dev, int to,
to                661 drivers/net/ethernet/8390/mac8390.c static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
to                665 drivers/net/ethernet/8390/mac8390.c 	unsigned char *target = to;
to                685 drivers/net/ethernet/8390/mac8390.c static void dayna_memcpy_tocard(struct net_device *dev, int to,
to                690 drivers/net/ethernet/8390/mac8390.c 	to <<= 1;	/* word, skip overhead */
to                691 drivers/net/ethernet/8390/mac8390.c 	ptr = (unsigned short *)(dev->mem_start+to);
to                693 drivers/net/ethernet/8390/mac8390.c 	if (to & 2) {		/* avoid a byte write (stomps on other data) */
to                834 drivers/net/ethernet/8390/mac8390.c 	volatile unsigned short *to = (void *)tp;
to                841 drivers/net/ethernet/8390/mac8390.c 		*to++ = *from++;
to                846 drivers/net/ethernet/8390/mac8390.c 	unsigned short *to = tp;
to                853 drivers/net/ethernet/8390/mac8390.c 		*to++ = *from++;
to                328 drivers/net/ethernet/amd/declance.c static void cp_to_buf(const int type, void *to, const void *from, int len)
to                337 drivers/net/ethernet/amd/declance.c 		memcpy(to, from, len);
to                340 drivers/net/ethernet/amd/declance.c 		tp = to;
to                359 drivers/net/ethernet/amd/declance.c 		tp = to;
to                387 drivers/net/ethernet/amd/declance.c static void cp_from_buf(const int type, void *to, const void *from, int len)
to                396 drivers/net/ethernet/amd/declance.c 		memcpy(to, from, len);
to                399 drivers/net/ethernet/amd/declance.c 		tp = to;
to                420 drivers/net/ethernet/amd/declance.c 		tp = to;
to                 87 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
to                 90 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
to                 92 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
to                 94 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
to                104 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	to_fp->index = to;
to                123 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
to               1005 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct freelQ_e *to = &fl->entries[fl->pidx];
to               1008 drivers/net/ethernet/chelsio/cxgb/sge.c 	to->addr_lo = from->addr_lo;
to               1009 drivers/net/ethernet/chelsio/cxgb/sge.c 	to->addr_hi = from->addr_hi;
to               1010 drivers/net/ethernet/chelsio/cxgb/sge.c 	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
to               1012 drivers/net/ethernet/chelsio/cxgb/sge.c 	to->gen2 = V_CMD_GEN2(fl->genbit);
to                582 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct rx_desc *to = &q->desc[q->pidx];
to                585 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->addr_lo = from->addr_lo;	/* already big endian */
to                586 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->addr_hi = from->addr_hi;	/* likewise */
to                588 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to                589 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
to               1387 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct work_request_hdr *to = (struct work_request_hdr *)d;
to               1390 drivers/net/ethernet/chelsio/cxgb3/sge.c 		memcpy(&to[1], &from[1], len - sizeof(*from));
to               1392 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
to               1394 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
to               1397 drivers/net/ethernet/chelsio/cxgb3/sge.c 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
to               3261 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			    unsigned int from, unsigned int to)
to               3265 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
to               3267 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
to                885 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct ulptx_sge_pair *to;
to                909 drivers/net/ethernet/chelsio/cxgb4/sge.c 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
to                911 drivers/net/ethernet/chelsio/cxgb4/sge.c 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
to                912 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to                913 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to                914 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->addr[0] = cpu_to_be64(addr[i]);
to                915 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->addr[1] = cpu_to_be64(addr[++i]);
to                918 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to                919 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->len[1] = cpu_to_be32(0);
to                920 drivers/net/ethernet/chelsio/cxgb4/sge.c 		to->addr[0] = cpu_to_be64(addr[i + 1]);
to                906 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct ulptx_sge_pair *to;
to                930 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
to                932 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
to                933 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to                934 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to                935 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->addr[0] = cpu_to_be64(addr[i]);
to                936 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->addr[1] = cpu_to_be64(addr[++i]);
to                939 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to                940 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->len[1] = cpu_to_be32(0);
to                941 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		to->addr[0] = cpu_to_be64(addr[i + 1]);
to                471 drivers/net/ethernet/davicom/dm9000.c dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
to                476 drivers/net/ethernet/davicom/dm9000.c 		to[0] = 0xff;
to                477 drivers/net/ethernet/davicom/dm9000.c 		to[1] = 0xff;
to                499 drivers/net/ethernet/davicom/dm9000.c 	to[0] = ior(db, DM9000_EPDRL);
to                500 drivers/net/ethernet/davicom/dm9000.c 	to[1] = ior(db, DM9000_EPDRH);
to                117 drivers/net/ethernet/ibm/ehea/ehea.h #define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
to               1725 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 					   struct fm10k_swapi_table_info *to)
to               1728 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	to->used = le32_to_cpu(from->used);
to               1729 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	to->avail = le32_to_cpu(from->avail);
to               1893 drivers/net/ethernet/jme.c 	u32 phylink, to = JME_WAIT_LINK_TIME;
to               1897 drivers/net/ethernet/jme.c 	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
to               2732 drivers/net/ethernet/jme.c 	int to;
to               2735 drivers/net/ethernet/jme.c 	to = JME_SMB_BUSY_TIMEOUT;
to               2736 drivers/net/ethernet/jme.c 	while ((val & SMBCSR_BUSY) && --to) {
to               2740 drivers/net/ethernet/jme.c 	if (!to) {
to               2751 drivers/net/ethernet/jme.c 	to = JME_SMB_BUSY_TIMEOUT;
to               2752 drivers/net/ethernet/jme.c 	while ((val & SMBINTF_HWCMD) && --to) {
to               2756 drivers/net/ethernet/jme.c 	if (!to) {
to               2768 drivers/net/ethernet/jme.c 	int to;
to               2771 drivers/net/ethernet/jme.c 	to = JME_SMB_BUSY_TIMEOUT;
to               2772 drivers/net/ethernet/jme.c 	while ((val & SMBCSR_BUSY) && --to) {
to               2776 drivers/net/ethernet/jme.c 	if (!to) {
to               2788 drivers/net/ethernet/jme.c 	to = JME_SMB_BUSY_TIMEOUT;
to               2789 drivers/net/ethernet/jme.c 	while ((val & SMBINTF_HWCMD) && --to) {
to               2793 drivers/net/ethernet/jme.c 	if (!to) {
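
The jme.c hits use `to` as a countdown while polling a busy bit and treat a zero count as a timeout. A minimal sketch of that poll, assuming an arbitrary register and bit (both hypothetical) and a sleep between reads:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: poll until 'busy_bit' clears or the countdown in
 * 'to' reaches zero. */
static int poll_until_idle(void __iomem *reg, u32 busy_bit)
{
	int to = 1000;
	u32 val = readl(reg);

	while ((val & busy_bit) && --to) {
		usleep_range(100, 200);
		val = readl(reg);
	}
	return to ? 0 : -ETIMEDOUT;
}
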
to               1116 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
to               1123 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	if (!to || !from)
to               1126 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	copy = min_t(int, size, sizeof(to->first.data));
to               1127 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	memcpy(to->first.data, from, copy);
to               1131 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	next = to->next;
to               1150 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
to               1156 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	if (!to || !from)
to               1160 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	memcpy(to, from->first.data, copy);
to               1162 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	to += copy;
to               1174 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		memcpy(to, block->data, copy);
to               1175 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		to += copy;
to                173 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
to                176 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c 	list_splice(&from->erif_sublists, &to->erif_sublists);
to                177 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c 	to->kvdl_index = from->kvdl_index;
to               1088 drivers/net/ethernet/micrel/ks8851.c static void ks8851_set_msglevel(struct net_device *dev, u32 to)
to               1091 drivers/net/ethernet/micrel/ks8851.c 	ks->msg_enable = to;
to               1001 drivers/net/ethernet/micrel/ks8851_mll.c static void ks_set_msglevel(struct net_device *netdev, u32 to)
to               1004 drivers/net/ethernet/micrel/ks8851_mll.c 	ks->msg_enable = to;
to               3560 drivers/net/ethernet/micrel/ksz884x.c 	int to;
to               3574 drivers/net/ethernet/micrel/ksz884x.c 	bits = len = from = to = 0;
to               3578 drivers/net/ethernet/micrel/ksz884x.c 				data[to++] = pattern[from];
to               3600 drivers/net/ethernet/micrel/ksz884x.c 	crc = ether_crc(to, data);
to                354 drivers/net/ethernet/myricom/myri10ge/myri10ge.c #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
to               2061 drivers/net/ethernet/neterion/vxge/vxge-config.c 					 u32 to)
to               2071 drivers/net/ethernet/neterion/vxge/vxge-config.c 	to_item = mempoolh->items_arr[to];
to                270 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		u64 to = 0;
to                273 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;
to                282 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			nfp_map->cache_to = to;
to                559 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		bool to;
to                563 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		to = !wait_event_timeout(nn->mbox_cmsg.wq,
to                577 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			WARN_ON(!to);
to               2182 drivers/net/ethernet/smsc/smsc911x.c 	unsigned int to = 100;
to               2214 drivers/net/ethernet/smsc/smsc911x.c 	while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
to               2217 drivers/net/ethernet/smsc/smsc911x.c 	if (to == 0) {
to               2608 drivers/net/ethernet/smsc/smsc911x.c 	unsigned int to = 100;
to               2622 drivers/net/ethernet/smsc/smsc911x.c 	while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
to               2625 drivers/net/ethernet/smsc/smsc911x.c 	if (to == 0)
to               1025 drivers/net/ethernet/toshiba/spider_net.c 			int to = (chain->num_desc + off - 1) % chain->num_desc;
to               1027 drivers/net/ethernet/toshiba/spider_net.c 			         "with stat=0x%08x\n", cnt, from, to, cstat);
to                117 drivers/net/fddi/defza.c static inline void fza_reads(const void __iomem *from, void *to,
to                123 drivers/net/fddi/defza.c 		u64 *dst = to;
to                135 drivers/net/fddi/defza.c 		u32 *dst = to;
to                142 drivers/net/fddi/defza.c static inline void fza_writes(const void *from, void __iomem *to,
to                148 drivers/net/fddi/defza.c 		u64 __iomem *dst = to;
to                160 drivers/net/fddi/defza.c 		u32 __iomem *dst = to;
to                167 drivers/net/fddi/defza.c static inline void fza_moves(const void __iomem *from, void __iomem *to,
to                173 drivers/net/fddi/defza.c 		u64 __iomem *dst = to;
to                185 drivers/net/fddi/defza.c 		u32 __iomem *dst = to;
to                192 drivers/net/fddi/defza.c static inline void fza_zeros(void __iomem *to, unsigned long size)
to                195 drivers/net/fddi/defza.c 		u64 __iomem *dst = to;
to                205 drivers/net/fddi/defza.c 		u32 __iomem *dst = to;
to                588 drivers/net/fddi/skfp/cfm.c int cem_build_path(struct s_smc *smc, char *to, int path_index)
to                616 drivers/net/fddi/skfp/cfm.c 	memcpy(to,path,len) ;
to                568 drivers/net/fddi/skfp/h/cmtdef.h int cem_build_path(struct s_smc *smc, char *to, int path_index);
to                556 drivers/net/fddi/skfp/pmf.c 	char		*to ;
to                579 drivers/net/fddi/skfp/pmf.c 	to = (char *) (pcon->pc_p) ;	/* destination pointer */
to                582 drivers/net/fddi/skfp/pmf.c 	pa = (struct smt_para *) to ;	/* type/length pointer */
to                583 drivers/net/fddi/skfp/pmf.c 	to += PARA_LEN ;		/* skip smt_para */
to                592 drivers/net/fddi/skfp/pmf.c 		to[0] = 0 ;
to                593 drivers/net/fddi/skfp/pmf.c 		to[1] = 0 ;
to                594 drivers/net/fddi/skfp/pmf.c 		to[2] = 0 ;
to                595 drivers/net/fddi/skfp/pmf.c 		to[3] = index ;
to                597 drivers/net/fddi/skfp/pmf.c 		to += 4 ;
to                672 drivers/net/fddi/skfp/pmf.c 		*(u32 *)to = 0 ;
to                699 drivers/net/fddi/skfp/pmf.c 			sp_len = cem_build_path(smc,to,path) ;
to                705 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_1048 *) to ;
to                714 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_208c *) to ;
to                731 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_208d *) to ;
to                748 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_208e *) to ;
to                763 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_208f *) to ;
to                786 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_2090 *) to ;
to                801 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_4050 *) to ;
to                822 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_4051 *) to ;
to                839 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_4052 *) to ;
to                850 drivers/net/fddi/skfp/pmf.c 			sp = (struct smt_p_4053 *) to ;
to                907 drivers/net/fddi/skfp/pmf.c 			to[0] = 0 ;
to                908 drivers/net/fddi/skfp/pmf.c 			to[1] = 0 ;
to                911 drivers/net/fddi/skfp/pmf.c 				to[2] = *from++ ;
to                912 drivers/net/fddi/skfp/pmf.c 				to[3] = *from++ ;
to                915 drivers/net/fddi/skfp/pmf.c 				to[3] = *from++ ;
to                916 drivers/net/fddi/skfp/pmf.c 				to[2] = *from++ ;
to                919 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to                920 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                922 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to                929 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to                930 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to                932 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to                933 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to                935 drivers/net/fddi/skfp/pmf.c 			to += 2 ;
to                943 drivers/net/fddi/skfp/pmf.c 			to[0] = 0 ;
to                944 drivers/net/fddi/skfp/pmf.c 			to[1] = 0 ;
to                945 drivers/net/fddi/skfp/pmf.c 			to[2] = 0 ;
to                946 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                947 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to                955 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                956 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to                957 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to                958 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to                960 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to                961 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to                962 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to                963 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                966 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to                971 drivers/net/fddi/skfp/pmf.c 			to[0] = 0 ;
to                972 drivers/net/fddi/skfp/pmf.c 			to[1] = 0 ;
to                973 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to                974 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                976 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to                981 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to                982 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to                983 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to                984 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to                986 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to                991 drivers/net/fddi/skfp/pmf.c 			to[0] = 0 ;
to                992 drivers/net/fddi/skfp/pmf.c 			to[1] = 0 ;
to                993 drivers/net/fddi/skfp/pmf.c 			memcpy((char *) to+2,(char *) from,6) ;
to                994 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
to               1001 drivers/net/fddi/skfp/pmf.c 			memcpy((char *) to,(char *) from,8) ;
to               1002 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
to               1009 drivers/net/fddi/skfp/pmf.c 			memcpy((char *) to,(char *) from,32) ;
to               1010 drivers/net/fddi/skfp/pmf.c 			to += 32 ;
to               1017 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to               1018 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to               1019 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to               1020 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to               1021 drivers/net/fddi/skfp/pmf.c 			to[4] = *from++ ;
to               1022 drivers/net/fddi/skfp/pmf.c 			to[5] = *from++ ;
to               1023 drivers/net/fddi/skfp/pmf.c 			to[6] = *from++ ;
to               1024 drivers/net/fddi/skfp/pmf.c 			to[7] = *from++ ;
to               1025 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
to               1040 drivers/net/fddi/skfp/pmf.c 		to[0] = 0 ;
to               1041 drivers/net/fddi/skfp/pmf.c 		to[1] = 0 ;
to               1042 drivers/net/fddi/skfp/pmf.c 		to += 4 - (len & 3 ) ;
to               1050 drivers/net/fddi/skfp/pmf.c 	pcon->pc_p = (void *) to ;
to               1056 drivers/net/fddi/skfp/pmf.c 	to += sp_len ;
to               1079 drivers/net/fddi/skfp/pmf.c 	char		*to ;
to               1165 drivers/net/fddi/skfp/pmf.c 	to = mib_addr + pt->p_offset ;
to               1171 drivers/net/fddi/skfp/pmf.c 			to = (char *) &byte_val ;
to               1174 drivers/net/fddi/skfp/pmf.c 			to = (char *) &word_val ;
to               1177 drivers/net/fddi/skfp/pmf.c 			to = (char *) &long_val ;
to               1190 drivers/net/fddi/skfp/pmf.c 				to[0] = from[2] ;
to               1191 drivers/net/fddi/skfp/pmf.c 				to[1] = from[3] ;
to               1194 drivers/net/fddi/skfp/pmf.c 				to[1] = from[2] ;
to               1195 drivers/net/fddi/skfp/pmf.c 				to[0] = from[3] ;
to               1198 drivers/net/fddi/skfp/pmf.c 			to[0] = from[2] ;
to               1199 drivers/net/fddi/skfp/pmf.c 			to[1] = from[3] ;
to               1202 drivers/net/fddi/skfp/pmf.c 			to += 2 ;
to               1212 drivers/net/fddi/skfp/pmf.c 			to[0] = from[3] ;
to               1215 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to               1224 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to               1225 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to               1226 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to               1227 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to               1229 drivers/net/fddi/skfp/pmf.c 			to[0] = *from++ ;
to               1230 drivers/net/fddi/skfp/pmf.c 			to[1] = *from++ ;
to               1231 drivers/net/fddi/skfp/pmf.c 			to[2] = *from++ ;
to               1232 drivers/net/fddi/skfp/pmf.c 			to[3] = *from++ ;
to               1235 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to               1241 drivers/net/fddi/skfp/pmf.c 				memcpy(to,from+2,6) ;
to               1242 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
to               1250 drivers/net/fddi/skfp/pmf.c 				memcpy(to,from,4) ;
to               1251 drivers/net/fddi/skfp/pmf.c 			to += 4 ;
to               1259 drivers/net/fddi/skfp/pmf.c 				memcpy(to,from,8) ;
to               1260 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
to               1268 drivers/net/fddi/skfp/pmf.c 				memcpy(to,from,32) ;
to               1269 drivers/net/fddi/skfp/pmf.c 			to += 32 ;
to               1275 drivers/net/fddi/skfp/pmf.c 				to[0] = *from++ ;
to               1276 drivers/net/fddi/skfp/pmf.c 				to[1] = *from++ ;
to               1277 drivers/net/fddi/skfp/pmf.c 				to[2] = *from++ ;
to               1278 drivers/net/fddi/skfp/pmf.c 				to[3] = *from++ ;
to               1279 drivers/net/fddi/skfp/pmf.c 				to[4] = *from++ ;
to               1280 drivers/net/fddi/skfp/pmf.c 				to[5] = *from++ ;
to               1281 drivers/net/fddi/skfp/pmf.c 				to[6] = *from++ ;
to               1282 drivers/net/fddi/skfp/pmf.c 				to[7] = *from++ ;
to               1284 drivers/net/fddi/skfp/pmf.c 			to += 8 ;
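
The pmf.c hits above copy 16- and 32-bit MIB values into the frame one byte at a time, with the order depending on host endianness; the net effect appears to be a big-endian (frame-order) store. A hedged sketch of that effect using the unaligned helpers, not the driver's actual code:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Illustrative only: store a host-order 32-bit value into the frame
 * buffer in big-endian order, whatever the host byte order is. */
static void put_frame_be32(u8 *to, u32 val)
{
	put_unaligned_be32(val, to);
}
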
to                430 drivers/net/ppp/ppp_generic.c 	struct iov_iter to;
to                481 drivers/net/ppp/ppp_generic.c 	iov_iter_init(&to, READ, &iov, 1, count);
to                482 drivers/net/ppp/ppp_generic.c 	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
to                826 drivers/net/tap.c 			   struct iov_iter *to,
to                832 drivers/net/tap.c 	if (!iov_iter_count(to)) {
to                865 drivers/net/tap.c 		ret = tap_put_user(q, skb, to);
to                874 drivers/net/tap.c static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                878 drivers/net/tap.c 	ssize_t len = iov_iter_count(to), ret;
to                880 drivers/net/tap.c 	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
to               2213 drivers/net/tun.c 			   struct iov_iter *to,
to               2221 drivers/net/tun.c 	if (!iov_iter_count(to)) {
to               2236 drivers/net/tun.c 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
to               2241 drivers/net/tun.c 		ret = tun_put_user(tun, tfile, skb, to);
to               2251 drivers/net/tun.c static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               2256 drivers/net/tun.c 	ssize_t len = iov_iter_count(to), ret;
to               2260 drivers/net/tun.c 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
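
The tap and tun hits above size the read from iov_iter_count() and bail out when the iterator is empty before copying into it. A minimal sketch of that shape, with a plain buffer standing in for the dequeued packet:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Illustrative only: copy at most the iterator's remaining capacity,
 * returning 0 for a zero-length read. */
static ssize_t copy_buf_to_iter(const void *buf, size_t avail,
				struct iov_iter *to)
{
	size_t len = iov_iter_count(to);

	if (!len)
		return 0;
	return copy_to_iter(buf, min(avail, len), to);
}
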
to                106 drivers/net/wireless/ath/ath9k/dynack.c static void ath_dynack_set_timeout(struct ath_hw *ah, int to)
to                109 drivers/net/wireless/ath/ath9k/dynack.c 	int slottime = (to - 3) / 2;
to                112 drivers/net/wireless/ath/ath9k/dynack.c 		to, slottime);
to                114 drivers/net/wireless/ath/ath9k/dynack.c 	ath9k_hw_set_ack_timeout(ah, to);
to                115 drivers/net/wireless/ath/ath9k/dynack.c 	ath9k_hw_set_cts_timeout(ah, to);
to                128 drivers/net/wireless/ath/ath9k/dynack.c 	int to = 0;
to                131 drivers/net/wireless/ath/ath9k/dynack.c 		if (an->ackto > to)
to                132 drivers/net/wireless/ath/ath9k/dynack.c 			to = an->ackto;
to                134 drivers/net/wireless/ath/ath9k/dynack.c 	if (to && da->ackto != to) {
to                135 drivers/net/wireless/ath/ath9k/dynack.c 		ath_dynack_set_timeout(ah, to);
to                136 drivers/net/wireless/ath/ath9k/dynack.c 		da->ackto = to;
to                800 drivers/net/wireless/ath/wil6210/cfg80211.c 			       enum nl80211_iftype to)
to                803 drivers/net/wireless/ath/wil6210/cfg80211.c 	    to == NL80211_IFTYPE_P2P_CLIENT)
to               2362 drivers/net/wireless/ath/wil6210/debugfs.c 		blob->size = map->to - map->from;
to               1452 drivers/net/wireless/ath/wil6210/main.c 	ulong to = msecs_to_jiffies(2000);
to               1453 drivers/net/wireless/ath/wil6210/main.c 	ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
to               1460 drivers/net/wireless/ath/wil6210/main.c 			 jiffies_to_msecs(to-left), wil->hw_version);
to                426 drivers/net/wireless/ath/wil6210/wil6210.h 	u32 to;   /* linker address - to, exclusive */
to                 35 drivers/net/wireless/ath/wil6210/wil_crash_dump.c 	host_max = map->host + (map->to - map->from);
to                 46 drivers/net/wireless/ath/wil6210/wil_crash_dump.c 		tmp_max = map->host + (map->to - map->from);
to                 88 drivers/net/wireless/ath/wil6210/wil_crash_dump.c 		len = map->to - map->from;
to                252 drivers/net/wireless/ath/wil6210/wmi.c 		    ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)))
to                182 drivers/net/wireless/wl3501_cs.c static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
to                185 drivers/net/wireless/wl3501_cs.c 	iw_set_mgmt_info_element(from->id, to, from->data, from->len);
to                 32 drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c static int bits(u32 rw, int from, int to)
to                 34 drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c 	rw &= ~(0xffffffffU << (to+1));
to                169 drivers/nfc/pn533/pn533.c 	u8 to;
to                182 drivers/nfc/pn533/pn533.c 	u8 to;
to                 80 drivers/nfc/st21nfca/dep.c 	u8 to;
to                166 drivers/nfc/st21nfca/dep.c 	atr_res->to = ST21NFCA_DEFAULT_TIMEOUT;
to                461 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
to                490 drivers/nfc/st21nfca/dep.c 		if (atr_res->to >= 0x0e)
to                491 drivers/nfc/st21nfca/dep.c 			info->dep_info.to = 0x0e;
to                493 drivers/nfc/st21nfca/dep.c 			info->dep_info.to = atr_res->to + 1;
to                495 drivers/nfc/st21nfca/dep.c 		info->dep_info.to |= 0x10;
to                522 drivers/nfc/st21nfca/dep.c 	info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT;
to                563 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */
to                628 drivers/nfc/st21nfca/dep.c 			*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
to                656 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
to                672 drivers/nfc/st21nfca/dep.c 	info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT;
to                120 drivers/nfc/st21nfca/st21nfca.h 	u8 to;
to                497 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu,		0x06),
to                498 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2,		0x07),
to                499 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu,		0x08),
to                500 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2,		0x09),
to                501 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
to                502 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
to                503 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
to                506 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu,		0x0f),
to                186 drivers/ras/cec.c static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
to                201 drivers/ras/cec.c 			if (to)
to                202 drivers/ras/cec.c 				*to = i;
to                217 drivers/ras/cec.c 	if (to)
to                218 drivers/ras/cec.c 		*to = min;
to                223 drivers/ras/cec.c static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
to                225 drivers/ras/cec.c 	WARN_ON(!to);
to                228 drivers/ras/cec.c 		*to = 0;
to                231 drivers/ras/cec.c 	return __find_elem(ca, pfn, to);
to                315 drivers/ras/cec.c 	unsigned int to = 0;
to                333 drivers/ras/cec.c 	ret = find_elem(ca, pfn, &to);
to                338 drivers/ras/cec.c 		memmove((void *)&ca->array[to + 1],
to                339 drivers/ras/cec.c 			(void *)&ca->array[to],
to                340 drivers/ras/cec.c 			(ca->n - to) * sizeof(u64));
to                342 drivers/ras/cec.c 		ca->array[to] = pfn << PAGE_SHIFT;
to                347 drivers/ras/cec.c 	ca->array[to] |= DECAY_MASK << COUNT_BITS;
to                348 drivers/ras/cec.c 	ca->array[to]++;
to                351 drivers/ras/cec.c 	count = COUNT(ca->array[to]);
to                353 drivers/ras/cec.c 		u64 pfn = ca->array[to] >> PAGE_SHIFT;
to                364 drivers/ras/cec.c 		del_elem(ca, to);
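
The cec.c hits locate an insertion slot `to` in a sorted array and shift the tail up with memmove() before writing the new element. A small sketch of that step, with bounds checking left to the caller:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: open a slot at index 'to' in an array holding 'n'
 * elements and store the new value there. */
static void insert_at(u64 *array, unsigned int n, unsigned int to, u64 val)
{
	memmove(&array[to + 1], &array[to], (n - to) * sizeof(*array));
	array[to] = val;
}
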
to                170 drivers/rpmsg/rpmsg_char.c static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                208 drivers/rpmsg/rpmsg_char.c 	use = min_t(size_t, iov_iter_count(to), skb->len);
to                209 drivers/rpmsg/rpmsg_char.c 	if (copy_to_iter(skb->data, use, to) != use)
to                 53 drivers/rtc/rtc-puv3.c static void puv3_rtc_setaie(struct device *dev, int to)
to                 57 drivers/rtc/rtc-puv3.c 	dev_dbg(dev, "%s: aie=%d\n", __func__, to);
to                 61 drivers/rtc/rtc-puv3.c 	if (to)
to                433 drivers/rtc/rtc-sun6i.c static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
to                440 drivers/rtc/rtc-sun6i.c 	if (to) {
to                165 drivers/rtc/rtc-sunxi.c static void sunxi_rtc_setaie(unsigned int to, struct sunxi_rtc_dev *chip)
to                170 drivers/rtc/rtc-sunxi.c 	if (to) {
to                313 drivers/s390/block/dasd_devmap.c 	int to, to_id0, to_id1;
to                336 drivers/s390/block/dasd_devmap.c 	to = from;
to                340 drivers/s390/block/dasd_devmap.c 		if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
to                344 drivers/s390/block/dasd_devmap.c 		if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
to                358 drivers/s390/block/dasd_devmap.c 	while (from <= to) {
to               3596 drivers/s390/block/dasd_eckd.c static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
to               3602 drivers/s390/block/dasd_eckd.c 	if (from == to)
to               3608 drivers/s390/block/dasd_eckd.c 		if (tmp > to)
to               3609 drivers/s390/block/dasd_eckd.c 			tmp = to;
to               3614 drivers/s390/block/dasd_eckd.c 	if (to - (from + cur_pos) + 1 >= trks_per_ext) {
to               3615 drivers/s390/block/dasd_eckd.c 		tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
to               3620 drivers/s390/block/dasd_eckd.c 	if (cur_pos < to)
to               3747 drivers/s390/block/dasd_eckd.c 					unsigned int from, unsigned int to)
to               3771 drivers/s390/block/dasd_eckd.c 		while (cur_pos < to) {
to               3774 drivers/s390/block/dasd_eckd.c 			if (stop > to)
to               3775 drivers/s390/block/dasd_eckd.c 				stop = to;
to               5395 drivers/s390/block/dasd_eckd.c dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
to               5401 drivers/s390/block/dasd_eckd.c 	while (from <= to) {
to               5453 drivers/s390/block/dasd_eckd.c 	struct ccw1 *first, *last, *fail, *from, *to;
to               5516 drivers/s390/block/dasd_eckd.c 		to = min(first + 6, last);
to               5519 drivers/s390/block/dasd_eckd.c 		dasd_eckd_dump_ccw_range(first, to, page + len);
to               5525 drivers/s390/block/dasd_eckd.c 		from = ++to;
to               5532 drivers/s390/block/dasd_eckd.c 		to = min(fail + 1, last);
to               5533 drivers/s390/block/dasd_eckd.c 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
to               5536 drivers/s390/block/dasd_eckd.c 		from = max(from, ++to);
to                230 drivers/s390/char/monwriter.c 	void *to;
to                237 drivers/s390/char/monwriter.c 			to = (char *) &monpriv->hdr +
to                239 drivers/s390/char/monwriter.c 			if (copy_from_user(to, data + written, len)) {
to                256 drivers/s390/char/monwriter.c 			to = monpriv->current_buf->data +
to                258 drivers/s390/char/monwriter.c 			if (copy_from_user(to, data + written, len)) {
to                317 drivers/s390/char/sclp_vt220.c 	int to;
to                329 drivers/s390/char/sclp_vt220.c 		for (from=0, to=0;
to                330 drivers/s390/char/sclp_vt220.c 		     (from < count) && (to < sclp_vt220_space_left(request));
to                336 drivers/s390/char/sclp_vt220.c 				if (to + 1 < sclp_vt220_space_left(request)) {
to                337 drivers/s390/char/sclp_vt220.c 					((unsigned char *) buffer)[to++] = c;
to                338 drivers/s390/char/sclp_vt220.c 					((unsigned char *) buffer)[to++] = 0x0d;
to                343 drivers/s390/char/sclp_vt220.c 				((unsigned char *) buffer)[to++] = c;
to                345 drivers/s390/char/sclp_vt220.c 		sccb->header.length += to;
to                346 drivers/s390/char/sclp_vt220.c 		sccb->evbuf.length += to;
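
The sclp_vt220.c hits walk the input with `from` and the output with `to`, emitting an extra 0x0d after a line feed when two bytes of space remain. A sketch of that copy loop under the same assumption (buffer names are illustrative):

#include <linux/types.h>

/* Illustrative only: copy bytes, expanding each LF (0x0a) into LF + CR
 * (0x0d) while space allows; returns the number of bytes written. */
static size_t copy_expand_lf(unsigned char *dst, size_t dst_len,
			     const unsigned char *src, size_t src_len)
{
	size_t from, to;

	for (from = 0, to = 0; from < src_len && to < dst_len; from++) {
		unsigned char c = src[from];

		if (c == 0x0a) {
			if (to + 1 >= dst_len)
				break;
			dst[to++] = c;
			dst[to++] = 0x0d;
		} else {
			dst[to++] = c;
		}
	}
	return to;
}
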
to                 51 drivers/s390/cio/blacklist.c 			   unsigned int to, int msgtrigger)
to                 53 drivers/s390/cio/blacklist.c 	if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
to                 56 drivers/s390/cio/blacklist.c 				from_ssid, from, to_ssid, to);
to                 62 drivers/s390/cio/blacklist.c 	       (from <= to))) {
to                152 drivers/s390/cio/blacklist.c 	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
to                175 drivers/s390/cio/blacklist.c 			to = __MAX_SUBCHANNEL;
to                191 drivers/s390/cio/blacklist.c 			to = from;
to                198 drivers/s390/cio/blacklist.c 			from = to = console_devno;
to                205 drivers/s390/cio/blacklist.c 							 &to_ssid, &to,
to                210 drivers/s390/cio/blacklist.c 					to = from;
to                215 drivers/s390/cio/blacklist.c 			rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
to                107 drivers/s390/cio/idset.c void idset_add_set(struct idset *to, struct idset *from)
to                109 drivers/s390/cio/idset.c 	int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
to                111 drivers/s390/cio/idset.c 	bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
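
The idset.c hits merge one id set into another with bitmap_or() over the smaller of the two sizes. A one-line sketch of that merge with the sizes expressed in bits (names are illustrative):

#include <linux/bitmap.h>
#include <linux/kernel.h>

/* Illustrative only: OR 'from' into 'to' over the bits both sets cover. */
static void idset_merge(unsigned long *to, const unsigned long *from,
			unsigned int to_bits, unsigned int from_bits)
{
	bitmap_or(to, to, from, min(to_bits, from_bits));
}
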
to                 23 drivers/s390/cio/idset.h void idset_add_set(struct idset *to, struct idset *from);
to                193 drivers/s390/cio/vfio_ccw_cp.c 			   void *to, u64 iova,
to                221 drivers/s390/cio/vfio_ccw_cp.c 		memcpy(to + (n - l), (void *)from, m);
to               1470 drivers/s390/virtio/virtio_ccw.c 	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
to               1482 drivers/s390/virtio/virtio_ccw.c 					 &to_ssid, &to);
to               1484 drivers/s390/virtio/virtio_ccw.c 			    ((from_ssid == to_ssid) && (from > to)))
to               1489 drivers/s390/virtio/virtio_ccw.c 			to = from;
to               1494 drivers/s390/virtio/virtio_ccw.c 		       ((from_ssid == to_ssid) && (from <= to))) {
to                474 drivers/scsi/cxlflash/main.c 	ulong to;
to                517 drivers/scsi/cxlflash/main.c 	to = msecs_to_jiffies(5000);
to                518 drivers/scsi/cxlflash/main.c 	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
to                521 drivers/scsi/cxlflash/main.c 						       to);
to                522 drivers/scsi/cxlflash/main.c 	if (!to) {
to                341 drivers/scsi/cxlflash/superpipe.c 	u32 to = CMD_TIMEOUT * HZ;
to                361 drivers/scsi/cxlflash/superpipe.c 			      CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
to                432 drivers/scsi/cxlflash/vlun.c 	const u32 to = sdev->request_queue->rq_timeout;
to                454 drivers/scsi/cxlflash/vlun.c 				      CMD_BUFSIZE, NULL, NULL, to,
to                757 drivers/scsi/dc395x.c static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
to                761 drivers/scsi/dc395x.c 	if (time_before(jiffies + to, acb->last_reset - HZ / 2))
to                765 drivers/scsi/dc395x.c 		acb->waiting_timer.expires = jiffies + to + 1;
to               1069 drivers/scsi/esas2r/esas2r.h bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
to               1071 drivers/scsi/esas2r/esas2r.h bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
to               1000 drivers/scsi/esas2r/esas2r_flash.c 			     void *to,
to               1004 drivers/scsi/esas2r/esas2r_flash.c 	u8 *end = (u8 *)to;
to               1276 drivers/scsi/esas2r/esas2r_main.c 			   void *to,
to               1280 drivers/scsi/esas2r/esas2r_main.c 	u8 *end = (u8 *)to;
to                 34 drivers/scsi/fnic/fnic_trace.h extern ssize_t simple_read_from_buffer(void __user *to,
to               1279 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 		void *to = page_address(sg_page(sg_resp));
to               1285 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 		memcpy(to + sg_resp->offset,
to               2422 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 		void *to = page_address(sg_page(sg_resp));
to               2428 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 		memcpy(to + sg_resp->offset,
to               2235 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		void *to = page_address(sg_page(sg_resp));
to               2241 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		memcpy(to + sg_resp->offset,
to               1710 drivers/scsi/mvsas/mv_sas.c 	void *to;
to               1784 drivers/scsi/mvsas/mv_sas.c 			to = kmap_atomic(sg_page(sg_resp));
to               1785 drivers/scsi/mvsas/mv_sas.c 			memcpy(to + sg_resp->offset,
to               1788 drivers/scsi/mvsas/mv_sas.c 			kunmap_atomic(to);
to               1808 drivers/scsi/qla2xxx/qla_def.h #define SET_TARGET_ID(ha, to, from)			\
to               1811 drivers/scsi/qla2xxx/qla_def.h 		to.extended = cpu_to_le16(from);	\
to               1813 drivers/scsi/qla2xxx/qla_def.h 		to.id.standard = (uint8_t)from;		\
to                285 drivers/scsi/scsi_devinfo.c static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
to                292 drivers/scsi/scsi_devinfo.c 	strncpy(to, from, to_length);
to                297 drivers/scsi/scsi_devinfo.c 		memset(&to[from_length], ' ', to_length - from_length);
to                 23 drivers/scsi/snic/snic_trc.h extern ssize_t simple_read_from_buffer(void __user *to,
to                249 drivers/sh/maple/maple.c 	int port, unit, from, to, len;
to                256 drivers/sh/maple/maple.c 	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);
to                264 drivers/sh/maple/maple.c 	    mq->command | (to << 8) | (from << 16) | (len << 24);
to                434 drivers/spi/spi-ti-qspi.c 				     void *to, size_t readsize)
to                451 drivers/spi/spi-ti-qspi.c 		memcpy(to, qspi->rx_bb_addr, xfer_len);
to                454 drivers/spi/spi-ti-qspi.c 		to += xfer_len;
to                404 drivers/staging/android/vsoc.c 	struct hrtimer_sleeper timeout, *to = NULL;
to                425 drivers/staging/android/vsoc.c 		to = &timeout;
to                431 drivers/staging/android/vsoc.c 	if (to) {
to                440 drivers/staging/android/vsoc.c 		hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC,
to                442 drivers/staging/android/vsoc.c 		hrtimer_set_expires_range_ns(&to->timer, wake_time,
to                460 drivers/staging/android/vsoc.c 		if (to) {
to                461 drivers/staging/android/vsoc.c 			hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
to                462 drivers/staging/android/vsoc.c 			if (likely(to->task))
to                464 drivers/staging/android/vsoc.c 			hrtimer_cancel(&to->timer);
to                465 drivers/staging/android/vsoc.c 			if (!to->task) {
to                482 drivers/staging/android/vsoc.c 	if (to)
to                483 drivers/staging/android/vsoc.c 		destroy_hrtimer_on_stack(&to->timer);
to               3185 drivers/staging/exfat/exfat_super.c static void exfat_write_failed(struct address_space *mapping, loff_t to)
to               3189 drivers/staging/exfat/exfat_super.c 	if (to > i_size_read(inode)) {
to                 69 drivers/staging/fbtft/fbtft.h 	void (*mkdirty)(struct fb_info *info, int from, int to);
to               1639 drivers/staging/media/ipu3/ipu3-css-params.c 	struct imgu_abi_af_intra_frame_operations_data *to =
to               1659 drivers/staging/media/ipu3/ipu3-css-params.c 	return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
to               1667 drivers/staging/media/ipu3/ipu3-css-params.c 	struct imgu_abi_awb_fr_intra_frame_operations_data *to =
to               1686 drivers/staging/media/ipu3/ipu3-css-params.c 	return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
to               1693 drivers/staging/media/ipu3/ipu3-css-params.c 	struct imgu_abi_awb_intra_frame_operations_data *to =
to               1712 drivers/staging/media/ipu3/ipu3-css-params.c 	return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
to               1713 drivers/staging/media/ipu3/ipu3-css-params.c 					  to->transfer_data);
to                816 drivers/staging/speakup/main.c static int say_from_to(struct vc_data *vc, u_long from, u_long to,
to                825 drivers/staging/speakup/main.c 	while (from < to) {
to                846 drivers/staging/speakup/main.c static void say_line_from_to(struct vc_data *vc, u_long from, u_long to,
to                850 drivers/staging/speakup/main.c 	u_long end = start + (to * 2);
to                933 drivers/staging/speakup/main.c static void say_screen_from_to(struct vc_data *vc, u_long from, u_long to)
to                939 drivers/staging/speakup/main.c 	if (to > vc->vc_rows)
to                940 drivers/staging/speakup/main.c 		to = vc->vc_rows;
to                941 drivers/staging/speakup/main.c 	end = vc->vc_origin + (to * vc->vc_size_row);
to                942 drivers/staging/speakup/main.c 	for (from = start; from < end; from = to) {
to                943 drivers/staging/speakup/main.c 		to = from + vc->vc_size_row;
to                944 drivers/staging/speakup/main.c 		say_from_to(vc, from, to, 1);
to                955 drivers/staging/speakup/main.c 	u_long start, end, from, to;
to                965 drivers/staging/speakup/main.c 		to = start + (win_right * 2);
to                966 drivers/staging/speakup/main.c 		say_from_to(vc, from, to, 1);
to                669 drivers/target/target_core_user.c 	void *from, *to = NULL;
to                679 drivers/target/target_core_user.c 				if (to)
to                680 drivers/target/target_core_user.c 					kunmap_atomic(to);
to                685 drivers/target/target_core_user.c 				to = kmap_atomic(page);
to                722 drivers/target/target_core_user.c 				memcpy(to + offset,
to                725 drivers/target/target_core_user.c 				tcmu_flush_dcache_range(to, copy_bytes);
to                734 drivers/target/target_core_user.c 	if (to)
to                735 drivers/target/target_core_user.c 		kunmap_atomic(to);
to                744 drivers/target/target_core_user.c 	void *from = NULL, *to;
to                771 drivers/target/target_core_user.c 		to = kmap_atomic(sg_page(sg)) + sg->offset;
to                788 drivers/target/target_core_user.c 			memcpy(to + sg->length - sg_remaining, from + offset,
to                795 drivers/target/target_core_user.c 		kunmap_atomic(to - sg->offset);
to                 63 drivers/target/tcm_fc/tfc_io.c 	void *to = NULL;
to                117 drivers/target/tcm_fc/tfc_io.c 			to = fc_frame_payload_get(fp, 0);
to                147 drivers/target/tcm_fc/tfc_io.c 			memcpy(to, from, tlen);
to                149 drivers/target/tcm_fc/tfc_io.c 			to += tlen;
to                212 drivers/target/tcm_fc/tfc_io.c 	void *to;
to                302 drivers/target/tcm_fc/tfc_io.c 		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
to                303 drivers/target/tcm_fc/tfc_io.c 		page_addr = to;
to                304 drivers/target/tcm_fc/tfc_io.c 		to += offset_in_page(mem_off);
to                307 drivers/target/tcm_fc/tfc_io.c 		memcpy(to, from, tlen);
to                167 drivers/tty/n_tty.c static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
to                177 drivers/tty/n_tty.c 		uncopied = copy_to_user(to, from, size);
to                181 drivers/tty/n_tty.c 		to += size;
to                187 drivers/tty/n_tty.c 	uncopied = copy_to_user(to, from, n);
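
The n_tty.c hits copy a read buffer to user space in two pieces, advancing the destination pointer and treating any nonzero return from copy_to_user() as a fault. A minimal sketch of that two-chunk copy (the chunking itself, not the ring-buffer indexing):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative only: copy_to_user() returns the number of bytes it
 * could not copy, so any nonzero result maps to -EFAULT here. */
static int copy_two_chunks_to_user(void __user *to,
				   const void *first, size_t n1,
				   const void *second, size_t n2)
{
	if (copy_to_user(to, first, n1))
		return -EFAULT;
	if (copy_to_user(to + n1, second, n2))
		return -EFAULT;
	return 0;
}
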
to                113 drivers/tty/pty.c 	struct tty_struct *to = tty->link;
to                120 drivers/tty/pty.c 		spin_lock_irqsave(&to->port->lock, flags);
to                122 drivers/tty/pty.c 		c = tty_insert_flip_string(to->port, buf, c);
to                125 drivers/tty/pty.c 			tty_flip_buffer_push(to->port);
to                126 drivers/tty/pty.c 		spin_unlock_irqrestore(&to->port->lock, flags);
to                226 drivers/tty/pty.c 	struct tty_struct *to = tty->link;
to                228 drivers/tty/pty.c 	if (!to)
to                231 drivers/tty/pty.c 	tty_buffer_flush(to, NULL);
to                232 drivers/tty/pty.c 	if (to->packet) {
to                235 drivers/tty/pty.c 		wake_up_interruptible(&to->read_wait);
to               1343 drivers/tty/serial/mxs-auart.c 	unsigned int to = 1000;
to               1346 drivers/tty/serial/mxs-auart.c 		if (!to--)
to               1360 drivers/tty/serial/mxs-auart.c 	unsigned int to = 20000;
to               1382 drivers/tty/serial/mxs-auart.c 		if (!to--)
to                330 drivers/tty/serial/sirfsoc_uart.h #define SIRFSOC_UART_RX_TIMEOUT(br, to)	(((br) * (((to) + 999) / 1000)) / 1000)
to               1220 drivers/usb/gadget/function/f_fs.c static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
to               1240 drivers/usb/gadget/function/f_fs.c 		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
to               1246 drivers/usb/gadget/function/f_fs.c 		p->data = *to;
to               1264 drivers/usb/gadget/function/f_fs.c 		*to = p->data;
to                434 drivers/usb/gadget/legacy/inode.c 	struct iov_iter		to;
to                466 drivers/usb/gadget/legacy/inode.c 	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
to                573 drivers/usb/gadget/legacy/inode.c ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                577 drivers/usb/gadget/legacy/inode.c 	size_t len = iov_iter_count(to);
to                607 drivers/usb/gadget/legacy/inode.c 		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
to                614 drivers/usb/gadget/legacy/inode.c 		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
to                708 drivers/usb/gadget/udc/lpc32xx_udc.c 	int to;
to                719 drivers/usb/gadget/udc/lpc32xx_udc.c 		to = 10000;
to                721 drivers/usb/gadget/udc/lpc32xx_udc.c 			 USBD_CCEMPTY) == 0) && (to > 0)) {
to                722 drivers/usb/gadget/udc/lpc32xx_udc.c 			to--;
to                725 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (to > 0)
to                744 drivers/usb/gadget/udc/lpc32xx_udc.c 	int to = 1000;
to                754 drivers/usb/gadget/udc/lpc32xx_udc.c 	       && (to > 0))
to                755 drivers/usb/gadget/udc/lpc32xx_udc.c 		to--;
to                756 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!to)
to                836 drivers/usb/gadget/udc/lpc32xx_udc.c 	int to = 1000;
to                846 drivers/usb/gadget/udc/lpc32xx_udc.c 		  USBD_EP_RLZED)) && (to > 0))
to                847 drivers/usb/gadget/udc/lpc32xx_udc.c 		to--;
to                848 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!to)
to               1218 drivers/usb/gadget/udc/lpc32xx_udc.c 	int to = 1000;
to               1226 drivers/usb/gadget/udc/lpc32xx_udc.c 		 PKT_RDY) == 0)	&& (to > 0))
to               1227 drivers/usb/gadget/udc/lpc32xx_udc.c 		to--;
to               1228 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!to)
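In mxs-auart.c and lpc32xx_udc.c the variable `to` is simply a countdown that bounds a busy-wait on a status register. A minimal sketch of that bounded-poll idiom, assuming a memory-mapped status register; the register, bit mask and iteration budget are hypothetical:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	/* Poll *reg until 'ready_bit' is set, giving up after ~1000 iterations. */
	static int wait_ready(void __iomem *reg, u32 ready_bit)
	{
		unsigned int to = 1000;

		while (!(readl(reg) & ready_bit) && to > 0) {
			to--;
			udelay(1);
		}

		return to ? 0 : -ETIMEDOUT;
	}

The callers above either log and carry on or bail out when the budget runs out; the budgets (1000 vs 20000) are tuned per register.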
to                144 drivers/usb/host/ohci-s3c2410.c 				  int port, int to)
to                150 drivers/usb/host/ohci-s3c2410.c 		info->port[port-1].power = to;
to                151 drivers/usb/host/ohci-s3c2410.c 		(info->power_control)(port-1, to);
to                700 drivers/usb/misc/sisusbvga/sisusb_con.c 	int from, to, baseline;
to                731 drivers/usb/misc/sisusbvga/sisusb_con.c 					to   = c->vc_font.height;
to                734 drivers/usb/misc/sisusbvga/sisusb_con.c 					to   = baseline;
to                737 drivers/usb/misc/sisusbvga/sisusb_con.c 					to   = baseline;
to                740 drivers/usb/misc/sisusbvga/sisusb_con.c 					to   = baseline;
to                743 drivers/usb/misc/sisusbvga/sisusb_con.c 					to = 30;
to                747 drivers/usb/misc/sisusbvga/sisusb_con.c 					to   = baseline;
to                752 drivers/usb/misc/sisusbvga/sisusb_con.c 	    sisusb->sisusb_cursor_size_to != to) {
to                755 drivers/usb/misc/sisusbvga/sisusb_con.c 		sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0xe0, to);
to                758 drivers/usb/misc/sisusbvga/sisusb_con.c 		sisusb->sisusb_cursor_size_to   = to;
to                262 drivers/usb/mon/mon_bin.c     char __user *to, int length)
to                281 drivers/usb/mon/mon_bin.c 		if (copy_to_user(to, buf, step_len))
to                284 drivers/usb/mon/mon_bin.c 		to += step_len;
to               1754 drivers/vhost/net.c static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               1761 drivers/vhost/net.c 	return vhost_chr_read_iter(dev, to, noblock);
to                779 drivers/vhost/vhost.c static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
to                785 drivers/vhost/vhost.c 		return __copy_to_user(to, from, size);
to                794 drivers/vhost/vhost.c 				     (u64)(uintptr_t)to, size,
to                800 drivers/vhost/vhost.c 		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
to                814 drivers/vhost/vhost.c static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
to                820 drivers/vhost/vhost.c 		return __copy_from_user(to, from, size);
to                833 drivers/vhost/vhost.c 			return __copy_from_user(to, uaddr, size);
to                845 drivers/vhost/vhost.c 		ret = copy_from_iter(to, size, &f);
to                903 drivers/vhost/vhost.c 		__typeof__(ptr) to = \
to                906 drivers/vhost/vhost.c 		if (to != NULL) \
to                907 drivers/vhost/vhost.c 			ret = __put_user(x, to); \
to               1201 drivers/vhost/vhost.c ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
to               1209 drivers/vhost/vhost.c 	if (iov_iter_count(to) < size)
to               1257 drivers/vhost/vhost.c 		ret = copy_to_iter(start, size, to);
to                225 drivers/vhost/vhost.h ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
to                159 drivers/video/console/mdacon.c static inline void mda_set_cursor_size(int from, int to)
to                161 drivers/video/console/mdacon.c 	if (mda_cursor_size_from==from && mda_cursor_size_to==to)
to                164 drivers/video/console/mdacon.c 	if (from > to) {
to                168 drivers/video/console/mdacon.c 		write_mda_b(to,   0x0b);	/* cursor end */
to                172 drivers/video/console/mdacon.c 	mda_cursor_size_to   = to;
to                679 drivers/video/console/vgacon.c static void vgacon_set_cursor_size(int xpos, int from, int to)
to                684 drivers/video/console/vgacon.c 	if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto))
to                687 drivers/video/console/vgacon.c 	cursor_size_lastto = to;
to                701 drivers/video/console/vgacon.c 	cure = (cure & 0xe0) | to;
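mdacon and vgacon both program the cursor start/end scanline registers and cache the last values written, so a repeat call with an unchanged size does no I/O. A sketch of that cache-then-program shape; the register writer is passed in as a callback because the real port access differs per driver, and the 0x0a/0x0b indices follow the MDA/VGA cursor-start/cursor-end convention visible above:

	static int cursor_size_lastfrom = -1, cursor_size_lastto = -1;

	static void set_cursor_size(int from, int to,
				    void (*write_reg)(int index, int value))
	{
		if (from == cursor_size_lastfrom && to == cursor_size_lastto)
			return;				/* unchanged: skip the register I/O */

		cursor_size_lastfrom = from;
		cursor_size_lastto   = to;

		write_reg(0x0a, from);			/* cursor start scanline */
		write_reg(0x0b, to);			/* cursor end scanline */
	}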
to                166 drivers/video/fbdev/core/fbcmap.c int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
to                171 drivers/video/fbdev/core/fbcmap.c 	if (to->start > from->start)
to                172 drivers/video/fbdev/core/fbcmap.c 		fromoff = to->start - from->start;
to                174 drivers/video/fbdev/core/fbcmap.c 		tooff = from->start - to->start;
to                175 drivers/video/fbdev/core/fbcmap.c 	if (fromoff >= from->len || tooff >= to->len)
to                178 drivers/video/fbdev/core/fbcmap.c 	size = min_t(size_t, to->len - tooff, from->len - fromoff);
to                183 drivers/video/fbdev/core/fbcmap.c 	memcpy(to->red+tooff, from->red+fromoff, size);
to                184 drivers/video/fbdev/core/fbcmap.c 	memcpy(to->green+tooff, from->green+fromoff, size);
to                185 drivers/video/fbdev/core/fbcmap.c 	memcpy(to->blue+tooff, from->blue+fromoff, size);
to                186 drivers/video/fbdev/core/fbcmap.c 	if (from->transp && to->transp)
to                187 drivers/video/fbdev/core/fbcmap.c 		memcpy(to->transp+tooff, from->transp+fromoff, size);
to                191 drivers/video/fbdev/core/fbcmap.c int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
to                196 drivers/video/fbdev/core/fbcmap.c 	if (to->start > from->start)
to                197 drivers/video/fbdev/core/fbcmap.c 		fromoff = to->start - from->start;
to                199 drivers/video/fbdev/core/fbcmap.c 		tooff = from->start - to->start;
to                200 drivers/video/fbdev/core/fbcmap.c 	if (fromoff >= from->len || tooff >= to->len)
to                203 drivers/video/fbdev/core/fbcmap.c 	size = min_t(size_t, to->len - tooff, from->len - fromoff);
to                208 drivers/video/fbdev/core/fbcmap.c 	if (copy_to_user(to->red+tooff, from->red+fromoff, size))
to                210 drivers/video/fbdev/core/fbcmap.c 	if (copy_to_user(to->green+tooff, from->green+fromoff, size))
to                212 drivers/video/fbdev/core/fbcmap.c 	if (copy_to_user(to->blue+tooff, from->blue+fromoff, size))
to                214 drivers/video/fbdev/core/fbcmap.c 	if (from->transp && to->transp)
to                215 drivers/video/fbdev/core/fbcmap.c 		if (copy_to_user(to->transp+tooff, from->transp+fromoff, size))
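fb_copy_cmap() and fb_cmap_to_user() reduce the two colormaps to their common window before copying: whichever map starts later defines an offset into the other, and the copy length is the smaller remaining length. A worked sketch of just that arithmetic; the helper cmap_overlap() is illustrative, the field names follow struct fb_cmap:

	#include <linux/fb.h>
	#include <linux/kernel.h>

	/* Return how many entries overlap, plus the starting offset into each map. */
	static size_t cmap_overlap(const struct fb_cmap *from, const struct fb_cmap *to,
				   unsigned int *fromoff, unsigned int *tooff)
	{
		*fromoff = 0;
		*tooff = 0;

		if (to->start > from->start)
			*fromoff = to->start - from->start;	/* skip leading source entries */
		else
			*tooff = from->start - to->start;	/* skip leading destination entries */

		if (*fromoff >= from->len || *tooff >= to->len)
			return 0;				/* the maps do not overlap */

		return min_t(size_t, to->len - *tooff, from->len - *fromoff);
	}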
to                 33 drivers/video/fbdev/kyro/STG4000Reg.h #define CLEAR_BITS_FRM_TO(frm, to) \
to                 36 drivers/video/fbdev/kyro/STG4000Reg.h     for(i = frm; i<= to; i++) \
to                 43 drivers/video/fbdev/kyro/STG4000Reg.h #define CLEAR_BITS_FRM_TO_2(frm, to) \
to                 46 drivers/video/fbdev/kyro/STG4000Reg.h     for(i = frm; i<= to; i++) \
to                 21 drivers/video/fbdev/nvidia/nv_type.h #define SetBitField(value,from,to) SetBF(to, GetBF(value,from))
to                 83 drivers/video/fbdev/riva/fbdev.c #define SetBitField(value,from,to) SetBF(to,GetBF(value,from))
to               3326 drivers/video/fbdev/sis/init.c #define GETBITSTR(val,from,to)  ((GETBITS(val,from)) << (0?to))
to               8282 drivers/video/fbdev/sis/init301.c                SiS_SetCH70xxANDOR(SiS_Pr,0x21,0x00,0xFE);	/* ACIV off, need to set FSCI */
to                712 drivers/video/fbdev/sm501fb.c static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
to                720 drivers/video/fbdev/sm501fb.c 	if (to && (control & SM501_DC_PANEL_CONTROL_VDD) == 0) {
to                756 drivers/video/fbdev/sm501fb.c 	} else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
to                127 drivers/watchdog/asm9260_wdt.c static int asm9260_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
to                129 drivers/watchdog/asm9260_wdt.c 	wdd->timeout = to;
to                 92 drivers/watchdog/atlas7_wdt.c static int atlas7_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
to                 94 drivers/watchdog/atlas7_wdt.c 	wdd->timeout = to;
to                113 drivers/watchdog/sirfsoc_wdt.c static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
to                115 drivers/watchdog/sirfsoc_wdt.c 	wdd->timeout = to;
to                 80 drivers/watchdog/ts72xx_wdt.c static int ts72xx_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
to                 84 drivers/watchdog/ts72xx_wdt.c 	if (to == 1) {
to                 86 drivers/watchdog/ts72xx_wdt.c 	} else if (to == 2) {
to                 88 drivers/watchdog/ts72xx_wdt.c 	} else if (to <= 4) {
to                 90 drivers/watchdog/ts72xx_wdt.c 		to = 4;
to                 93 drivers/watchdog/ts72xx_wdt.c 		if (to <= 8)
to                 94 drivers/watchdog/ts72xx_wdt.c 			to = 8;
to                 97 drivers/watchdog/ts72xx_wdt.c 	wdd->timeout = to;
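The watchdog hits are set_timeout callbacks that record the requested value in wdd->timeout; ts72xx_wdt additionally rounds the request up to the nearest interval its hardware supports. A hedged sketch of that rounding, keeping the 1/2/4/8-second steps shown above and omitting the actual hardware programming:

	#include <linux/watchdog.h>

	static int example_wdt_set_timeout(struct watchdog_device *wdd, unsigned int to)
	{
		/* Round up to the next timeout the hardware can actually provide. */
		if (to <= 1)
			to = 1;
		else if (to <= 2)
			to = 2;
		else if (to <= 4)
			to = 4;
		else
			to = 8;

		wdd->timeout = to;	/* report the effective timeout back to the core */
		return 0;
	}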
to                 43 fs/9p/vfs_addr.c 	struct iov_iter to;
to                 54 fs/9p/vfs_addr.c 	iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
to                 56 fs/9p/vfs_addr.c 	retval = p9_client_read(fid, page_offset(page), &to, &err);
to                109 fs/9p/vfs_dir.c 			struct iov_iter to;
to                111 fs/9p/vfs_dir.c 			iov_iter_kvec(&to, READ, &kvec, 1, buflen);
to                112 fs/9p/vfs_dir.c 			n = p9_client_read(file->private_data, ctx->pos, &to,
to                383 fs/9p/vfs_file.c v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                389 fs/9p/vfs_file.c 		 iov_iter_count(to), iocb->ki_pos);
to                391 fs/9p/vfs_file.c 	ret = p9_client_read(fid, iocb->ki_pos, to, &err);
to                581 fs/9p/vfs_file.c v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                584 fs/9p/vfs_file.c 	return v9fs_file_read_iter(iocb, to);
to                 32 fs/9p/xattr.c  	struct iov_iter to;
to                 35 fs/9p/xattr.c  	iov_iter_kvec(&to, READ, &kvec, 1, buffer_size);
to                 50 fs/9p/xattr.c  		iov_iter_truncate(&to, attr_size);
to                 51 fs/9p/xattr.c  		retval = p9_client_read(attr_fid, 0, &to, &err);
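The 9p call sites wrap a plain kernel buffer in an iov_iter (iov_iter_kvec() or iov_iter_bvec()) before handing it to p9_client_read(). A minimal sketch of the kvec variant as used above; the surrounding function and buffer are illustrative, and the actual 9p call is only indicated in a comment:

	#include <linux/fs.h>
	#include <linux/uio.h>

	static void read_into_kernel_buffer(void *buf, size_t buflen)
	{
		struct kvec kvec = { .iov_base = buf, .iov_len = buflen };
		struct iov_iter to;

		/* READ: data will flow into this iterator. */
		iov_iter_kvec(&to, READ, &kvec, 1, buflen);

		/* ... hand &to to a reader, e.g. p9_client_read(fid, pos, &to, &err) ... */
	}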
to                132 fs/adfs/dir_fplus.c dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
to                143 fs/adfs/dir_fplus.c 		memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
to                145 fs/adfs/dir_fplus.c 		char *c = (char *)to;
to                 45 fs/adfs/inode.c static void adfs_write_failed(struct address_space *mapping, loff_t to)
to                 49 fs/adfs/inode.c 	if (to > inode->i_size)
to                382 fs/affs/file.c static void affs_write_failed(struct address_space *mapping, loff_t to)
to                386 fs/affs/file.c 	if (to > inode->i_size) {
to                503 fs/affs/file.c affs_do_readpage_ofs(struct page *page, unsigned to, int create)
to                514 fs/affs/file.c 		 page->index, to);
to                515 fs/affs/file.c 	BUG_ON(to > PAGE_SIZE);
to                521 fs/affs/file.c 	while (pos < to) {
to                525 fs/affs/file.c 		tmp = min(bsize - boff, to - pos);
to                526 fs/affs/file.c 		BUG_ON(pos + tmp > to || tmp > bsize);
to                614 fs/affs/file.c 	u32 to;
to                618 fs/affs/file.c 	to = PAGE_SIZE;
to                620 fs/affs/file.c 		to = inode->i_size & ~PAGE_MASK;
to                621 fs/affs/file.c 		memset(page_address(page) + to, 0, PAGE_SIZE - to);
to                624 fs/affs/file.c 	err = affs_do_readpage_ofs(page, to, 0);
to                678 fs/affs/file.c 	unsigned from, to;
to                683 fs/affs/file.c 	to = from + len;
to                706 fs/affs/file.c 		tmp = min(bsize - boff, to - from);
to                722 fs/affs/file.c 	while (from + bsize <= to) {
to                754 fs/affs/file.c 	if (from < to) {
to                759 fs/affs/file.c 		tmp = min(bsize, to - from);
to               1140 fs/afs/fsclient.c 			       unsigned offset, unsigned to,
to               1163 fs/afs/fsclient.c 	call->last_to = to;
to               1199 fs/afs/fsclient.c 		      unsigned offset, unsigned to,
to               1209 fs/afs/fsclient.c 		return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb);
to               1214 fs/afs/fsclient.c 	size = (loff_t)to - (loff_t)offset;
to               1229 fs/afs/fsclient.c 		return afs_fs_store_data64(fc, mapping, first, last, offset, to,
to               1243 fs/afs/fsclient.c 	call->last_to = to;
to               1175 fs/afs/internal.h 				      enum afs_call_state to)
to               1181 fs/afs/internal.h 		call->state = to;
to               1182 fs/afs/internal.h 		trace_afs_call_state(call, from, to, 0, 0);
to                287 fs/afs/rxrpc.c 	unsigned int nr, n, i, to, bytes = 0;
to                295 fs/afs/rxrpc.c 		to = PAGE_SIZE;
to                297 fs/afs/rxrpc.c 			to = call->last_to;
to                301 fs/afs/rxrpc.c 		bv[i].bv_len = to - offset;
to                303 fs/afs/rxrpc.c 		bytes += to - offset;
to                 86 fs/afs/write.c 	unsigned t, to = from + len;
to                 91 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, index, from, to);
to                139 fs/afs/write.c 		    (to < f || from > t))
to                143 fs/afs/write.c 		if (to > t)
to                144 fs/afs/write.c 			t = to;
to                147 fs/afs/write.c 		t = to;
to                356 fs/afs/write.c 			  unsigned offset, unsigned to)
to                370 fs/afs/write.c 	       first, last, offset, to);
to                410 fs/afs/write.c 			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
to                424 fs/afs/write.c 		atomic_long_add((last * PAGE_SIZE + to) -
to                459 fs/afs/write.c 	unsigned n, offset, to, f, t;
to                477 fs/afs/write.c 	to = priv >> AFS_PRIV_SHIFT;
to                481 fs/afs/write.c 	WARN_ON(offset == to);
to                482 fs/afs/write.c 	if (offset == to)
to                487 fs/afs/write.c 	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
to                509 fs/afs/write.c 			if (to != PAGE_SIZE &&
to                529 fs/afs/write.c 			to = t;
to                561 fs/afs/write.c 	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
to                563 fs/afs/write.c 	ret = afs_store_data(mapping, first, last, offset, to);
to               1249 fs/afs/yfsclient.c 		      unsigned offset, unsigned to,
to               1261 fs/afs/yfsclient.c 	size = (loff_t)to - (loff_t)offset;
to               1291 fs/afs/yfsclient.c 	call->last_to = to;
to                 33 fs/bfs/file.c  static int bfs_move_block(unsigned long from, unsigned long to,
to                 41 fs/bfs/file.c  	new = sb_getblk(sb, to);
to                163 fs/bfs/file.c  static void bfs_write_failed(struct address_space *mapping, loff_t to)
to                167 fs/bfs/file.c  	if (to > inode->i_size)
to               1265 fs/block_dev.c static int add_symlink(struct kobject *from, struct kobject *to)
to               1267 fs/block_dev.c 	return sysfs_create_link(from, to, kobject_name(to));
to               1270 fs/block_dev.c static void del_symlink(struct kobject *from, struct kobject *to)
to               1272 fs/block_dev.c 	sysfs_remove_link(from, kobject_name(to));
to               2003 fs/block_dev.c ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               2014 fs/block_dev.c 	iov_iter_truncate(to, size);
to               2015 fs/block_dev.c 	return generic_file_read_iter(iocb, to);
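blkdev_read_iter() clamps the iterator to the device size before falling through to generic_file_read_iter(), so a read that runs past the end of the device comes back short instead of failing. A sketch of that clamp with the device size passed in explicitly; the function name is illustrative:

	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t clamped_read_iter(struct kiocb *iocb, struct iov_iter *to,
					 loff_t dev_size)
	{
		loff_t pos = iocb->ki_pos;

		if (pos >= dev_size)
			return 0;		/* already past the end: nothing to read */

		iov_iter_truncate(to, dev_size - pos);
		return generic_file_read_iter(iocb, to);
	}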
to                729 fs/btrfs/send.c 		     struct fs_path *from, struct fs_path *to)
to                734 fs/btrfs/send.c 	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
to                741 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
to               1849 fs/buffer.c    void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
to               1864 fs/buffer.c    			if (block_end > from && block_start < to) {
to               1869 fs/buffer.c    					size = min(to, block_end) - start;
to               1945 fs/buffer.c    	unsigned to = from + len;
to               1955 fs/buffer.c    	BUG_ON(to > PAGE_SIZE);
to               1956 fs/buffer.c    	BUG_ON(from > to);
to               1967 fs/buffer.c    		if (block_end <= from || block_start >= to) {
to               1994 fs/buffer.c    				if (block_end > to || block_start < from)
to               1996 fs/buffer.c    						to, block_end,
to               2008 fs/buffer.c    		     (block_start < from || block_end > to)) {
to               2022 fs/buffer.c    		page_zero_new_buffers(page, from, to);
to               2034 fs/buffer.c    		unsigned from, unsigned to)
to               2047 fs/buffer.c    		if (block_end <= from || block_start >= to) {
to               2186 fs/buffer.c    	unsigned to;
to               2195 fs/buffer.c    	to = min_t(unsigned, PAGE_SIZE - from, count);
to               2196 fs/buffer.c    	to = from + to;
to               2197 fs/buffer.c    	if (from < blocksize && to > PAGE_SIZE - blocksize)
to               2204 fs/buffer.c    		if (block_end > from && block_start < to) {
to               2209 fs/buffer.c    			if (block_end >= to)
to               2441 fs/buffer.c    int block_commit_write(struct page *page, unsigned from, unsigned to)
to               2444 fs/buffer.c    	__block_commit_write(inode,page,from,to);
to               2556 fs/buffer.c    	unsigned from, to;
to               2566 fs/buffer.c    	to = from + len;
to               2614 fs/buffer.c    		if (block_start >= to)
to               2630 fs/buffer.c    							to, block_end);
to               2635 fs/buffer.c    		if (block_start < from || block_end > to) {
to               2675 fs/buffer.c    	page_zero_new_buffers(page, from, to);
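Throughout fs/buffer.c the per-page window of a write is derived from the file position and length: `from` is the offset of the write inside its page and `to = from + len`, with the invariants asserted at buffer.c:1955-1956. A small worked sketch of just that arithmetic (no buffer_head handling); the helper name is illustrative:

	#include <linux/bug.h>
	#include <linux/mm.h>

	static void page_write_window(loff_t pos, unsigned int len,
				      unsigned int *from, unsigned int *to)
	{
		*from = pos & (PAGE_SIZE - 1);	/* offset of the write inside its page */
		*to = *from + len;

		BUG_ON(*to > PAGE_SIZE);	/* one call never spans a page boundary */
		BUG_ON(*from > *to);
	}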
to                582 fs/ceph/file.c static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
to                592 fs/ceph/file.c 	u64 len = iov_iter_count(to);
to                611 fs/ceph/file.c 	while ((len = iov_iter_count(to)) > 0) {
to                629 fs/ceph/file.c 		more = len < iov_iter_count(to);
to                631 fs/ceph/file.c 		if (unlikely(iov_iter_is_pipe(to))) {
to                632 fs/ceph/file.c 			ret = iov_iter_get_pages_alloc(to, &pages, len,
to                678 fs/ceph/file.c 		if (unlikely(iov_iter_is_pipe(to))) {
to                680 fs/ceph/file.c 				iov_iter_advance(to, ret);
to                683 fs/ceph/file.c 				iov_iter_advance(to, 0);
to                694 fs/ceph/file.c 							   page_off, len, to);
to                717 fs/ceph/file.c 		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
to               1255 fs/ceph/file.c static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               1259 fs/ceph/file.c 	size_t len = iov_iter_count(to);
to               1300 fs/ceph/file.c 				ret = ceph_direct_read_write(iocb, to,
to               1305 fs/ceph/file.c 				ret = ceph_sync_read(iocb, to, &retry_op);
to               1316 fs/ceph/file.c 		ret = generic_file_read_iter(iocb, to);
to               1367 fs/ceph/file.c 						end - iocb->ki_pos, to);
to               1374 fs/ceph/file.c 				ret = iov_iter_zero(zlen, to);
to               1907 fs/ceph/inode.c 	u64 to;
to               1936 fs/ceph/inode.c 	to = ci->i_truncate_size;
to               1939 fs/ceph/inode.c 	     ci->i_truncate_pending, to);
to               1942 fs/ceph/inode.c 	truncate_pagecache(inode, to);
to               1945 fs/ceph/inode.c 	if (to == ci->i_truncate_size) {
to                265 fs/ceph/super.c 					       argstr[0].to-argstr[0].from,
to                273 fs/ceph/super.c 						argstr[0].to-argstr[0].from,
to                280 fs/ceph/super.c 			     argstr[0].to - argstr[0].from)) {
to                283 fs/ceph/super.c 				    argstr[0].to - argstr[0].from)) {
to                293 fs/ceph/super.c 					       argstr[0].to-argstr[0].from,
to                203 fs/char_dev.c  	dev_t to = from + count;
to                206 fs/char_dev.c  	for (n = from; n < to; n = next) {
to                208 fs/char_dev.c  		if (next > to)
to                209 fs/char_dev.c  			next = to;
to                217 fs/char_dev.c  	to = n;
to                218 fs/char_dev.c  	for (n = from; n < to; n = next) {
to                313 fs/char_dev.c  	dev_t to = from + count;
to                316 fs/char_dev.c  	for (n = from; n < to; n = next) {
to                318 fs/char_dev.c  		if (next > to)
to                319 fs/char_dev.c  			next = to;
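Both loops in fs/char_dev.c walk a [from, to) range of dev_t values without ever letting one step cross a major-number boundary: `next` is the first minor of the following major, clamped to the end of the range. A sketch of that chunking loop with the per-chunk work left as a comment; the function name is illustrative:

	#include <linux/kdev_t.h>
	#include <linux/types.h>

	static void for_each_major_chunk(dev_t from, unsigned int count)
	{
		dev_t to = from + count;
		dev_t n, next;

		for (n = from; n < to; n = next) {
			/* First minor of the next major, clamped to the end of the range. */
			next = MKDEV(MAJOR(n) + 1, 0);
			if (next > to)
				next = to;

			/* ... operate on [n, next), which lies entirely within one major ... */
		}
	}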
to                179 fs/cifs/cifs_unicode.c cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
to                221 fs/cifs/cifs_unicode.c 		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
to                238 fs/cifs/cifs_unicode.c 		to[outlen++] = 0;
to                250 fs/cifs/cifs_unicode.c cifs_strtoUTF16(__le16 *to, const char *from, int len,
to                265 fs/cifs/cifs_unicode.c 				       (wchar_t *) to, len);
to                287 fs/cifs/cifs_unicode.c 		put_unaligned_le16(wchar_to, &to[i]);
to                291 fs/cifs/cifs_unicode.c 	put_unaligned_le16(0, &to[i]);
to                 93 fs/cifs/cifs_unicode.h int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
to                105 fs/cifs/cifsfs.h extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
to                106 fs/cifs/cifsfs.h extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
to                107 fs/cifs/cifsfs.h extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
to               2094 fs/cifs/file.c static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
to               2113 fs/cifs/file.c 	if ((to > PAGE_SIZE) || (from > to)) {
to               2125 fs/cifs/file.c 	if (mapping->host->i_size - offset < (loff_t)to)
to               2126 fs/cifs/file.c 		to = (unsigned)(mapping->host->i_size - offset);
to               2132 fs/cifs/file.c 					   write_data, to - from, &offset);
to               3690 fs/cifs/file.c 	struct iov_iter *to = &ctx->iter;
to               3729 fs/cifs/file.c 						rc = cifs_readdata_to_iov(rdata, to);
to               3762 fs/cifs/file.c 				rc = cifs_readdata_to_iov(rdata, to);
to               3775 fs/cifs/file.c 		ctx->total_len = ctx->len - iov_iter_count(to);
to               3792 fs/cifs/file.c 	struct kiocb *iocb, struct iov_iter *to, bool direct)
to               3808 fs/cifs/file.c 	if (direct && to->type & ITER_KVEC) {
to               3813 fs/cifs/file.c 	len = iov_iter_count(to);
to               3836 fs/cifs/file.c 	if (iter_is_iovec(to))
to               3842 fs/cifs/file.c 		ctx->iter = *to;
to               3845 fs/cifs/file.c 		rc = setup_aio_ctx_iter(ctx, to, READ);
to               3894 fs/cifs/file.c ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
to               3896 fs/cifs/file.c 	return __cifs_readv(iocb, to, true);
to               3899 fs/cifs/file.c ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
to               3901 fs/cifs/file.c 	return __cifs_readv(iocb, to, false);
to               3905 fs/cifs/file.c cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
to               3924 fs/cifs/file.c 		return cifs_user_readv(iocb, to);
to               3929 fs/cifs/file.c 		return generic_file_read_iter(iocb, to);
to               3936 fs/cifs/file.c 	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
to               3939 fs/cifs/file.c 		rc = generic_file_read_iter(iocb, to);
to                449 fs/cifs/smb2misc.c 	__le16 *to;
to                471 fs/cifs/smb2misc.c 	to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
to                473 fs/cifs/smb2misc.c 	return to;
to                 38 fs/coda/file.c coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                 44 fs/coda/file.c 	size_t count = iov_iter_count(to);
to                 53 fs/coda/file.c 	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);
to                 63 fs/coda/file.c coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
to                 70 fs/coda/file.c 	size_t count = iov_iter_count(to);
to                 81 fs/coda/file.c 	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
to                684 fs/dax.c       		sector_t sector, size_t size, struct page *to,
to                702 fs/dax.c       	vto = kmap_atomic(to);
to                703 fs/dax.c       	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
to                112 fs/direct-io.c 	size_t from, to;
to                188 fs/direct-io.c 		sdio->to = PAGE_SIZE;
to                197 fs/direct-io.c 		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
to                971 fs/direct-io.c 		size_t from, to;
to                979 fs/direct-io.c 		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
to                982 fs/direct-io.c 		while (from < to) {
to               1077 fs/direct-io.c 			u = (to - from) >> blkbits;
to                 33 fs/ecryptfs/file.c 				struct iov_iter *to)
to                 39 fs/ecryptfs/file.c 	rc = generic_file_read_iter(iocb, to);
to                235 fs/ecryptfs/mmap.c static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
to                243 fs/ecryptfs/mmap.c 	if (to > end_byte_in_page)
to                244 fs/ecryptfs/mmap.c 		end_byte_in_page = to;
to                469 fs/ecryptfs/mmap.c 	unsigned to = from + copied;
to                476 fs/ecryptfs/mmap.c 			"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
to                479 fs/ecryptfs/mmap.c 						       to);
to                495 fs/ecryptfs/mmap.c 	rc = fill_zeros_to_end_of_page(page, to);
to                246 fs/erofs/super.c 		args[0].to = args[0].from = NULL;
to               1831 fs/eventpoll.c 	ktime_t expires, *to = NULL;
to               1839 fs/eventpoll.c 		to = &expires;
to               1840 fs/eventpoll.c 		*to = timespec64_to_ktime(end_time);
to               1916 fs/eventpoll.c 		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
to                565 fs/ext2/dir.c  	unsigned to = ((char *)dir - kaddr) +
to                586 fs/ext2/dir.c  	err = ext2_prepare_chunk(page, pos, to - from);
to                589 fs/ext2/dir.c  		pde->rec_len = ext2_rec_len_to_disk(to - from);
to                591 fs/ext2/dir.c  	err = ext2_commit_chunk(page, pos, to - from);
to                 33 fs/ext2/file.c static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                 38 fs/ext2/file.c 	if (!iov_iter_count(to))
to                 42 fs/ext2/file.c 	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
to                163 fs/ext2/file.c static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                167 fs/ext2/file.c 		return ext2_dax_read_iter(iocb, to);
to                169 fs/ext2/file.c 	return generic_file_read_iter(iocb, to);
to                 59 fs/ext2/inode.c static void ext2_write_failed(struct address_space *mapping, loff_t to)
to                 63 fs/ext2/inode.c 	if (to > inode->i_size) {
to                126 fs/ext2/inode.c static inline int verify_chain(Indirect *from, Indirect *to)
to                128 fs/ext2/inode.c 	while (from <= to && from->key == *from->p)
to                130 fs/ext2/inode.c 	return (from > to);
to               2593 fs/ext4/ext4.h 			   unsigned to,
to               2597 fs/ext4/extents.c 			      ext4_lblk_t from, ext4_lblk_t to)
to               2607 fs/ext4/extents.c 	    to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
to               2610 fs/ext4/extents.c 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
to               2627 fs/ext4/extents.c 	trace_ext4_remove_blocks(inode, ex, from, to, partial);
to               2662 fs/ext4/extents.c 	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
to               2663 fs/ext4/extents.c 	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
to               2665 fs/ext4/extents.c 		if (ext4_is_pending(inode, to))
to               2671 fs/ext4/extents.c 			ext4_rereserve_cluster(inode, to);
to                 38 fs/ext4/file.c static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                 56 fs/ext4/file.c 		return generic_file_read_iter(iocb, to);
to                 58 fs/ext4/file.c 	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
to                 66 fs/ext4/file.c static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                 71 fs/ext4/file.c 	if (!iov_iter_count(to))
to                 76 fs/ext4/file.c 		return ext4_dax_read_iter(iocb, to);
to                 78 fs/ext4/file.c 	return generic_file_read_iter(iocb, to);
to                533 fs/ext4/inline.c 	unsigned from, to;
to                578 fs/ext4/inline.c 	to = ext4_get_inline_size(inode);
to                590 fs/ext4/inline.c 		ret = __block_write_begin(page, from, to,
to                593 fs/ext4/inline.c 		ret = __block_write_begin(page, from, to, ext4_get_block);
to                597 fs/ext4/inline.c 					     from, to, NULL,
to                625 fs/ext4/inline.c 		block_commit_write(page, from, to);
to               1093 fs/ext4/inode.c 			   unsigned to,
to               1109 fs/ext4/inode.c 		if (block_end <= from || block_start >= to) {
to               1175 fs/ext4/inode.c 	unsigned to = from + len;
to               1188 fs/ext4/inode.c 	BUG_ON(to > PAGE_SIZE);
to               1189 fs/ext4/inode.c 	BUG_ON(from > to);
to               1200 fs/ext4/inode.c 		if (block_end <= from || block_start >= to) {
to               1221 fs/ext4/inode.c 				if (block_end > to || block_start < from)
to               1222 fs/ext4/inode.c 					zero_user_segments(page, to, block_end,
to               1234 fs/ext4/inode.c 		    (block_start < from || block_end > to)) {
to               1248 fs/ext4/inode.c 		page_zero_new_buffers(page, from, to);
to               1276 fs/ext4/inode.c 	unsigned from, to;
to               1289 fs/ext4/inode.c 	to = from + len;
to               1347 fs/ext4/inode.c 					     from, to, NULL,
to               1491 fs/ext4/inode.c 					    unsigned from, unsigned to)
to               1500 fs/ext4/inode.c 			if (block_end > from && block_start < to) {
to               1505 fs/ext4/inode.c 					size = min(to, block_end) - start;
to               1528 fs/ext4/inode.c 	unsigned from, to;
to               1535 fs/ext4/inode.c 	to = from + len;
to               1550 fs/ext4/inode.c 		ext4_journalled_zero_new_buffers(handle, page, from, to);
to               1554 fs/ext4/inode.c 							 from + copied, to);
to                167 fs/ext4/move_extent.c mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
to                189 fs/ext4/move_extent.c 		if (block_end <= from || block_start >= to) {
to                280 fs/ext4/namei.c static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
to               1762 fs/ext4/namei.c dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
to               1771 fs/ext4/namei.c 		memcpy (to, de, rec_len);
to               1772 fs/ext4/namei.c 		((struct ext4_dir_entry_2 *) to)->rec_len =
to               1776 fs/ext4/namei.c 		to += rec_len;
to               1778 fs/ext4/namei.c 	return (struct ext4_dir_entry_2 *) (to - rec_len);
to               1787 fs/ext4/namei.c 	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
to               1790 fs/ext4/namei.c 	prev = to = de;
to               1795 fs/ext4/namei.c 			if (de > to)
to               1796 fs/ext4/namei.c 				memmove(to, de, rec_len);
to               1797 fs/ext4/namei.c 			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
to               1798 fs/ext4/namei.c 			prev = to;
to               1799 fs/ext4/namei.c 			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
to               2087 fs/ext4/super.c 		args[0].to = args[0].from = NULL;
to               2522 fs/ext4/xattr.c 				     int value_offs_shift, void *to,
to               2540 fs/ext4/xattr.c 	memmove(to, from, n);
to               2496 fs/f2fs/data.c static void f2fs_write_failed(struct address_space *mapping, loff_t to)
to               2502 fs/f2fs/data.c 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
to                413 fs/f2fs/super.c 		args[0].to = args[0].from = NULL;
to                218 fs/fat/inode.c static void fat_write_failed(struct address_space *mapping, loff_t to)
to                222 fs/fat/inode.c 	if (to > inode->i_size) {
to                 93 fs/fuse/cuse.c static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
to                 98 fs/fuse/cuse.c 	return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
to               1316 fs/fuse/dev.c  static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
to               1325 fs/fuse/dev.c  	if (!iter_is_iovec(to))
to               1328 fs/fuse/dev.c  	fuse_copy_init(&cs, 1, to);
to               1330 fs/fuse/dev.c  	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
to                998 fs/fuse/file.c static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               1009 fs/fuse/file.c 	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
to               1016 fs/fuse/file.c 	return generic_file_read_iter(iocb, to);
to               1525 fs/fuse/file.c static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               1530 fs/fuse/file.c 		res = fuse_direct_IO(iocb, to);
to               1534 fs/fuse/file.c 		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
to               1565 fs/fuse/file.c static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               1574 fs/fuse/file.c 		return fuse_cache_read_iter(iocb, to);
to               1576 fs/fuse/file.c 		return fuse_direct_read_iter(iocb, to);
to                 46 fs/gfs2/aops.c 	unsigned int to = from + len;
to                 54 fs/gfs2/aops.c 		if (start >= to)
to                746 fs/gfs2/file.c static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
to                750 fs/gfs2/file.c 	size_t count = iov_iter_count(to);
to                762 fs/gfs2/file.c 	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
to                806 fs/gfs2/file.c static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                811 fs/gfs2/file.c 		ret = gfs2_file_direct_read(iocb, to);
to                816 fs/gfs2/file.c 	return generic_file_read_iter(iocb, to);
to               1274 fs/gfs2/inode.c static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
to               1276 fs/gfs2/inode.c 	struct inode *dir = &to->i_inode;
to                 41 fs/hfs/inode.c static void hfs_write_failed(struct address_space *mapping, loff_t to)
to                 45 fs/hfs/inode.c 	if (to > inode->i_size) {
to                212 fs/hfs/super.c 	if (arg->to - arg->from != 4)
to                 35 fs/hfsplus/inode.c static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
to                 39 fs/hfsplus/inode.c 	if (to > inode->i_size) {
to                 66 fs/hfsplus/options.c 	if (arg->to - arg->from != 4)
to                 84 fs/hostfs/hostfs.h extern int make_symlink(const char *from, const char *to);
to                 90 fs/hostfs/hostfs.h extern int link_file(const char *to, const char *from);
to                 92 fs/hostfs/hostfs.h extern int rename_file(char *from, char *to);
to                 93 fs/hostfs/hostfs.h extern int rename2_file(char *from, char *to, unsigned int flags);
to                624 fs/hostfs/hostfs_kern.c static int hostfs_link(struct dentry *to, struct inode *ino,
to                632 fs/hostfs/hostfs_kern.c 	to_name = dentry_name(to);
to                660 fs/hostfs/hostfs_kern.c 			  const char *to)
to                667 fs/hostfs/hostfs_kern.c 	err = make_symlink(file, to);
to                277 fs/hostfs/hostfs_user.c int make_symlink(const char *from, const char *to)
to                281 fs/hostfs/hostfs_user.c 	err = symlink(to, from);
to                327 fs/hostfs/hostfs_user.c int link_file(const char *to, const char *from)
to                331 fs/hostfs/hostfs_user.c 	err = link(to, from);
to                349 fs/hostfs/hostfs_user.c int rename_file(char *from, char *to)
to                353 fs/hostfs/hostfs_user.c 	err = rename(from, to);
to                359 fs/hostfs/hostfs_user.c int rename2_file(char *from, char *to, unsigned int flags)
to                373 fs/hostfs/hostfs_user.c 	err = syscall(SYS_renameat2, AT_FDCWD, from, AT_FDCWD, to, flags);
to                433 fs/hpfs/dnode.c static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
to                436 fs/hpfs/dnode.c 	dnode_secno chk_up = to;
to                474 fs/hpfs/dnode.c 		if (up == to) return to;
to                477 fs/hpfs/dnode.c 			hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to);
to                507 fs/hpfs/dnode.c 	a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from);
to                140 fs/hpfs/file.c static void hpfs_write_failed(struct address_space *mapping, loff_t to)
to                146 fs/hpfs/file.c 	if (to > inode->i_size) {
to                 57 fs/hpfs/name.c 	unsigned char *to;
to                 67 fs/hpfs/name.c 	if (!(to = kmalloc(len, GFP_KERNEL))) {
to                 71 fs/hpfs/name.c 	for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]);
to                 72 fs/hpfs/name.c 	return to;
to                238 fs/hugetlbfs/inode.c 			struct iov_iter *to, unsigned long size)
to                254 fs/hugetlbfs/inode.c 		n = copy_page_to_iter(&page[i], offset, chunksize, to);
to                270 fs/hugetlbfs/inode.c static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                282 fs/hugetlbfs/inode.c 	while (iov_iter_count(to)) {
to                308 fs/hugetlbfs/inode.c 			copied = iov_iter_zero(nr, to);
to                315 fs/hugetlbfs/inode.c 			copied = hugetlbfs_read_actor(page, offset, to, nr);
to                320 fs/hugetlbfs/inode.c 		if (copied != nr && iov_iter_count(to)) {
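hugetlbfs_read_actor() feeds a huge page to copy_page_to_iter() one PAGE_SIZE sub-page at a time and stops early when the iterator cannot absorb a full chunk. A sketch of that chunking loop; the function name is illustrative and the page is assumed to be a compound page whose sub-pages can be indexed as page[i]:

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/uio.h>

	/* Copy up to 'size' bytes of a (possibly compound) page into 'to'. */
	static size_t copy_page_chunks(struct page *page, unsigned long offset,
				       struct iov_iter *to, unsigned long size)
	{
		size_t copied = 0;
		int i = offset >> PAGE_SHIFT;		/* sub-page containing 'offset' */

		offset &= ~PAGE_MASK;			/* offset within that sub-page */

		while (size) {
			size_t chunk = min_t(size_t, size, PAGE_SIZE - offset);
			size_t n = copy_page_to_iter(&page[i], offset, chunk, to);

			copied += n;
			if (n != chunk)
				break;			/* iterator full or faulted */

			size -= chunk;
			offset = 0;
			i++;
		}

		return copied;
	}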
to                529 fs/iomap/buffered-io.c 		unsigned poff, unsigned plen, unsigned from, unsigned to,
to                536 fs/iomap/buffered-io.c 		zero_user_segments(page, poff, from, to, poff + plen);
to                557 fs/iomap/buffered-io.c 	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
to                570 fs/iomap/buffered-io.c 		    (to > poff && to < poff + plen)) {
to                572 fs/iomap/buffered-io.c 					poff, plen, from, to, iomap);
to                116 fs/jffs2/os-linux.h int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino);
to                191 fs/jffs2/os-linux.h 		       unsigned long count, loff_t to, size_t *retlen);
to                182 fs/jffs2/summary.h 			unsigned long count,  uint32_t to);
to                795 fs/jffs2/wbuf.c 		       unsigned long count, loff_t to, size_t *retlen,
to                800 fs/jffs2/wbuf.c 	uint32_t outvec_to = to;
to                805 fs/jffs2/wbuf.c 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
to                811 fs/jffs2/wbuf.c 		c->wbuf_ofs = PAGE_DIV(to);
to                812 fs/jffs2/wbuf.c 		c->wbuf_len = PAGE_MOD(to);
to                823 fs/jffs2/wbuf.c 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
to                827 fs/jffs2/wbuf.c 				  __func__, (unsigned long)to, c->wbuf_ofs);
to                833 fs/jffs2/wbuf.c 		c->wbuf_ofs = PAGE_DIV(to);
to                834 fs/jffs2/wbuf.c 		c->wbuf_len = PAGE_MOD(to);
to                837 fs/jffs2/wbuf.c 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
to                840 fs/jffs2/wbuf.c 			__func__, (unsigned long)to);
to                848 fs/jffs2/wbuf.c 	if (c->wbuf_len != PAGE_MOD(to)) {
to                849 fs/jffs2/wbuf.c 		c->wbuf_len = PAGE_MOD(to);
to                906 fs/jffs2/wbuf.c 		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
to                 17 fs/jffs2/writev.c 			      unsigned long count, loff_t to, size_t *retlen)
to                 22 fs/jffs2/writev.c 			res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to);
to                 29 fs/jffs2/writev.c 	return mtd_writev(c->mtd, vecs, count, to, retlen);
to                305 fs/jfs/inode.c static void jfs_write_failed(struct address_space *mapping, loff_t to)
to                309 fs/jfs/inode.c 	if (to > inode->i_size) {
to                 19 fs/jfs/jfs_unicode.c int jfs_strfromUCS_le(char *to, const __le16 * from,
to                 32 fs/jfs/jfs_unicode.c 					       &to[outlen],
to                 37 fs/jfs/jfs_unicode.c 				to[outlen++] = '?';
to                 42 fs/jfs/jfs_unicode.c 				to[i] = '?';
to                 55 fs/jfs/jfs_unicode.c 				to[i] = (char) (le16_to_cpu(from[i]));
to                 59 fs/jfs/jfs_unicode.c 	to[outlen] = 0;
to                 69 fs/jfs/jfs_unicode.c static int jfs_strtoUCS(wchar_t * to, const unsigned char *from, int len,
to                 78 fs/jfs/jfs_unicode.c 			charlen = codepage->char2uni(from, len, &to[i]);
to                 89 fs/jfs/jfs_unicode.c 			to[i] = (wchar_t) from[i];
to                 92 fs/jfs/jfs_unicode.c 	to[i] = 0;
to                 51 fs/kernfs/dir.c static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
to                 55 fs/kernfs/dir.c 	while (to->parent && to != from) {
to                 57 fs/kernfs/dir.c 		to = to->parent;
to                210 fs/kernfs/dir.c int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
to                217 fs/kernfs/dir.c 	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
to                149 fs/libfs.c     		struct dentry *to = NULL;
to                154 fs/libfs.c     			to = scan_positives(cursor, &dentry->d_subdirs,
to                157 fs/libfs.c     		if (to)
to                158 fs/libfs.c     			list_move(&cursor->d_child, &to->d_child);
to                162 fs/libfs.c     		dput(to);
to                645 fs/libfs.c     ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
to                657 fs/libfs.c     	ret = copy_to_user(to, from + pos, count);
to                680 fs/libfs.c     ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
to                692 fs/libfs.c     	res = copy_from_user(to + pos, from, count);
to                715 fs/libfs.c     ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
to                726 fs/libfs.c     	memcpy(to, from + pos, count);
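The libfs helpers above are the usual way for a small virtual file to expose a kernel buffer: simple_read_from_buffer() does the *ppos bookkeeping and the copy_to_user() in one call. A minimal sketch of a read file operation built on it; the backing string and function name are illustrative:

	#include <linux/fs.h>
	#include <linux/string.h>

	static const char example_msg[] = "hello from the kernel\n";

	static ssize_t example_read(struct file *file, char __user *to,
				    size_t count, loff_t *ppos)
	{
		/* Copies at most 'count' bytes starting at *ppos and advances *ppos. */
		return simple_read_from_buffer(to, count, ppos,
					       example_msg, strlen(example_msg));
	}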
to                396 fs/minix/inode.c static void minix_write_failed(struct address_space *mapping, loff_t to)
to                400 fs/minix/inode.c 	if (to > inode->i_size) {
to                 18 fs/minix/itree_common.c static inline int verify_chain(Indirect *from, Indirect *to)
to                 20 fs/minix/itree_common.c 	while (from <= to && from->key == *from->p)
to                 22 fs/minix/itree_common.c 	return (from > to);
to               4521 fs/namei.c     	struct filename *to;
to               4547 fs/namei.c     	to = filename_parentat(newdfd, getname(newname), lookup_flags,
to               4549 fs/namei.c     	if (IS_ERR(to)) {
to               4550 fs/namei.c     		error = PTR_ERR(to);
to               4641 fs/namei.c     	putname(to);
to               2988 fs/namespace.c static long exact_copy_from_user(void *to, const void __user * from,
to               2991 fs/namespace.c 	char *t = to;
to                449 fs/nfs/client.c void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
to                452 fs/nfs/client.c 	to->to_initval = timeo * HZ / 10;
to                453 fs/nfs/client.c 	to->to_retries = retrans;
to                459 fs/nfs/client.c 			to->to_retries = NFS_DEF_TCP_RETRANS;
to                460 fs/nfs/client.c 		if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to                461 fs/nfs/client.c 			to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
to                462 fs/nfs/client.c 		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
to                463 fs/nfs/client.c 			to->to_initval = NFS_MAX_TCP_TIMEOUT;
to                464 fs/nfs/client.c 		to->to_increment = to->to_initval;
to                465 fs/nfs/client.c 		to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
to                466 fs/nfs/client.c 		if (to->to_maxval > NFS_MAX_TCP_TIMEOUT)
to                467 fs/nfs/client.c 			to->to_maxval = NFS_MAX_TCP_TIMEOUT;
to                468 fs/nfs/client.c 		if (to->to_maxval < to->to_initval)
to                469 fs/nfs/client.c 			to->to_maxval = to->to_initval;
to                470 fs/nfs/client.c 		to->to_exponential = 0;
to                474 fs/nfs/client.c 			to->to_retries = NFS_DEF_UDP_RETRANS;
to                475 fs/nfs/client.c 		if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to                476 fs/nfs/client.c 			to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
to                477 fs/nfs/client.c 		if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
to                478 fs/nfs/client.c 			to->to_initval = NFS_MAX_UDP_TIMEOUT;
to                479 fs/nfs/client.c 		to->to_maxval = NFS_MAX_UDP_TIMEOUT;
to                480 fs/nfs/client.c 		to->to_exponential = 1;
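nfs_init_timeout_values() turns the timeo (tenths of a second) and retrans mount options into an RPC timeout: the initial value is timeo * HZ / 10, TCP backs off linearly by that same increment, and the maximum is the initial value plus increment * retries, capped at NFS_MAX_TCP_TIMEOUT. A worked example of the arithmetic only, for the assumed values timeo=600 and retrans=2:

	#include <linux/jiffies.h>	/* HZ */

	static void nfs_tcp_timeout_example(void)
	{
		unsigned long initval   = 600 * HZ / 10;		/* timeo=600  -> 60 s  */
		unsigned long increment = initval;			/* linear backoff      */
		unsigned long maxval    = initval + increment * 2;	/* retrans=2  -> 180 s */

		/* The real code additionally clamps maxval to NFS_MAX_TCP_TIMEOUT. */
		(void)maxval;
	}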
to                155 fs/nfs/file.c  nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
to                161 fs/nfs/file.c  		return nfs_file_direct_read(iocb, to);
to                165 fs/nfs/file.c  		iov_iter_count(to), (unsigned long) iocb->ki_pos);
to                170 fs/nfs/file.c  		result = generic_file_read_iter(iocb, to);
to                161 fs/nfs/internal.h void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
to                 88 fs/nilfs2/dir.c 			       unsigned int to)
to                 92 fs/nilfs2/dir.c 	return __block_write_begin(page, pos, to - from, nilfs_get_block);
to                 97 fs/nilfs2/dir.c 			       unsigned int from, unsigned int to)
to                101 fs/nilfs2/dir.c 	unsigned int len = to - from;
to                105 fs/nilfs2/dir.c 	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
to                421 fs/nilfs2/dir.c 	unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len);
to                426 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
to                430 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, mapping, from, to);
to                451 fs/nilfs2/dir.c 	unsigned int from, to;
to                505 fs/nilfs2/dir.c 	to = from + rec_len;
to                506 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
to                521 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, page->mapping, from, to);
to                543 fs/nilfs2/dir.c 	unsigned int from, to;
to                548 fs/nilfs2/dir.c 	to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len);
to                564 fs/nilfs2/dir.c 	err = nilfs_prepare_chunk(page, from, to);
to                567 fs/nilfs2/dir.c 		pde->rec_len = nilfs_rec_len_to_disk(to - from);
to                569 fs/nilfs2/dir.c 	nilfs_commit_chunk(page, mapping, from, to);
to                246 fs/nilfs2/inode.c void nilfs_write_failed(struct address_space *mapping, loff_t to)
to                250 fs/nilfs2/inode.c 	if (to > inode->i_size) {
to                271 fs/nilfs2/nilfs.h extern void nilfs_write_failed(struct address_space *mapping, loff_t to);
to                425 fs/nilfs2/page.c 					    unsigned int from, unsigned int to)
to                435 fs/nilfs2/page.c 		if (block_end > from && block_start < to && !buffer_dirty(bh))
to               6806 fs/ocfs2/alloc.c 			      unsigned int from, unsigned int to,
to               6811 fs/ocfs2/alloc.c 	loff_t length = to - from;
to               6813 fs/ocfs2/alloc.c 	ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
to               6818 fs/ocfs2/alloc.c 		zero_user_segment(page, from, to);
to               6826 fs/ocfs2/alloc.c 				from, to, &partial,
to               6849 fs/ocfs2/alloc.c 	unsigned int from, to = PAGE_SIZE;
to               6857 fs/ocfs2/alloc.c 	to = PAGE_SIZE;
to               6863 fs/ocfs2/alloc.c 			to = end & (PAGE_SIZE - 1);
to               6866 fs/ocfs2/alloc.c 		BUG_ON(to > PAGE_SIZE);
to               6868 fs/ocfs2/alloc.c 		ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
to                262 fs/ocfs2/alloc.h 			      unsigned int from, unsigned int to,
to                428 fs/ocfs2/aops.c 			unsigned to,
to                445 fs/ocfs2/aops.c 		if (block_end <= from || block_start >= to) {
to                555 fs/ocfs2/aops.c 				     unsigned from, unsigned to)
to                564 fs/ocfs2/aops.c 	if (from || to) {
to                567 fs/ocfs2/aops.c 		if (to < cluster_end)
to                568 fs/ocfs2/aops.c 			memset(kaddr + to, 0, cluster_end - to);
to                606 fs/ocfs2/aops.c 			  unsigned int to, int new)
to                627 fs/ocfs2/aops.c 		if (block_start >= to || block_end <= from) {
to                651 fs/ocfs2/aops.c 			   (block_start < from || block_end > to)) {
to                681 fs/ocfs2/aops.c 		if (block_start >= to)
to                888 fs/ocfs2/aops.c static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
to                903 fs/ocfs2/aops.c 			if (block_end > from && block_start < to) {
to                908 fs/ocfs2/aops.c 					end = min(to, block_end);
to                934 fs/ocfs2/aops.c 		to = user_pos + user_len;
to                938 fs/ocfs2/aops.c 		ocfs2_zero_new_buffers(wc->w_target_page, from, to);
to                948 fs/ocfs2/aops.c 			block_commit_write(tmppage, from, to);
to               1967 fs/ocfs2/aops.c 	unsigned from, to, start = pos & (PAGE_SIZE - 1);
to               2011 fs/ocfs2/aops.c 			to = wc->w_target_to;
to               2014 fs/ocfs2/aops.c 			       to > PAGE_SIZE ||
to               2015 fs/ocfs2/aops.c 			       to < from);
to               2023 fs/ocfs2/aops.c 			to = PAGE_SIZE;
to               2031 fs/ocfs2/aops.c 				loff_t length = to - from;
to               2035 fs/ocfs2/aops.c 			block_commit_write(tmppage, from, to);
to                 16 fs/ocfs2/aops.h 							 unsigned to);
to                 20 fs/ocfs2/aops.h 			  unsigned int to, int new);
to                 27 fs/ocfs2/aops.h 			unsigned to,
to                 88 fs/ocfs2/dlm/dlmmaster.c 				 struct dlm_master_list_entry *mle, int to);
to               1309 fs/ocfs2/dlm/dlmmaster.c 				 struct dlm_master_list_entry *mle, int to)
to               1325 fs/ocfs2/dlm/dlmmaster.c 				 sizeof(request), to, &response);
to               1348 fs/ocfs2/dlm/dlmmaster.c 		mlog(ML_ERROR, "link to %d went down!\n", to);
to               1357 fs/ocfs2/dlm/dlmmaster.c 			set_bit(to, mle->response_map);
to               1358 fs/ocfs2/dlm/dlmmaster.c 			mlog(0, "node %u is the master, response=YES\n", to);
to               1361 fs/ocfs2/dlm/dlmmaster.c 			     res->lockname.name, to);
to               1362 fs/ocfs2/dlm/dlmmaster.c 			mle->master = to;
to               1365 fs/ocfs2/dlm/dlmmaster.c 			mlog(0, "node %u not master, response=NO\n", to);
to               1366 fs/ocfs2/dlm/dlmmaster.c 			set_bit(to, mle->response_map);
to               1369 fs/ocfs2/dlm/dlmmaster.c 			mlog(0, "node %u not master, response=MAYBE\n", to);
to               1370 fs/ocfs2/dlm/dlmmaster.c 			set_bit(to, mle->response_map);
to               1371 fs/ocfs2/dlm/dlmmaster.c 			set_bit(to, mle->maybe_map);
to               1374 fs/ocfs2/dlm/dlmmaster.c 			mlog(0, "node %u hit an error, resending\n", to);
to               1657 fs/ocfs2/dlm/dlmmaster.c 	int to, tmpret;
to               1675 fs/ocfs2/dlm/dlmmaster.c 	while ((to = dlm_node_iter_next(&iter)) >= 0) {
to               1679 fs/ocfs2/dlm/dlmmaster.c 		mlog(0, "sending assert master to %d (%.*s)\n", to,
to               1688 fs/ocfs2/dlm/dlmmaster.c 					    &assert, sizeof(assert), to, &r);
to               1692 fs/ocfs2/dlm/dlmmaster.c 			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
to               1698 fs/ocfs2/dlm/dlmmaster.c 			mlog(0, "link to %d went down!\n", to);
to               1705 fs/ocfs2/dlm/dlmmaster.c 			     "got %d.\n", namelen, lockname, to, r);
to               1722 fs/ocfs2/dlm/dlmmaster.c 				     namelen, lockname, to);
to               1728 fs/ocfs2/dlm/dlmmaster.c 			     namelen, lockname, to);
to               1734 fs/ocfs2/dlm/dlmmaster.c 			     namelen, lockname, to);
to               1736 fs/ocfs2/dlm/dlmmaster.c 			dlm_lockres_set_refmap_bit(dlm, res, to);
to               2433 fs/ocfs2/file.c 				   struct iov_iter *to)
to               2445 fs/ocfs2/file.c 			to->nr_segs);	/* GRRRRR */
to               2495 fs/ocfs2/file.c 	ret = generic_file_read_iter(iocb, to);
to               2913 fs/ocfs2/refcounttree.c 	unsigned int from, to;
to               2937 fs/ocfs2/refcounttree.c 		to = PAGE_SIZE;
to               2939 fs/ocfs2/refcounttree.c 			to = map_end & (PAGE_SIZE - 1);
to               2974 fs/ocfs2/refcounttree.c 						from, to, &partial,
to               2983 fs/ocfs2/refcounttree.c 					 handle, from, to,
to               1377 fs/ocfs2/super.c 			if (((args[0].to - args[0].from) !=
to                309 fs/omfs/file.c static void omfs_write_failed(struct address_space *mapping, loff_t to)
to                313 fs/omfs/file.c 	if (to > inode->i_size) {
to                492 fs/orangefs/orangefs-bufmap.c 	struct orangefs_bufmap_desc *to;
to                499 fs/orangefs/orangefs-bufmap.c 	to = &__orangefs_bufmap->desc_array[buffer_index];
to                501 fs/orangefs/orangefs-bufmap.c 		struct page *page = to->page_array[i];
to                381 fs/overlayfs/overlayfs.h static inline void ovl_copyattr(struct inode *from, struct inode *to)
to                383 fs/overlayfs/overlayfs.h 	to->i_uid = from->i_uid;
to                384 fs/overlayfs/overlayfs.h 	to->i_gid = from->i_gid;
to                385 fs/overlayfs/overlayfs.h 	to->i_mode = from->i_mode;
to                386 fs/overlayfs/overlayfs.h 	to->i_atime = from->i_atime;
to                387 fs/overlayfs/overlayfs.h 	to->i_mtime = from->i_mtime;
to                388 fs/overlayfs/overlayfs.h 	to->i_ctime = from->i_ctime;
to                389 fs/overlayfs/overlayfs.h 	i_size_write(to, i_size_read(from));
to                392 fs/overlayfs/overlayfs.h static inline void ovl_copyflags(struct inode *from, struct inode *to)
to                396 fs/overlayfs/overlayfs.h 	inode_set_flags(to, from->i_flags & mask, mask);
to                272 fs/pipe.c      pipe_read(struct kiocb *iocb, struct iov_iter *to)
to                274 fs/pipe.c      	size_t total_len = iov_iter_count(to);
to                306 fs/pipe.c      			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
to                667 fs/posix_acl.c 	struct user_namespace *to, struct user_namespace *from,
to                693 fs/posix_acl.c 			entry->e_id = cpu_to_le32(from_kuid(to, uid));
to                697 fs/posix_acl.c 			entry->e_id = cpu_to_le32(from_kgid(to, gid));
to                 73 fs/proc/proc_tty.c 	dev_t to = from + p->num;
to                 95 fs/proc/proc_tty.c 	while (MAJOR(from) < MAJOR(to)) {
to                100 fs/proc/proc_tty.c 	if (from != to)
to                101 fs/proc/proc_tty.c 		show_tty_range(m, p, from, to - from);
to                177 fs/reiserfs/file.c 			 unsigned from, unsigned to)
to                208 fs/reiserfs/file.c 		if (block_end <= from || block_start >= to) {
to                376 fs/reiserfs/fix_node.c 		       int to, int to_bytes, short *snum012, int flow)
to                431 fs/reiserfs/fix_node.c 		i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
to                447 fs/reiserfs/fix_node.c 	end_item = vn->vn_nr_item - to - 1;
to                127 fs/reiserfs/ibalance.c 				   int to, int count,
to                151 fs/reiserfs/ibalance.c 	dc = B_N_CHILD(cur, to + 1);
to                153 fs/reiserfs/ibalance.c 	memmove(dc + count, dc, (nr + 1 - (to + 1)) * DC_SIZE);
to                164 fs/reiserfs/ibalance.c 	ih = internal_key(cur, ((to == -1) ? 0 : to));
to                167 fs/reiserfs/ibalance.c 		(nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);
to                 25 fs/reiserfs/inode.c 			  unsigned from, unsigned to);
to               2999 fs/reiserfs/inode.c 			  unsigned from, unsigned to)
to               3002 fs/reiserfs/inode.c 	loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
to               3015 fs/reiserfs/inode.c 	reiserfs_commit_page(inode, page, from, to);
to                162 fs/reiserfs/ioctl.c 			  unsigned from, unsigned to);
to                583 fs/reiserfs/item_ops.c 	int from, to;
to                591 fs/reiserfs/item_ops.c 	to = from + count - 1;
to                593 fs/reiserfs/item_ops.c 	for (i = from; i <= to; i++)
to                912 fs/reiserfs/lbalance.c 	char *to;
to                936 fs/reiserfs/lbalance.c 	to = bh->b_data + unmoved_loc - ih_item_len(inserted_item_ih);
to                937 fs/reiserfs/lbalance.c 	memset(to, 0, zeros_number);
to                938 fs/reiserfs/lbalance.c 	to += zeros_number;
to                942 fs/reiserfs/lbalance.c 		memmove(to, inserted_item_body,
to                945 fs/reiserfs/lbalance.c 		memset(to, '\0', ih_item_len(inserted_item_ih) - zeros_number);
to                447 fs/reiserfs/prints.c 	int from, to;
to                456 fs/reiserfs/prints.c 		to = B_NR_ITEMS(bh);
to                459 fs/reiserfs/prints.c 		to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
to                467 fs/reiserfs/prints.c 	for (i = from, key = internal_key(bh, from), dc++; i < to;
to                483 fs/reiserfs/prints.c 	int from, to;
to                510 fs/reiserfs/prints.c 		to = nr;
to                512 fs/reiserfs/prints.c 		to = last;
to                519 fs/reiserfs/prints.c 	for (i = from; i < to; i++, ih++) {
to               2641 fs/reiserfs/reiserfs.h 	int (*part_size) (struct virtual_item * vi, int from, int to);
to               2655 fs/reiserfs/reiserfs.h #define op_part_size(vi,from,to)                     item_ops[(vi)->vi_index]->part_size (vi, from, to)
to               2919 fs/reiserfs/reiserfs.h 			 unsigned from, unsigned to);
to               2967 fs/reiserfs/reiserfs.h extern void copy_item_head(struct item_head *to,
to               2973 fs/reiserfs/reiserfs.h extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from);
to               2995 fs/reiserfs/reiserfs.h static inline void copy_key(struct reiserfs_key *to,
to               2998 fs/reiserfs/reiserfs.h 	memcpy(to, from, KEY_SIZE);
to                 30 fs/reiserfs/stree.c inline void copy_item_head(struct item_head *to,
to                 33 fs/reiserfs/stree.c 	memcpy(to, from, IH_SIZE);
to                115 fs/reiserfs/stree.c inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
to                118 fs/reiserfs/stree.c 	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
to                119 fs/reiserfs/stree.c 	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);
to                123 fs/reiserfs/stree.c 	to->version = version;
to                124 fs/reiserfs/stree.c 	to->on_disk_key.k_offset = le_key_k_offset(version, from);
to                125 fs/reiserfs/stree.c 	to->on_disk_key.k_type = le_key_k_type(version, from);
to                470 fs/reiserfs/xattr.c 			  unsigned from, unsigned to);
to                273 fs/select.c    int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
to                282 fs/select.c    		to->tv_sec = to->tv_nsec = 0;
to                284 fs/select.c    		ktime_get_ts64(to);
to                285 fs/select.c    		*to = timespec64_add_safe(*to, ts);
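
The fs/select.c hits center on poll_select_set_timeout(), which converts a relative (sec, nsec) pair into an absolute deadline on the monotonic clock, with (0, 0) meaning "poll once and return". A userspace sketch of the same shape, using clock_gettime() in place of ktime_get_ts64() and a plain nanosecond carry in place of timespec64_add_safe() (so no saturation on overflow):

#include <time.h>

#define NSEC_PER_SEC	1000000000L

/* Sketch only: build an absolute monotonic deadline from a relative timeout. */
static int set_deadline(struct timespec *to, time_t sec, long nsec)
{
	if (sec < 0 || nsec < 0 || nsec >= NSEC_PER_SEC)
		return -1;			/* reject malformed timeouts */

	if (sec == 0 && nsec == 0) {
		to->tv_sec = to->tv_nsec = 0;	/* zero timeout: do not sleep */
		return 0;
	}

	clock_gettime(CLOCK_MONOTONIC, to);
	to->tv_sec += sec;
	to->tv_nsec += nsec;
	if (to->tv_nsec >= NSEC_PER_SEC) {	/* carry nanoseconds into seconds */
		to->tv_nsec -= NSEC_PER_SEC;
		to->tv_sec++;
	}
	return 0;
}

The later select/poll hits all follow the same calling pattern: `to` stays NULL for "wait forever", and only points at end_time once a finite timeout has been set up.
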
to                478 fs/select.c    	ktime_t expire, *to = NULL;
to                598 fs/select.c    		if (end_time && !to) {
to                600 fs/select.c    			to = &expire;
to                604 fs/select.c    					   to, slack))
to                703 fs/select.c    	struct timespec64 end_time, *to = NULL;
to                711 fs/select.c    		to = &end_time;
to                712 fs/select.c    		if (poll_select_set_timeout(to,
to                718 fs/select.c    	ret = core_sys_select(n, inp, outp, exp, to);
to                733 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to                750 fs/select.c    		to = &end_time;
to                751 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to                759 fs/select.c    	ret = core_sys_select(n, inp, outp, exp, to);
to                875 fs/select.c    	ktime_t expire, *to = NULL;
to                946 fs/select.c    		if (end_time && !to) {
to                948 fs/select.c    			to = &expire;
to                951 fs/select.c    		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
to               1029 fs/select.c    	struct timespec64 *to = NULL, end_time;
to               1035 fs/select.c    		to = &end_time;
to               1038 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to               1050 fs/select.c    	struct timespec64 end_time, *to = NULL;
to               1054 fs/select.c    		to = &end_time;
to               1055 fs/select.c    		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
to               1059 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to               1085 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to               1092 fs/select.c    		to = &end_time;
to               1093 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to               1101 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to               1111 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to               1118 fs/select.c    		to = &end_time;
to               1119 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to               1127 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to               1249 fs/select.c    	struct timespec64 end_time, *to = NULL;
to               1257 fs/select.c    		to = &end_time;
to               1258 fs/select.c    		if (poll_select_set_timeout(to,
to               1264 fs/select.c    	ret = compat_core_sys_select(n, inp, outp, exp, to);
to               1298 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to               1315 fs/select.c    		to = &end_time;
to               1316 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to               1324 fs/select.c    	ret = compat_core_sys_select(n, inp, outp, exp, to);
to               1377 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to               1384 fs/select.c    		to = &end_time;
to               1385 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to               1393 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to               1403 fs/select.c    	struct timespec64 ts, end_time, *to = NULL;
to               1410 fs/select.c    		to = &end_time;
to               1411 fs/select.c    		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
to               1419 fs/select.c    	ret = do_sys_poll(ufds, nfds, to);
to                299 fs/splice.c    	struct iov_iter to;
to                303 fs/splice.c    	iov_iter_pipe(&to, READ, pipe, len);
to                304 fs/splice.c    	idx = to.idx;
to                307 fs/splice.c    	ret = call_read_iter(in, &kiocb, &to);
to                312 fs/splice.c    		to.idx = idx;
to                313 fs/splice.c    		to.iov_offset = 0;
to                314 fs/splice.c    		iov_iter_advance(&to, 0); /* to free what was emitted */
to                370 fs/splice.c    	struct iov_iter to;
to                386 fs/splice.c    	iov_iter_pipe(&to, READ, pipe, len + offset);
to                388 fs/splice.c    	res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base);
to                403 fs/splice.c    	pipe->bufs[to.idx].offset = offset;
to                404 fs/splice.c    	pipe->bufs[to.idx].len -= offset;
to                426 fs/splice.c    	iov_iter_advance(&to, copied);	/* truncates and discards */
to                110 fs/sysv/inode.c 	unsigned char * from, unsigned char * to)
to                113 fs/sysv/inode.c 		to[0] = from[0];
to                114 fs/sysv/inode.c 		to[1] = 0;
to                115 fs/sysv/inode.c 		to[2] = from[1];
to                116 fs/sysv/inode.c 		to[3] = from[2];
to                118 fs/sysv/inode.c 		to[0] = from[0];
to                119 fs/sysv/inode.c 		to[1] = from[1];
to                120 fs/sysv/inode.c 		to[2] = from[2];
to                121 fs/sysv/inode.c 		to[3] = 0;
to                123 fs/sysv/inode.c 		to[0] = 0;
to                124 fs/sysv/inode.c 		to[1] = from[0];
to                125 fs/sysv/inode.c 		to[2] = from[1];
to                126 fs/sysv/inode.c 		to[3] = from[2];
to                131 fs/sysv/inode.c 	unsigned char * from, unsigned char * to)
to                134 fs/sysv/inode.c 		to[0] = from[0];
to                135 fs/sysv/inode.c 		to[1] = from[2];
to                136 fs/sysv/inode.c 		to[2] = from[3];
to                138 fs/sysv/inode.c 		to[0] = from[0];
to                139 fs/sysv/inode.c 		to[1] = from[1];
to                140 fs/sysv/inode.c 		to[2] = from[2];
to                142 fs/sysv/inode.c 		to[0] = from[1];
to                143 fs/sysv/inode.c 		to[1] = from[2];
to                144 fs/sysv/inode.c 		to[2] = from[3];
to                 73 fs/sysv/itree.c static inline int verify_chain(Indirect *from, Indirect *to)
to                 75 fs/sysv/itree.c 	while (from <= to && from->key == *from->p)
to                 77 fs/sysv/itree.c 	return (from > to);
to                469 fs/sysv/itree.c static void sysv_write_failed(struct address_space *mapping, loff_t to)
to                473 fs/sysv/itree.c 	if (to > inode->i_size) {
to               2550 fs/ubifs/debug.c 	unsigned int from, to, ffs = chance(1, 2);
to               2555 fs/ubifs/debug.c 	to = min(len, ALIGN(from + 1, c->max_write_size));
to               2557 fs/ubifs/debug.c 	ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1,
to               2561 fs/ubifs/debug.c 		memset(p + from, 0xFF, to - from);
to               2563 fs/ubifs/debug.c 		prandom_bytes(p + from, to - from);
to               2565 fs/ubifs/debug.c 	return to;
to                419 fs/ubifs/key.h 			    union ubifs_key *to)
to                423 fs/ubifs/key.h 	to->u32[0] = le32_to_cpu(f->j32[0]);
to                424 fs/ubifs/key.h 	to->u32[1] = le32_to_cpu(f->j32[1]);
to                434 fs/ubifs/key.h 			     const union ubifs_key *from, void *to)
to                436 fs/ubifs/key.h 	union ubifs_key *t = to;
to                440 fs/ubifs/key.h 	memset(to + 8, 0, UBIFS_MAX_KEY_LEN - 8);
to                450 fs/ubifs/key.h 				 const union ubifs_key *from, void *to)
to                452 fs/ubifs/key.h 	union ubifs_key *t = to;
to                465 fs/ubifs/key.h 			    const union ubifs_key *from, union ubifs_key *to)
to                467 fs/ubifs/key.h 	to->u64[0] = from->u64[0];
to               1639 fs/ubifs/ubifs.h 				   u8 *to)
to               1642 fs/ubifs/ubifs.h 		memcpy(to, from, c->hash_len);
to                165 fs/udf/inode.c static void udf_write_failed(struct address_space *mapping, loff_t to)
to                171 fs/udf/inode.c 	if (to > isize) {
to                 33 fs/udf/symlink.c 			  int fromlen, unsigned char *to, int tolen)
to                 38 fs/udf/symlink.c 	unsigned char *p = to;
to                 59 fs/udf/symlink.c 			p = to;
to                 97 fs/udf/symlink.c 	if (p > to + 1)
to                501 fs/ufs/dir.c   	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
to                529 fs/ufs/dir.c   	err = ufs_prepare_chunk(page, pos, to - from);
to                532 fs/ufs/dir.c   		pde->d_reclen = cpu_to_fs16(sb, to - from);
to                534 fs/ufs/dir.c   	err = ufs_commit_chunk(page, pos, to - from);
to                 89 fs/ufs/inode.c 			       Indirect *from, Indirect *to)
to                 93 fs/ufs/inode.c 	to->bh = bh;
to                 96 fs/ufs/inode.c 		to->key32 = *(__fs32 *)(to->p = v);
to                 97 fs/ufs/inode.c 		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
to                100 fs/ufs/inode.c 	return (p > to);
to                105 fs/ufs/inode.c 			       Indirect *from, Indirect *to)
to                109 fs/ufs/inode.c 	to->bh = bh;
to                112 fs/ufs/inode.c 		to->key64 = *(__fs64 *)(to->p = v);
to                113 fs/ufs/inode.c 		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
to                116 fs/ufs/inode.c 	return (p > to);
to                487 fs/ufs/inode.c static void ufs_write_failed(struct address_space *mapping, loff_t to)
to                491 fs/ufs/inode.c 	if (to > inode->i_size) {
to                872 fs/ufs/inode.c 	u64 to;
to                878 fs/ufs/inode.c 	if (ctx->count && ctx->to != from) {
to                879 fs/ufs/inode.c 		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
to                883 fs/ufs/inode.c 	ctx->to = from + count;
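
The fs/ufs/inode.c hits around line 872 show a small coalescing context: frees of adjacent block runs are accumulated, and the real free only happens when the next run is not contiguous with what has been gathered so far. A sketch of that pattern, with free_blocks() as a hypothetical stand-in for ufs_free_blocks():

struct free_ctx {
	unsigned long long to;		/* end of the accumulated run */
	unsigned int count;		/* blocks accumulated so far */
};

static void free_blocks(unsigned long long start, unsigned int count);

/* Illustrative only: flush on a gap, otherwise keep extending the run. */
static void queue_free(struct free_ctx *ctx, unsigned long long from,
		       unsigned int count)
{
	if (ctx->count && ctx->to != from) {	/* not adjacent: flush */
		free_blocks(ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
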
to                766 fs/userfaultfd.c 				 unsigned long from, unsigned long to,
to                775 fs/userfaultfd.c 	if (to & ~PAGE_MASK) {
to                784 fs/userfaultfd.c 	ewq.msg.arg.remap.to = to;
to                 94 fs/xfs/libxfs/xfs_attr_leaf.c 	struct xfs_attr3_icleaf_hdr	*to,
to                101 fs/xfs/libxfs/xfs_attr_leaf.c 		to->firstused = be16_to_cpu(hdr3->firstused);
to                103 fs/xfs/libxfs/xfs_attr_leaf.c 		to->firstused = be16_to_cpu(from->hdr.firstused);
to                111 fs/xfs/libxfs/xfs_attr_leaf.c 	if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) {
to                112 fs/xfs/libxfs/xfs_attr_leaf.c 		ASSERT(!to->count && !to->usedbytes);
to                114 fs/xfs/libxfs/xfs_attr_leaf.c 		to->firstused = geo->blksize;
to                121 fs/xfs/libxfs/xfs_attr_leaf.c 	struct xfs_attr_leafblock	*to,
to                142 fs/xfs/libxfs/xfs_attr_leaf.c 		hdr3 = (struct xfs_attr3_leaf_hdr *) to;
to                145 fs/xfs/libxfs/xfs_attr_leaf.c 		to->hdr.firstused = cpu_to_be16(firstused);
to                152 fs/xfs/libxfs/xfs_attr_leaf.c 	struct xfs_attr3_icleaf_hdr	*to,
to                163 fs/xfs/libxfs/xfs_attr_leaf.c 		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
to                164 fs/xfs/libxfs/xfs_attr_leaf.c 		to->back = be32_to_cpu(hdr3->info.hdr.back);
to                165 fs/xfs/libxfs/xfs_attr_leaf.c 		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
to                166 fs/xfs/libxfs/xfs_attr_leaf.c 		to->count = be16_to_cpu(hdr3->count);
to                167 fs/xfs/libxfs/xfs_attr_leaf.c 		to->usedbytes = be16_to_cpu(hdr3->usedbytes);
to                168 fs/xfs/libxfs/xfs_attr_leaf.c 		xfs_attr3_leaf_firstused_from_disk(geo, to, from);
to                169 fs/xfs/libxfs/xfs_attr_leaf.c 		to->holes = hdr3->holes;
to                172 fs/xfs/libxfs/xfs_attr_leaf.c 			to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
to                173 fs/xfs/libxfs/xfs_attr_leaf.c 			to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
to                177 fs/xfs/libxfs/xfs_attr_leaf.c 	to->forw = be32_to_cpu(from->hdr.info.forw);
to                178 fs/xfs/libxfs/xfs_attr_leaf.c 	to->back = be32_to_cpu(from->hdr.info.back);
to                179 fs/xfs/libxfs/xfs_attr_leaf.c 	to->magic = be16_to_cpu(from->hdr.info.magic);
to                180 fs/xfs/libxfs/xfs_attr_leaf.c 	to->count = be16_to_cpu(from->hdr.count);
to                181 fs/xfs/libxfs/xfs_attr_leaf.c 	to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
to                182 fs/xfs/libxfs/xfs_attr_leaf.c 	xfs_attr3_leaf_firstused_from_disk(geo, to, from);
to                183 fs/xfs/libxfs/xfs_attr_leaf.c 	to->holes = from->hdr.holes;
to                186 fs/xfs/libxfs/xfs_attr_leaf.c 		to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
to                187 fs/xfs/libxfs/xfs_attr_leaf.c 		to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
to                194 fs/xfs/libxfs/xfs_attr_leaf.c 	struct xfs_attr_leafblock	*to,
to                203 fs/xfs/libxfs/xfs_attr_leaf.c 		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;
to                210 fs/xfs/libxfs/xfs_attr_leaf.c 		xfs_attr3_leaf_firstused_to_disk(geo, to, from);
to                220 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.info.forw = cpu_to_be32(from->forw);
to                221 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.info.back = cpu_to_be32(from->back);
to                222 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.info.magic = cpu_to_be16(from->magic);
to                223 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.count = cpu_to_be16(from->count);
to                224 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
to                225 fs/xfs/libxfs/xfs_attr_leaf.c 	xfs_attr3_leaf_firstused_to_disk(geo, to, from);
to                226 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.holes = from->holes;
to                227 fs/xfs/libxfs/xfs_attr_leaf.c 	to->hdr.pad1 = 0;
to                230 fs/xfs/libxfs/xfs_attr_leaf.c 		to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
to                231 fs/xfs/libxfs/xfs_attr_leaf.c 		to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
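
The xfs_attr_leaf.c hits above, and most of the xfs conversion hits that follow (xfs_da_format.c, xfs_inode_buf.c, xfs_sb.c), are instances of one pattern: an on-disk structure with fixed big-endian fields is copied field by field to and from an in-core structure in native byte order. A userspace sketch of that round trip, using <endian.h> helpers in place of the kernel's be32_to_cpu()/cpu_to_be32(); the struct and field names here are made up for illustration:

#include <endian.h>
#include <stdint.h>

struct disk_hdr {			/* on-disk layout: always big-endian */
	uint32_t forw_be;
	uint32_t back_be;
	uint16_t count_be;
};

struct ichdr {				/* in-core copy: native endianness */
	uint32_t forw;
	uint32_t back;
	uint16_t count;
};

static void hdr_from_disk(struct ichdr *to, const struct disk_hdr *from)
{
	to->forw  = be32toh(from->forw_be);
	to->back  = be32toh(from->back_be);
	to->count = be16toh(from->count_be);
}

static void hdr_to_disk(struct disk_hdr *to, const struct ichdr *from)
{
	to->forw_be  = htobe32(from->forw);
	to->back_be  = htobe32(from->back);
	to->count_be = htobe16(from->count);
}
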
to                 91 fs/xfs/libxfs/xfs_attr_leaf.h 				     struct xfs_attr3_icleaf_hdr *to,
to                 94 fs/xfs/libxfs/xfs_attr_leaf.h 				   struct xfs_attr_leafblock *to,
to                123 fs/xfs/libxfs/xfs_da_format.c 	uint8_t			*to,
to                129 fs/xfs/libxfs/xfs_da_format.c 		put_unaligned_be64(ino, to);
to                131 fs/xfs/libxfs/xfs_da_format.c 		put_unaligned_be32(ino, to);
to                434 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_icleaf_hdr	*to,
to                437 fs/xfs/libxfs/xfs_da_format.c 	to->forw = be32_to_cpu(from->hdr.info.forw);
to                438 fs/xfs/libxfs/xfs_da_format.c 	to->back = be32_to_cpu(from->hdr.info.back);
to                439 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be16_to_cpu(from->hdr.info.magic);
to                440 fs/xfs/libxfs/xfs_da_format.c 	to->count = be16_to_cpu(from->hdr.count);
to                441 fs/xfs/libxfs/xfs_da_format.c 	to->stale = be16_to_cpu(from->hdr.stale);
to                443 fs/xfs/libxfs/xfs_da_format.c 	ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
to                444 fs/xfs/libxfs/xfs_da_format.c 	       to->magic == XFS_DIR2_LEAFN_MAGIC);
to                449 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir2_leaf		*to,
to                455 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.forw = cpu_to_be32(from->forw);
to                456 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.back = cpu_to_be32(from->back);
to                457 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.magic = cpu_to_be16(from->magic);
to                458 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.count = cpu_to_be16(from->count);
to                459 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.stale = cpu_to_be16(from->stale);
to                464 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_icleaf_hdr	*to,
to                469 fs/xfs/libxfs/xfs_da_format.c 	to->forw = be32_to_cpu(hdr3->info.hdr.forw);
to                470 fs/xfs/libxfs/xfs_da_format.c 	to->back = be32_to_cpu(hdr3->info.hdr.back);
to                471 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be16_to_cpu(hdr3->info.hdr.magic);
to                472 fs/xfs/libxfs/xfs_da_format.c 	to->count = be16_to_cpu(hdr3->count);
to                473 fs/xfs/libxfs/xfs_da_format.c 	to->stale = be16_to_cpu(hdr3->stale);
to                475 fs/xfs/libxfs/xfs_da_format.c 	ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
to                476 fs/xfs/libxfs/xfs_da_format.c 	       to->magic == XFS_DIR3_LEAFN_MAGIC);
to                481 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir2_leaf		*to,
to                484 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
to                514 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_da3_icnode_hdr	*to,
to                518 fs/xfs/libxfs/xfs_da_format.c 	to->forw = be32_to_cpu(from->hdr.info.forw);
to                519 fs/xfs/libxfs/xfs_da_format.c 	to->back = be32_to_cpu(from->hdr.info.back);
to                520 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be16_to_cpu(from->hdr.info.magic);
to                521 fs/xfs/libxfs/xfs_da_format.c 	to->count = be16_to_cpu(from->hdr.__count);
to                522 fs/xfs/libxfs/xfs_da_format.c 	to->level = be16_to_cpu(from->hdr.__level);
to                527 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_da_intnode		*to,
to                531 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.forw = cpu_to_be32(from->forw);
to                532 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.back = cpu_to_be32(from->back);
to                533 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.info.magic = cpu_to_be16(from->magic);
to                534 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.__count = cpu_to_be16(from->count);
to                535 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.__level = cpu_to_be16(from->level);
to                540 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_da3_icnode_hdr	*to,
to                546 fs/xfs/libxfs/xfs_da_format.c 	to->forw = be32_to_cpu(hdr3->info.hdr.forw);
to                547 fs/xfs/libxfs/xfs_da_format.c 	to->back = be32_to_cpu(hdr3->info.hdr.back);
to                548 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be16_to_cpu(hdr3->info.hdr.magic);
to                549 fs/xfs/libxfs/xfs_da_format.c 	to->count = be16_to_cpu(hdr3->__count);
to                550 fs/xfs/libxfs/xfs_da_format.c 	to->level = be16_to_cpu(hdr3->__level);
to                555 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_da_intnode		*to,
to                558 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
to                638 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_icfree_hdr	*to,
to                641 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be32_to_cpu(from->hdr.magic);
to                642 fs/xfs/libxfs/xfs_da_format.c 	to->firstdb = be32_to_cpu(from->hdr.firstdb);
to                643 fs/xfs/libxfs/xfs_da_format.c 	to->nvalid = be32_to_cpu(from->hdr.nvalid);
to                644 fs/xfs/libxfs/xfs_da_format.c 	to->nused = be32_to_cpu(from->hdr.nused);
to                645 fs/xfs/libxfs/xfs_da_format.c 	ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
to                650 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir2_free		*to,
to                655 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.magic = cpu_to_be32(from->magic);
to                656 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.firstdb = cpu_to_be32(from->firstdb);
to                657 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.nvalid = cpu_to_be32(from->nvalid);
to                658 fs/xfs/libxfs/xfs_da_format.c 	to->hdr.nused = cpu_to_be32(from->nused);
to                663 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_icfree_hdr	*to,
to                668 fs/xfs/libxfs/xfs_da_format.c 	to->magic = be32_to_cpu(hdr3->hdr.magic);
to                669 fs/xfs/libxfs/xfs_da_format.c 	to->firstdb = be32_to_cpu(hdr3->firstdb);
to                670 fs/xfs/libxfs/xfs_da_format.c 	to->nvalid = be32_to_cpu(hdr3->nvalid);
to                671 fs/xfs/libxfs/xfs_da_format.c 	to->nused = be32_to_cpu(hdr3->nused);
to                673 fs/xfs/libxfs/xfs_da_format.c 	ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
to                678 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir2_free		*to,
to                681 fs/xfs/libxfs/xfs_da_format.c 	struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
to                 74 fs/xfs/libxfs/xfs_dir2.h 	void	(*leaf_hdr_to_disk)(struct xfs_dir2_leaf *to,
to                 76 fs/xfs/libxfs/xfs_dir2.h 	void	(*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
to                 83 fs/xfs/libxfs/xfs_dir2.h 	void	(*node_hdr_to_disk)(struct xfs_da_intnode *to,
to                 85 fs/xfs/libxfs/xfs_dir2.h 	void	(*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
to                 91 fs/xfs/libxfs/xfs_dir2.h 	void	(*free_hdr_to_disk)(struct xfs_dir2_free *to,
to                 93 fs/xfs/libxfs/xfs_dir2.h 	void	(*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
to                915 fs/xfs/libxfs/xfs_dir2_block.c 	int			to;		/* block/leaf to index */
to               1006 fs/xfs/libxfs/xfs_dir2_block.c 	for (from = to = 0; from < leafhdr.count; from++) {
to               1009 fs/xfs/libxfs/xfs_dir2_block.c 		lep[to++] = ents[from];
to               1011 fs/xfs/libxfs/xfs_dir2_block.c 	ASSERT(to == be32_to_cpu(btp->count));
to                876 fs/xfs/libxfs/xfs_dir2_leaf.c 	int		to;		/* target leaf index */
to                888 fs/xfs/libxfs/xfs_dir2_leaf.c 	for (from = to = 0, loglow = -1; from < leafhdr->count; from++) {
to                894 fs/xfs/libxfs/xfs_dir2_leaf.c 		if (from > to) {
to                896 fs/xfs/libxfs/xfs_dir2_leaf.c 				loglow = to;
to                897 fs/xfs/libxfs/xfs_dir2_leaf.c 			ents[to] = ents[from];
to                899 fs/xfs/libxfs/xfs_dir2_leaf.c 		to++;
to                904 fs/xfs/libxfs/xfs_dir2_leaf.c 	ASSERT(leafhdr->stale == from - to);
to                911 fs/xfs/libxfs/xfs_dir2_leaf.c 		xfs_dir3_leaf_log_ents(args, bp, loglow, to - 1);
to                938 fs/xfs/libxfs/xfs_dir2_leaf.c 	int		to;		/* destination copy index */
to                958 fs/xfs/libxfs/xfs_dir2_leaf.c 	for (from = to = 0; from < leafhdr->count; from++) {
to                963 fs/xfs/libxfs/xfs_dir2_leaf.c 			newindex = to;
to                966 fs/xfs/libxfs/xfs_dir2_leaf.c 			if (from == to)
to                967 fs/xfs/libxfs/xfs_dir2_leaf.c 				*lowlogp = to;
to                974 fs/xfs/libxfs/xfs_dir2_leaf.c 			lowstale = highstale = to;
to                978 fs/xfs/libxfs/xfs_dir2_leaf.c 		if (from > to)
to                979 fs/xfs/libxfs/xfs_dir2_leaf.c 			ents[to] = ents[from];
to                980 fs/xfs/libxfs/xfs_dir2_leaf.c 		to++;
to                982 fs/xfs/libxfs/xfs_dir2_leaf.c 	ASSERT(from > to);
to                988 fs/xfs/libxfs/xfs_dir2_leaf.c 		newindex = to;
to                993 fs/xfs/libxfs/xfs_dir2_leaf.c 	leafhdr->count -= from - to;
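
The dir2 block and leaf hits above are variations of a two-index compaction: `from` walks every entry, `to` marks where the next live entry lands, and stale entries are squeezed out in place, with (from - to) giving the number removed. A generic sketch of the pattern on a plain int array:

/* Minimal in-place compaction sketch: keep entries that pass keep(),
 * move them down over the gaps, and return the new count. */
static int compact(int *ents, int count, int (*keep)(int))
{
	int from, to;

	for (from = to = 0; from < count; from++) {
		if (!keep(ents[from]))
			continue;
		if (from > to)
			ents[to] = ents[from];
		to++;
	}
	return to;
}
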
to                349 fs/xfs/libxfs/xfs_dir2_node.c 	__be16			*to;		/* pointer to freespace entry */
to                384 fs/xfs/libxfs/xfs_dir2_node.c 	to = dp->d_ops->free_bests_p(free);
to                385 fs/xfs/libxfs/xfs_dir2_node.c 	for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
to                388 fs/xfs/libxfs/xfs_dir2_node.c 		*to = cpu_to_be16(off);
to                205 fs/xfs/libxfs/xfs_inode_buf.c 	struct xfs_icdinode	*to = &ip->i_d;
to                213 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_version = from->di_version;
to                214 fs/xfs/libxfs/xfs_inode_buf.c 	if (to->di_version == 1) {
to                216 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_projid_lo = 0;
to                217 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_projid_hi = 0;
to                218 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_version = 2;
to                221 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
to                222 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
to                225 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_format = from->di_format;
to                226 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_uid = be32_to_cpu(from->di_uid);
to                227 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_gid = be32_to_cpu(from->di_gid);
to                228 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_flushiter = be16_to_cpu(from->di_flushiter);
to                245 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_size = be64_to_cpu(from->di_size);
to                246 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nblocks = be64_to_cpu(from->di_nblocks);
to                247 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_extsize = be32_to_cpu(from->di_extsize);
to                248 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nextents = be32_to_cpu(from->di_nextents);
to                249 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_anextents = be16_to_cpu(from->di_anextents);
to                250 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_forkoff = from->di_forkoff;
to                251 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_aformat	= from->di_aformat;
to                252 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
to                253 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
to                254 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_flags	= be16_to_cpu(from->di_flags);
to                256 fs/xfs/libxfs/xfs_inode_buf.c 	if (to->di_version == 3) {
to                259 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
to                260 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
to                261 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flags2 = be64_to_cpu(from->di_flags2);
to                262 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
to                269 fs/xfs/libxfs/xfs_inode_buf.c 	struct xfs_dinode	*to,
to                275 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
to                276 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_onlink = 0;
to                278 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_version = from->di_version;
to                279 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_format = from->di_format;
to                280 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_uid = cpu_to_be32(from->di_uid);
to                281 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_gid = cpu_to_be32(from->di_gid);
to                282 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
to                283 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
to                285 fs/xfs/libxfs/xfs_inode_buf.c 	memset(to->di_pad, 0, sizeof(to->di_pad));
to                286 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
to                287 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
to                288 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
to                289 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
to                290 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
to                291 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
to                292 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nlink = cpu_to_be32(inode->i_nlink);
to                293 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_gen = cpu_to_be32(inode->i_generation);
to                294 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mode = cpu_to_be16(inode->i_mode);
to                296 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_size = cpu_to_be64(from->di_size);
to                297 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nblocks = cpu_to_be64(from->di_nblocks);
to                298 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_extsize = cpu_to_be32(from->di_extsize);
to                299 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nextents = cpu_to_be32(from->di_nextents);
to                300 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_anextents = cpu_to_be16(from->di_anextents);
to                301 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_forkoff = from->di_forkoff;
to                302 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_aformat = from->di_aformat;
to                303 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
to                304 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
to                305 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_flags = cpu_to_be16(from->di_flags);
to                308 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to                309 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to                310 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
to                311 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flags2 = cpu_to_be64(from->di_flags2);
to                312 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to                313 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_ino = cpu_to_be64(ip->i_ino);
to                314 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_lsn = cpu_to_be64(lsn);
to                315 fs/xfs/libxfs/xfs_inode_buf.c 		memset(to->di_pad2, 0, sizeof(to->di_pad2));
to                316 fs/xfs/libxfs/xfs_inode_buf.c 		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to                317 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flushiter = 0;
to                319 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flushiter = cpu_to_be16(from->di_flushiter);
to                326 fs/xfs/libxfs/xfs_inode_buf.c 	struct xfs_dinode	*to)
to                328 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_magic = cpu_to_be16(from->di_magic);
to                329 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mode = cpu_to_be16(from->di_mode);
to                330 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_version = from->di_version;
to                331 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_format = from->di_format;
to                332 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_onlink = 0;
to                333 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_uid = cpu_to_be32(from->di_uid);
to                334 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_gid = cpu_to_be32(from->di_gid);
to                335 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nlink = cpu_to_be32(from->di_nlink);
to                336 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
to                337 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
to                338 fs/xfs/libxfs/xfs_inode_buf.c 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
to                340 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
to                341 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
to                342 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
to                343 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
to                344 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
to                345 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
to                347 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_size = cpu_to_be64(from->di_size);
to                348 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nblocks = cpu_to_be64(from->di_nblocks);
to                349 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_extsize = cpu_to_be32(from->di_extsize);
to                350 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_nextents = cpu_to_be32(from->di_nextents);
to                351 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_anextents = cpu_to_be16(from->di_anextents);
to                352 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_forkoff = from->di_forkoff;
to                353 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_aformat = from->di_aformat;
to                354 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
to                355 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
to                356 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_flags = cpu_to_be16(from->di_flags);
to                357 fs/xfs/libxfs/xfs_inode_buf.c 	to->di_gen = cpu_to_be32(from->di_gen);
to                360 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_changecount = cpu_to_be64(from->di_changecount);
to                361 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to                362 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
to                363 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flags2 = cpu_to_be64(from->di_flags2);
to                364 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to                365 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_ino = cpu_to_be64(from->di_ino);
to                366 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_lsn = cpu_to_be64(from->di_lsn);
to                367 fs/xfs/libxfs/xfs_inode_buf.c 		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
to                368 fs/xfs/libxfs/xfs_inode_buf.c 		uuid_copy(&to->di_uuid, &from->di_uuid);
to                369 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flushiter = 0;
to                371 fs/xfs/libxfs/xfs_inode_buf.c 		to->di_flushiter = cpu_to_be16(from->di_flushiter);
to                 59 fs/xfs/libxfs/xfs_inode_buf.h void	xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to,
to                 63 fs/xfs/libxfs/xfs_inode_buf.h 			       struct xfs_dinode *to);
to                451 fs/xfs/libxfs/xfs_sb.c 	struct xfs_sb	*to,
to                455 fs/xfs/libxfs/xfs_sb.c 	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
to                456 fs/xfs/libxfs/xfs_sb.c 	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
to                457 fs/xfs/libxfs/xfs_sb.c 	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
to                458 fs/xfs/libxfs/xfs_sb.c 	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
to                459 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextents = be64_to_cpu(from->sb_rextents);
to                460 fs/xfs/libxfs/xfs_sb.c 	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
to                461 fs/xfs/libxfs/xfs_sb.c 	to->sb_logstart = be64_to_cpu(from->sb_logstart);
to                462 fs/xfs/libxfs/xfs_sb.c 	to->sb_rootino = be64_to_cpu(from->sb_rootino);
to                463 fs/xfs/libxfs/xfs_sb.c 	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
to                464 fs/xfs/libxfs/xfs_sb.c 	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
to                465 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
to                466 fs/xfs/libxfs/xfs_sb.c 	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
to                467 fs/xfs/libxfs/xfs_sb.c 	to->sb_agcount = be32_to_cpu(from->sb_agcount);
to                468 fs/xfs/libxfs/xfs_sb.c 	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
to                469 fs/xfs/libxfs/xfs_sb.c 	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
to                470 fs/xfs/libxfs/xfs_sb.c 	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
to                471 fs/xfs/libxfs/xfs_sb.c 	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
to                472 fs/xfs/libxfs/xfs_sb.c 	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
to                473 fs/xfs/libxfs/xfs_sb.c 	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
to                474 fs/xfs/libxfs/xfs_sb.c 	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
to                475 fs/xfs/libxfs/xfs_sb.c 	to->sb_blocklog = from->sb_blocklog;
to                476 fs/xfs/libxfs/xfs_sb.c 	to->sb_sectlog = from->sb_sectlog;
to                477 fs/xfs/libxfs/xfs_sb.c 	to->sb_inodelog = from->sb_inodelog;
to                478 fs/xfs/libxfs/xfs_sb.c 	to->sb_inopblog = from->sb_inopblog;
to                479 fs/xfs/libxfs/xfs_sb.c 	to->sb_agblklog = from->sb_agblklog;
to                480 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextslog = from->sb_rextslog;
to                481 fs/xfs/libxfs/xfs_sb.c 	to->sb_inprogress = from->sb_inprogress;
to                482 fs/xfs/libxfs/xfs_sb.c 	to->sb_imax_pct = from->sb_imax_pct;
to                483 fs/xfs/libxfs/xfs_sb.c 	to->sb_icount = be64_to_cpu(from->sb_icount);
to                484 fs/xfs/libxfs/xfs_sb.c 	to->sb_ifree = be64_to_cpu(from->sb_ifree);
to                485 fs/xfs/libxfs/xfs_sb.c 	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
to                486 fs/xfs/libxfs/xfs_sb.c 	to->sb_frextents = be64_to_cpu(from->sb_frextents);
to                487 fs/xfs/libxfs/xfs_sb.c 	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
to                488 fs/xfs/libxfs/xfs_sb.c 	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
to                489 fs/xfs/libxfs/xfs_sb.c 	to->sb_qflags = be16_to_cpu(from->sb_qflags);
to                490 fs/xfs/libxfs/xfs_sb.c 	to->sb_flags = from->sb_flags;
to                491 fs/xfs/libxfs/xfs_sb.c 	to->sb_shared_vn = from->sb_shared_vn;
to                492 fs/xfs/libxfs/xfs_sb.c 	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
to                493 fs/xfs/libxfs/xfs_sb.c 	to->sb_unit = be32_to_cpu(from->sb_unit);
to                494 fs/xfs/libxfs/xfs_sb.c 	to->sb_width = be32_to_cpu(from->sb_width);
to                495 fs/xfs/libxfs/xfs_sb.c 	to->sb_dirblklog = from->sb_dirblklog;
to                496 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsectlog = from->sb_logsectlog;
to                497 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
to                498 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
to                499 fs/xfs/libxfs/xfs_sb.c 	to->sb_features2 = be32_to_cpu(from->sb_features2);
to                500 fs/xfs/libxfs/xfs_sb.c 	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
to                501 fs/xfs/libxfs/xfs_sb.c 	to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
to                502 fs/xfs/libxfs/xfs_sb.c 	to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
to                503 fs/xfs/libxfs/xfs_sb.c 	to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
to                504 fs/xfs/libxfs/xfs_sb.c 	to->sb_features_log_incompat =
to                507 fs/xfs/libxfs/xfs_sb.c 	to->sb_crc = 0;
to                508 fs/xfs/libxfs/xfs_sb.c 	to->sb_spino_align = be32_to_cpu(from->sb_spino_align);
to                509 fs/xfs/libxfs/xfs_sb.c 	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
to                510 fs/xfs/libxfs/xfs_sb.c 	to->sb_lsn = be64_to_cpu(from->sb_lsn);
to                515 fs/xfs/libxfs/xfs_sb.c 	if (xfs_sb_version_hasmetauuid(to))
to                516 fs/xfs/libxfs/xfs_sb.c 		uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
to                518 fs/xfs/libxfs/xfs_sb.c 		uuid_copy(&to->sb_meta_uuid, &from->sb_uuid);
to                521 fs/xfs/libxfs/xfs_sb.c 		xfs_sb_quota_from_disk(to);
to                526 fs/xfs/libxfs/xfs_sb.c 	struct xfs_sb	*to,
to                529 fs/xfs/libxfs/xfs_sb.c 	__xfs_sb_from_disk(to, from, true);
to                534 fs/xfs/libxfs/xfs_sb.c 	struct xfs_dsb	*to,
to                539 fs/xfs/libxfs/xfs_sb.c 	to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
to                541 fs/xfs/libxfs/xfs_sb.c 		to->sb_qflags = cpu_to_be16(from->sb_qflags);
to                542 fs/xfs/libxfs/xfs_sb.c 		to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
to                543 fs/xfs/libxfs/xfs_sb.c 		to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
to                561 fs/xfs/libxfs/xfs_sb.c 	to->sb_qflags = cpu_to_be16(qflags);
to                573 fs/xfs/libxfs/xfs_sb.c 		to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
to                575 fs/xfs/libxfs/xfs_sb.c 		to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
to                585 fs/xfs/libxfs/xfs_sb.c 			to->sb_gquotino = cpu_to_be64(NULLFSINO);
to                588 fs/xfs/libxfs/xfs_sb.c 	to->sb_pquotino = 0;
to                593 fs/xfs/libxfs/xfs_sb.c 	struct xfs_dsb	*to,
to                596 fs/xfs/libxfs/xfs_sb.c 	xfs_sb_quota_to_disk(to, from);
to                598 fs/xfs/libxfs/xfs_sb.c 	to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
to                599 fs/xfs/libxfs/xfs_sb.c 	to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
to                600 fs/xfs/libxfs/xfs_sb.c 	to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
to                601 fs/xfs/libxfs/xfs_sb.c 	to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
to                602 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextents = cpu_to_be64(from->sb_rextents);
to                603 fs/xfs/libxfs/xfs_sb.c 	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
to                604 fs/xfs/libxfs/xfs_sb.c 	to->sb_logstart = cpu_to_be64(from->sb_logstart);
to                605 fs/xfs/libxfs/xfs_sb.c 	to->sb_rootino = cpu_to_be64(from->sb_rootino);
to                606 fs/xfs/libxfs/xfs_sb.c 	to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
to                607 fs/xfs/libxfs/xfs_sb.c 	to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
to                608 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
to                609 fs/xfs/libxfs/xfs_sb.c 	to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
to                610 fs/xfs/libxfs/xfs_sb.c 	to->sb_agcount = cpu_to_be32(from->sb_agcount);
to                611 fs/xfs/libxfs/xfs_sb.c 	to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
to                612 fs/xfs/libxfs/xfs_sb.c 	to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
to                613 fs/xfs/libxfs/xfs_sb.c 	to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
to                614 fs/xfs/libxfs/xfs_sb.c 	to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
to                615 fs/xfs/libxfs/xfs_sb.c 	to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
to                616 fs/xfs/libxfs/xfs_sb.c 	to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
to                617 fs/xfs/libxfs/xfs_sb.c 	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
to                618 fs/xfs/libxfs/xfs_sb.c 	to->sb_blocklog = from->sb_blocklog;
to                619 fs/xfs/libxfs/xfs_sb.c 	to->sb_sectlog = from->sb_sectlog;
to                620 fs/xfs/libxfs/xfs_sb.c 	to->sb_inodelog = from->sb_inodelog;
to                621 fs/xfs/libxfs/xfs_sb.c 	to->sb_inopblog = from->sb_inopblog;
to                622 fs/xfs/libxfs/xfs_sb.c 	to->sb_agblklog = from->sb_agblklog;
to                623 fs/xfs/libxfs/xfs_sb.c 	to->sb_rextslog = from->sb_rextslog;
to                624 fs/xfs/libxfs/xfs_sb.c 	to->sb_inprogress = from->sb_inprogress;
to                625 fs/xfs/libxfs/xfs_sb.c 	to->sb_imax_pct = from->sb_imax_pct;
to                626 fs/xfs/libxfs/xfs_sb.c 	to->sb_icount = cpu_to_be64(from->sb_icount);
to                627 fs/xfs/libxfs/xfs_sb.c 	to->sb_ifree = cpu_to_be64(from->sb_ifree);
to                628 fs/xfs/libxfs/xfs_sb.c 	to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
to                629 fs/xfs/libxfs/xfs_sb.c 	to->sb_frextents = cpu_to_be64(from->sb_frextents);
to                631 fs/xfs/libxfs/xfs_sb.c 	to->sb_flags = from->sb_flags;
to                632 fs/xfs/libxfs/xfs_sb.c 	to->sb_shared_vn = from->sb_shared_vn;
to                633 fs/xfs/libxfs/xfs_sb.c 	to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
to                634 fs/xfs/libxfs/xfs_sb.c 	to->sb_unit = cpu_to_be32(from->sb_unit);
to                635 fs/xfs/libxfs/xfs_sb.c 	to->sb_width = cpu_to_be32(from->sb_width);
to                636 fs/xfs/libxfs/xfs_sb.c 	to->sb_dirblklog = from->sb_dirblklog;
to                637 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsectlog = from->sb_logsectlog;
to                638 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
to                639 fs/xfs/libxfs/xfs_sb.c 	to->sb_logsunit = cpu_to_be32(from->sb_logsunit);
to                647 fs/xfs/libxfs/xfs_sb.c 	to->sb_features2 = cpu_to_be32(from->sb_features2);
to                648 fs/xfs/libxfs/xfs_sb.c 	to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);
to                651 fs/xfs/libxfs/xfs_sb.c 		to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
to                652 fs/xfs/libxfs/xfs_sb.c 		to->sb_features_ro_compat =
to                654 fs/xfs/libxfs/xfs_sb.c 		to->sb_features_incompat =
to                656 fs/xfs/libxfs/xfs_sb.c 		to->sb_features_log_incompat =
to                658 fs/xfs/libxfs/xfs_sb.c 		to->sb_spino_align = cpu_to_be32(from->sb_spino_align);
to                659 fs/xfs/libxfs/xfs_sb.c 		to->sb_lsn = cpu_to_be64(from->sb_lsn);
to                661 fs/xfs/libxfs/xfs_sb.c 			uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
to                 29 fs/xfs/libxfs/xfs_sb.h extern void	xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
to                 30 fs/xfs/libxfs/xfs_sb.h extern void	xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
to                177 fs/xfs/xfs_file.c 	struct iov_iter		*to)
to                180 fs/xfs/xfs_file.c 	size_t			count = iov_iter_count(to);
to                191 fs/xfs/xfs_file.c 	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
to                200 fs/xfs/xfs_file.c 	struct iov_iter		*to)
to                203 fs/xfs/xfs_file.c 	size_t			count = iov_iter_count(to);
to                218 fs/xfs/xfs_file.c 	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
to                228 fs/xfs/xfs_file.c 	struct iov_iter		*to)
to                233 fs/xfs/xfs_file.c 	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
to                241 fs/xfs/xfs_file.c 	ret = generic_file_read_iter(iocb, to);
to                250 fs/xfs/xfs_file.c 	struct iov_iter		*to)
to                262 fs/xfs/xfs_file.c 		ret = xfs_file_dax_read(iocb, to);
to                264 fs/xfs/xfs_file.c 		ret = xfs_file_dio_aio_read(iocb, to);
to                266 fs/xfs/xfs_file.c 		ret = xfs_file_buffered_aio_read(iocb, to);
to                300 fs/xfs/xfs_inode_item.c 	struct xfs_log_dinode	*to,
to                306 fs/xfs/xfs_inode_item.c 	to->di_magic = XFS_DINODE_MAGIC;
to                308 fs/xfs/xfs_inode_item.c 	to->di_version = from->di_version;
to                309 fs/xfs/xfs_inode_item.c 	to->di_format = from->di_format;
to                310 fs/xfs/xfs_inode_item.c 	to->di_uid = from->di_uid;
to                311 fs/xfs/xfs_inode_item.c 	to->di_gid = from->di_gid;
to                312 fs/xfs/xfs_inode_item.c 	to->di_projid_lo = from->di_projid_lo;
to                313 fs/xfs/xfs_inode_item.c 	to->di_projid_hi = from->di_projid_hi;
to                315 fs/xfs/xfs_inode_item.c 	memset(to->di_pad, 0, sizeof(to->di_pad));
to                316 fs/xfs/xfs_inode_item.c 	memset(to->di_pad3, 0, sizeof(to->di_pad3));
to                317 fs/xfs/xfs_inode_item.c 	to->di_atime.t_sec = inode->i_atime.tv_sec;
to                318 fs/xfs/xfs_inode_item.c 	to->di_atime.t_nsec = inode->i_atime.tv_nsec;
to                319 fs/xfs/xfs_inode_item.c 	to->di_mtime.t_sec = inode->i_mtime.tv_sec;
to                320 fs/xfs/xfs_inode_item.c 	to->di_mtime.t_nsec = inode->i_mtime.tv_nsec;
to                321 fs/xfs/xfs_inode_item.c 	to->di_ctime.t_sec = inode->i_ctime.tv_sec;
to                322 fs/xfs/xfs_inode_item.c 	to->di_ctime.t_nsec = inode->i_ctime.tv_nsec;
to                323 fs/xfs/xfs_inode_item.c 	to->di_nlink = inode->i_nlink;
to                324 fs/xfs/xfs_inode_item.c 	to->di_gen = inode->i_generation;
to                325 fs/xfs/xfs_inode_item.c 	to->di_mode = inode->i_mode;
to                327 fs/xfs/xfs_inode_item.c 	to->di_size = from->di_size;
to                328 fs/xfs/xfs_inode_item.c 	to->di_nblocks = from->di_nblocks;
to                329 fs/xfs/xfs_inode_item.c 	to->di_extsize = from->di_extsize;
to                330 fs/xfs/xfs_inode_item.c 	to->di_nextents = from->di_nextents;
to                331 fs/xfs/xfs_inode_item.c 	to->di_anextents = from->di_anextents;
to                332 fs/xfs/xfs_inode_item.c 	to->di_forkoff = from->di_forkoff;
to                333 fs/xfs/xfs_inode_item.c 	to->di_aformat = from->di_aformat;
to                334 fs/xfs/xfs_inode_item.c 	to->di_dmevmask = from->di_dmevmask;
to                335 fs/xfs/xfs_inode_item.c 	to->di_dmstate = from->di_dmstate;
to                336 fs/xfs/xfs_inode_item.c 	to->di_flags = from->di_flags;
to                339 fs/xfs/xfs_inode_item.c 	to->di_next_unlinked = NULLAGINO;
to                342 fs/xfs/xfs_inode_item.c 		to->di_changecount = inode_peek_iversion(inode);
to                343 fs/xfs/xfs_inode_item.c 		to->di_crtime.t_sec = from->di_crtime.t_sec;
to                344 fs/xfs/xfs_inode_item.c 		to->di_crtime.t_nsec = from->di_crtime.t_nsec;
to                345 fs/xfs/xfs_inode_item.c 		to->di_flags2 = from->di_flags2;
to                346 fs/xfs/xfs_inode_item.c 		to->di_cowextsize = from->di_cowextsize;
to                347 fs/xfs/xfs_inode_item.c 		to->di_ino = ip->i_ino;
to                348 fs/xfs/xfs_inode_item.c 		to->di_lsn = lsn;
to                349 fs/xfs/xfs_inode_item.c 		memset(to->di_pad2, 0, sizeof(to->di_pad2));
to                350 fs/xfs/xfs_inode_item.c 		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to                351 fs/xfs/xfs_inode_item.c 		to->di_flushiter = 0;
to                353 fs/xfs/xfs_inode_item.c 		to->di_flushiter = from->di_flushiter;
to                 29 include/asm-generic/page.h #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
to                 32 include/asm-generic/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
to                 14 include/asm-generic/uaccess.h raw_copy_from_user(void *to, const void __user * from, unsigned long n)
to                 19 include/asm-generic/uaccess.h 			*(u8 *)to = *(u8 __force *)from;
to                 22 include/asm-generic/uaccess.h 			*(u16 *)to = *(u16 __force *)from;
to                 25 include/asm-generic/uaccess.h 			*(u32 *)to = *(u32 __force *)from;
to                 29 include/asm-generic/uaccess.h 			*(u64 *)to = *(u64 __force *)from;
to                 35 include/asm-generic/uaccess.h 	memcpy(to, (const void __force *)from, n);
to                 40 include/asm-generic/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n)
to                 45 include/asm-generic/uaccess.h 			*(u8 __force *)to = *(u8 *)from;
to                 48 include/asm-generic/uaccess.h 			*(u16 __force *)to = *(u16 *)from;
to                 51 include/asm-generic/uaccess.h 			*(u32 __force *)to = *(u32 *)from;
to                 55 include/asm-generic/uaccess.h 			*(u64 __force *)to = *(u64 *)from;
to                 63 include/asm-generic/uaccess.h 	memcpy((void __force *)to, from, n);
to                264 include/asm-generic/uaccess.h __clear_user(void __user *to, unsigned long n)
to                266 include/asm-generic/uaccess.h 	memset((void __force *)to, 0, n);
to                272 include/asm-generic/uaccess.h clear_user(void __user *to, unsigned long n)
to                275 include/asm-generic/uaccess.h 	if (!access_ok(to, n))
to                278 include/asm-generic/uaccess.h 	return __clear_user(to, n);
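The asm-generic fallbacks above implement clear_user() as an access_ok() check in front of __clear_user(), which simply memsets the user range, while the raw copies special-case 1/2/4/8-byte transfers and otherwise fall back to memcpy(). A minimal sketch of typical driver-side use, assuming a hypothetical read path that zero-fills whatever it could not produce (the helper name and sizes are illustrative, not from the source):

#include <linux/uaccess.h>

/* Illustrative only: pad the unfilled tail of a user buffer with zeroes. */
static ssize_t pad_user_buf(char __user *buf, size_t len, size_t filled)
{
	if (filled < len) {
		/* clear_user() returns the number of bytes left uncleared. */
		if (clear_user(buf + filled, len - filled))
			return -EFAULT;
	}
	return len;
}
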
to                 91 include/linux/assoc_array_priv.h 		struct assoc_array_ptr	*to;
to                 95 include/linux/assoc_array_priv.h 		u8			to;
to                113 include/linux/bitfield.h #define ____MAKE_OP(type,base,to,from)					\
to                118 include/linux/bitfield.h 	return to((v & field_mask(field)) * field_multiplier(field));	\
to                123 include/linux/bitfield.h 	return (old & ~to(field)) | type##_encode_bits(val, field);	\
to                128 include/linux/bitfield.h 	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
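Here `to`/`from` are the byte-order conversion hooks that ____MAKE_OP() wraps around the shift-and-mask, generating the typed helpers such as u32_get_bits(), u32_encode_bits() and u32_replace_bits(). A hedged sketch of the u32 variants; the CTRL register layout and MODE field are made up purely for illustration:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_CTRL_MODE	GENMASK(7, 4)	/* hypothetical 4-bit field */

static u32 demo_set_mode(u32 ctrl, u32 mode)
{
	/* Replaces only the MODE bits, leaving the rest of ctrl intact. */
	return u32_replace_bits(ctrl, mode, DEMO_CTRL_MODE);
}

static u32 demo_get_mode(u32 ctrl)
{
	return u32_get_bits(ctrl, DEMO_CTRL_MODE);
}
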
to                689 include/linux/blk-cgroup.h static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
to                700 include/linux/blk-cgroup.h 			     &to->aux_cnt[i]);
to                237 include/linux/buffer_head.h void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
to                243 include/linux/buffer_head.h int block_commit_write(struct page *page, unsigned from, unsigned to);
to                112 include/linux/cgroup.h int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
to                204 include/linux/clocksource.h clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
to                421 include/linux/compat.h int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
to                422 include/linux/compat.h int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
to                760 include/linux/fb.h extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to);
to                761 include/linux/fb.h extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to);
to               3118 include/linux/fs.h extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
to               3338 include/linux/fs.h extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
to               3340 include/linux/fs.h extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
to                250 include/linux/highmem.h static inline void copy_user_highpage(struct page *to, struct page *from,
to                256 include/linux/highmem.h 	vto = kmap_atomic(to);
to                257 include/linux/highmem.h 	copy_user_page(vto, vfrom, vaddr, to);
to                266 include/linux/highmem.h static inline void copy_highpage(struct page *to, struct page *from)
to                271 include/linux/highmem.h 	vto = kmap_atomic(to);
to                 97 include/linux/hugetlb.h int hugetlb_reserve_pages(struct inode *inode, long from, long to,
to                113 include/linux/hwspinlock.h int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
to                282 include/linux/hwspinlock.h 				unsigned int to, unsigned long *flags)
to                284 include/linux/hwspinlock.h 	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
to                305 include/linux/hwspinlock.h int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
to                307 include/linux/hwspinlock.h 	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
to                328 include/linux/hwspinlock.h int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
to                330 include/linux/hwspinlock.h 	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
to                350 include/linux/hwspinlock.h int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
to                352 include/linux/hwspinlock.h 	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
to                375 include/linux/hwspinlock.h int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
to                377 include/linux/hwspinlock.h 	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
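In the hwspinlock API `to` is a timeout in milliseconds, and every wrapper above funnels into __hwspin_lock_timeout() with a different locking mode. A usage sketch, assuming a register shared with a coprocessor; the 100 ms timeout and helper name are illustrative only:

#include <linux/io.h>
#include <linux/hwspinlock.h>

static int demo_write_shared_reg(struct hwspinlock *hwlock,
				 void __iomem *reg, u32 val)
{
	int ret;

	/* Spin on the hardware lock for at most 100 ms. */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (ret)
		return ret;	/* typically -ETIMEDOUT */

	writel(val, reg);
	hwspin_unlock(hwlock);
	return 0;
}
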
to                 19 include/linux/io.h __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
to                 20 include/linux/io.h void __ioread32_copy(void *to, const void __iomem *from, size_t count);
to                 21 include/linux/io.h void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
to                155 include/linux/iova.h void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
to                229 include/linux/iova.h 				      struct iova_domain *to)
to                617 include/linux/kfifo.h #define	kfifo_to_user(fifo, to, len, copied) \
to                621 include/linux/kfifo.h 	void __user *__to = (to); \
to                776 include/linux/kfifo.h 	void __user *to, unsigned long len, unsigned int *copied);
to                797 include/linux/kfifo.h extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
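kfifo_to_user() drains FIFO contents straight into a user pointer and reports how much actually made it across. A sketch of a read() handler built on it; the FIFO name, element type and size are assumptions for the example:

#include <linux/fs.h>
#include <linux/kfifo.h>

static DEFINE_KFIFO(demo_fifo, char, 1024);	/* hypothetical buffer */

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	/* Returns 0 on success; copied says how many bytes reached userspace. */
	ret = kfifo_to_user(&demo_fifo, buf, count, &copied);
	return ret ? ret : copied;
}
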
to                167 include/linux/lockdep.h static inline void lockdep_copy_map(struct lockdep_map *to,
to                172 include/linux/lockdep.h 	*to = *from;
to                182 include/linux/lockdep.h 		to->class_cache[i] = NULL;
to               1461 include/linux/lsm_hooks.h 					struct task_struct *to);
to               1463 include/linux/lsm_hooks.h 					struct task_struct *to);
to               1465 include/linux/lsm_hooks.h 					struct task_struct *to,
to                166 include/linux/mempolicy.h 		     const nodemask_t *to, int flags);
to                286 include/linux/mempolicy.h 				   const nodemask_t *to, int flags)
to               1481 include/linux/mm.h void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
to                 48 include/linux/mtd/hyperbus.h 	void (*copy_from)(struct hyperbus_device *hbdev, void *to,
to                 50 include/linux/mtd/hyperbus.h 	void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
to                430 include/linux/mtd/map.h static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
to                433 include/linux/mtd/map.h 		memcpy(to, (char *)map->cached + from, len);
to                435 include/linux/mtd/map.h 		memcpy_fromio(to, map->virt + from, len);
to                438 include/linux/mtd/map.h static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
to                440 include/linux/mtd/map.h 	memcpy_toio(map->virt + to, from, len);
to                445 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
to                447 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
to                454 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
to                456 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
to                282 include/linux/mtd/mtd.h 	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
to                284 include/linux/mtd/mtd.h 	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
to                288 include/linux/mtd/mtd.h 	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
to                298 include/linux/mtd/mtd.h 	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
to                303 include/linux/mtd/mtd.h 			unsigned long count, loff_t to, size_t *retlen);
to                417 include/linux/mtd/mtd.h int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
to                419 include/linux/mtd/mtd.h int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
to                423 include/linux/mtd/mtd.h int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
to                433 include/linux/mtd/mtd.h int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
to                438 include/linux/mtd/mtd.h 	       unsigned long count, loff_t to, size_t *retlen);
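Throughout the MTD interface `to` is the flash offset being written. A minimal sketch of a write through mtd_write(), treating a short write as an error; the wrapper itself is hypothetical:

#include <linux/mtd/mtd.h>

static int demo_flash_write(struct mtd_info *mtd, loff_t to,
			    const u_char *buf, size_t len)
{
	size_t retlen = 0;
	int ret;

	ret = mtd_write(mtd, to, len, &retlen, buf);
	if (!ret && retlen != len)
		ret = -EIO;	/* partial write: treat as failure here */
	return ret;
}
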
to                 27 include/linux/mtd/plat-ram.h 	void	(*set_rw)(struct device *dev, int to);
to                598 include/linux/mtd/spi-nor.h 	ssize_t (*write)(struct spi_nor *nor, loff_t to,
to               4144 include/linux/netdevice.h int dev_uc_sync(struct net_device *to, struct net_device *from);
to               4145 include/linux/netdevice.h int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
to               4146 include/linux/netdevice.h void dev_uc_unsync(struct net_device *to, struct net_device *from);
to               4188 include/linux/netdevice.h int dev_mc_sync(struct net_device *to, struct net_device *from);
to               4189 include/linux/netdevice.h int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
to               4190 include/linux/netdevice.h void dev_mc_unsync(struct net_device *to, struct net_device *from);
to                 18 include/linux/netfilter/ipset/ip_set_bitmap.h range_to_mask(u32 from, u32 to, u8 *bits)
to                 23 include/linux/netfilter/ipset/ip_set_bitmap.h 	while (--(*bits) > 0 && mask && (to & mask) != from)
to                 37 include/linux/netfilter/ipset/pfxlen.h extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr);
to                 39 include/linux/netfilter/ipset/pfxlen.h #define ip_set_mask_from_to(from, to, cidr)	\
to                 42 include/linux/netfilter/ipset/pfxlen.h 	to = from | ~ip_set_hostmask(cidr);	\
to                 26 include/linux/parser.h 	char *to;
to                102 include/linux/percpu.h typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
to                 26 include/linux/platform_data/usb-ohci-s3c2410.h 	void		(*power_control)(int port, int to);
to                122 include/linux/poll.h extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
to                125 include/linux/poll.h #define __MAP(v, from, to) \
to                126 include/linux/poll.h 	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))
to                 55 include/linux/projid.h extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
to                 56 include/linux/projid.h extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);
to                 70 include/linux/projid.h static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid)
to                 75 include/linux/projid.h static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid)
to                 77 include/linux/projid.h 	projid_t projid = from_kprojid(to, kprojid);
to                 30 include/linux/psi.h void cgroup_move_task(struct task_struct *p, struct css_set *to);
to                 55 include/linux/psi.h static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
to                 57 include/linux/psi.h 	rcu_assign_pointer(p->cgroups, to);
to                 79 include/linux/quota.h extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
to                 80 include/linux/quota.h extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
to               1690 include/linux/sched.h extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
to                254 include/linux/security.h 				struct task_struct *to);
to                256 include/linux/security.h 				    struct task_struct *to);
to                258 include/linux/security.h 				  struct task_struct *to, struct file *file);
to                490 include/linux/security.h 					      struct task_struct *to)
to                496 include/linux/security.h 						  struct task_struct *to)
to                502 include/linux/security.h 						struct task_struct *to,
to                 14 include/linux/signal.h static inline void copy_siginfo(kernel_siginfo_t *to,
to                 17 include/linux/signal.h 	memcpy(to, from, sizeof(*to));
to                 27 include/linux/signal.h int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
to                 28 include/linux/signal.h int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
to               1031 include/linux/skbuff.h bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
to               1161 include/linux/skbuff.h 			  unsigned int to, struct skb_seq_state *st);
to               1167 include/linux/skbuff.h 			   unsigned int to, struct ts_config *config);
to               1367 include/linux/skbuff.h static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to               1369 include/linux/skbuff.h 	to->hash = from->hash;
to               1370 include/linux/skbuff.h 	to->sw_hash = from->sw_hash;
to               1371 include/linux/skbuff.h 	to->l4_hash = from->l4_hash;
to               1374 include/linux/skbuff.h static inline void skb_copy_decrypted(struct sk_buff *to,
to               1378 include/linux/skbuff.h 	to->decrypted = from->decrypted;
to               3487 include/linux/skbuff.h 			   struct iov_iter *to, int size);
to               3496 include/linux/skbuff.h 			   struct iov_iter *to, int len,
to               3509 include/linux/skbuff.h int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
to               3511 include/linux/skbuff.h __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
to               3518 include/linux/skbuff.h void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
to               3520 include/linux/skbuff.h int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
to               3604 include/linux/skbuff.h 					     void *to,
to               3607 include/linux/skbuff.h 	memcpy(to, skb->data, len);
to               3611 include/linux/skbuff.h 						    const int offset, void *to,
to               3614 include/linux/skbuff.h 	memcpy(to, skb->data + offset, len);
to               4236 include/linux/skbuff.h static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
to               4238 include/linux/skbuff.h 	to->secmark = from->secmark;
to               4246 include/linux/skbuff.h static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
to               4281 include/linux/skbuff.h static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
to               4283 include/linux/skbuff.h 	to->queue_mapping = from->queue_mapping;
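skb_copy_bits() gathers `len` bytes starting at `offset` into a flat caller buffer no matter how the skb data is split between the linear area and paged fragments; the copy_hash/secmark/queue_mapping helpers above merely mirror per-skb metadata. A sketch that peeks at a transport header; the 8-byte window is an arbitrary choice for the example:

#include <linux/skbuff.h>

static int demo_peek_l4(const struct sk_buff *skb, int thoff)
{
	u8 hdr[8];

	/* Succeeds even when the bytes live in non-linear fragments. */
	if (skb_copy_bits(skb, thoff, hdr, sizeof(hdr)) < 0)
		return -EINVAL;

	return hdr[0];	/* e.g. first byte of the transport header */
}
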
to                 10 include/linux/sm501.h 			    unsigned int unit, unsigned int to);
to                 16 include/linux/sonet.h extern void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to);
to                 18 include/linux/sonet.h     struct sonet_stats *to);
to                216 include/linux/string.h extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
to                204 include/linux/sunrpc/xdr.h typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
to                893 include/linux/syscalls.h 				const unsigned long __user *to);
to                487 include/linux/tcp.h int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
to                 52 include/linux/topology.h #define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
to                 59 include/linux/uaccess.h __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
to                 61 include/linux/uaccess.h 	kasan_check_write(to, n);
to                 62 include/linux/uaccess.h 	check_object_size(to, n, false);
to                 63 include/linux/uaccess.h 	return raw_copy_from_user(to, from, n);
to                 67 include/linux/uaccess.h __copy_from_user(void *to, const void __user *from, unsigned long n)
to                 70 include/linux/uaccess.h 	kasan_check_write(to, n);
to                 71 include/linux/uaccess.h 	check_object_size(to, n, false);
to                 72 include/linux/uaccess.h 	return raw_copy_from_user(to, from, n);
to                 89 include/linux/uaccess.h __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
to                 93 include/linux/uaccess.h 	return raw_copy_to_user(to, from, n);
to                 97 include/linux/uaccess.h __copy_to_user(void __user *to, const void *from, unsigned long n)
to                102 include/linux/uaccess.h 	return raw_copy_to_user(to, from, n);
to                107 include/linux/uaccess.h _copy_from_user(void *to, const void __user *from, unsigned long n)
to                112 include/linux/uaccess.h 		kasan_check_write(to, n);
to                113 include/linux/uaccess.h 		res = raw_copy_from_user(to, from, n);
to                116 include/linux/uaccess.h 		memset(to + (n - res), 0, res);
to                126 include/linux/uaccess.h _copy_to_user(void __user *to, const void *from, unsigned long n)
to                129 include/linux/uaccess.h 	if (access_ok(to, n)) {
to                131 include/linux/uaccess.h 		n = raw_copy_to_user(to, from, n);
to                141 include/linux/uaccess.h copy_from_user(void *to, const void __user *from, unsigned long n)
to                143 include/linux/uaccess.h 	if (likely(check_copy_size(to, n, false)))
to                144 include/linux/uaccess.h 		n = _copy_from_user(to, from, n);
to                149 include/linux/uaccess.h copy_to_user(void __user *to, const void *from, unsigned long n)
to                152 include/linux/uaccess.h 		n = _copy_to_user(to, from, n);
to                157 include/linux/uaccess.h copy_in_user(void __user *to, const void __user *from, unsigned long n)
to                160 include/linux/uaccess.h 	if (access_ok(to, n) && access_ok(from, n))
to                161 include/linux/uaccess.h 		n = raw_copy_in_user(to, from, n);
to                226 include/linux/uaccess.h __copy_from_user_inatomic_nocache(void *to, const void __user *from,
to                229 include/linux/uaccess.h 	return __copy_from_user_inatomic(to, from, n);
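copy_from_user()/copy_to_user() wrap the raw copies with access_ok(), the KASAN checks and the hardened-usercopy size check, and return the number of bytes they could not copy. A sketch of the usual ioctl pattern; the payload structure and doubling logic are hypothetical:

#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {		/* hypothetical ioctl payload */
	__u32 in;
	__u32 out;
};

static long demo_ioctl_copy(void __user *argp)
{
	struct demo_args args;

	/* A non-zero return means some bytes were left uncopied. */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	args.out = args.in * 2;

	if (copy_to_user(argp, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
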
to                126 include/linux/uidgid.h extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
to                127 include/linux/uidgid.h extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
to                128 include/linux/uidgid.h extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
to                129 include/linux/uidgid.h extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);
to                153 include/linux/uidgid.h static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
to                158 include/linux/uidgid.h static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid)
to                163 include/linux/uidgid.h static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
to                165 include/linux/uidgid.h 	uid_t uid = from_kuid(to, kuid);
to                171 include/linux/uidgid.h static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
to                173 include/linux/uidgid.h 	gid_t gid = from_kgid(to, kgid);
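In the uidgid helpers `to` names the user namespace the kernel-internal kuid_t/kgid_t is translated into; the _munged variants substitute the overflow IDs when no mapping exists. A small sketch, assuming an owner reported back to the calling task's namespace:

#include <linux/cred.h>
#include <linux/uidgid.h>

static uid_t demo_owner_uid(kuid_t kuid)
{
	/* Unmapped ids become the overflow uid rather than an error. */
	return from_kuid_munged(current_user_ns(), kuid);
}
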
to                 66 include/linux/userfaultfd_k.h 					unsigned long from, unsigned long to,
to                121 include/linux/userfaultfd_k.h 					       unsigned long to,
to                146 include/media/v4l2-rect.h 				   const struct v4l2_rect *to)
to                152 include/media/v4l2-rect.h 	r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
to                153 include/media/v4l2-rect.h 	r->width = ((r->width * to->width) / from->width) & ~1;
to                154 include/media/v4l2-rect.h 	r->top = ((r->top - from->top) * to->height) / from->height;
to                155 include/media/v4l2-rect.h 	r->height = (r->height * to->height) / from->height;
to                202 include/net/9p/client.h int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
to                124 include/net/checksum.h static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
to                128 include/net/checksum.h 	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
to                144 include/net/checksum.h 			      __be32 from, __be32 to, bool pseudohdr);
to                146 include/net/checksum.h 			       const __be32 *from, const __be32 *to,
to                152 include/net/checksum.h 					    __be16 from, __be16 to,
to                156 include/net/checksum.h 				 (__force __be32)to, pseudohdr);
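csum_replace4() patches a folded 16-bit checksum in place when one 32-bit field changes from `from` to `to`, avoiding a full recompute. A sketch of the classic case, rewriting an IPv4 source address NAT-style; the helper is illustrative and skips the L4 checksum update a real rewrite would also need:

#include <linux/ip.h>
#include <net/checksum.h>

static void demo_rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
{
	/* Incrementally fix the IP header checksum for the 32-bit change. */
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;
}
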
to                137 include/net/inet_ecn.h 	__be32 from, to;
to                143 include/net/inet_ecn.h 	to = from | htonl(INET_ECN_CE << 20);
to                144 include/net/inet_ecn.h 	*(__be32 *)iph = to;
to                147 include/net/inet_ecn.h 				     (__force __wsum)to);
to                153 include/net/inet_ecn.h 	__be32 from, to;
to                159 include/net/inet_ecn.h 	to = from ^ htonl(INET_ECN_MASK << 20);
to                160 include/net/inet_ecn.h 	*(__be32 *)iph = to;
to                163 include/net/inet_ecn.h 				     (__force __wsum)to);
to                211 include/net/ip.h 		   int getfrag(void *from, char *to, int offset, int len,
to                217 include/net/ip.h int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
to                228 include/net/ip.h 			    int getfrag(void *from, char *to, int offset,
to                465 include/net/ip_tunnels.h static inline void ip_tunnel_info_opts_get(void *to,
to                468 include/net/ip_tunnels.h 	memcpy(to, info + 1, info->options_len);
to                513 include/net/ip_tunnels.h static inline void ip_tunnel_info_opts_get(void *to,
to                989 include/net/ipv6.h 		    int getfrag(void *from, char *to, int offset, int len,
to               1005 include/net/ipv6.h 			     int getfrag(void *from, char *to, int offset,
to                 71 include/net/ping.h int  ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
to                115 include/net/sctp/command.h 	enum sctp_event_timeout to;
to                156 include/net/sctp/command.h SCTP_ARG_CONSTRUCTOR(TO,	enum sctp_event_timeout, to)
to                 18 include/net/seg6.h 				     __be32 to)
to                 20 include/net/seg6.h 	__be32 diff[] = { ~from, to };
to                 26 include/net/seg6.h 				      __be32 *to)
to                 30 include/net/seg6.h 		to[0], to[1], to[2], to[3],
to               1982 include/net/sock.h 					   struct iov_iter *from, char *to,
to               1987 include/net/sock.h 		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
to               1991 include/net/sock.h 		if (!copy_from_iter_full_nocache(to, copy, from))
to               1993 include/net/sock.h 	} else if (!copy_from_iter_full(to, copy, from))
to                384 include/net/udp.h 				  struct iov_iter *to)
to                388 include/net/udp.h 	n = copy_to_iter(skb->data + off, len, to);
to                392 include/net/udp.h 	iov_iter_revert(to, n);
to                 20 include/net/udplite.h static __inline__ int udplite_getfrag(void *from, char *to, int  offset,
to                 24 include/net/udplite.h 	return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
to                804 include/rdma/uverbs_ioctl.h static inline int _uverbs_copy_from(void *to,
to                823 include/rdma/uverbs_ioctl.h 		memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
to                824 include/rdma/uverbs_ioctl.h 	else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
to                831 include/rdma/uverbs_ioctl.h static inline int _uverbs_copy_from_or_zero(void *to,
to                845 include/rdma/uverbs_ioctl.h 		memcpy(to, &attr->ptr_attr.data, min_size);
to                846 include/rdma/uverbs_ioctl.h 	else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
to                851 include/rdma/uverbs_ioctl.h 		memset(to + min_size, 0, size - min_size);
to                856 include/rdma/uverbs_ioctl.h #define uverbs_copy_from(to, attrs_bundle, idx)				      \
to                857 include/rdma/uverbs_ioctl.h 	_uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
to                859 include/rdma/uverbs_ioctl.h #define uverbs_copy_from_or_zero(to, attrs_bundle, idx)			      \
to                860 include/rdma/uverbs_ioctl.h 	_uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
to                869 include/rdma/uverbs_ioctl.h int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                871 include/rdma/uverbs_ioctl.h int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                889 include/rdma/uverbs_ioctl.h int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                896 include/rdma/uverbs_ioctl.h uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                902 include/rdma/uverbs_ioctl.h uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                923 include/rdma/uverbs_ioctl.h _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
to                 88 include/sound/pcm_params.h 				      unsigned int from, unsigned int to)
to                 91 include/sound/pcm_params.h 	for (i = from; i <= to; i++)
to                 96 include/sound/pcm_params.h 					unsigned int from, unsigned int to)
to                 99 include/sound/pcm_params.h 	for (i = from; i <= to; i++)
to                 13 include/sound/wavefront.h      You will not be able to compile this file correctly without gcc, because
to                 14 include/sound/wavefront.h      it is necessary to pack the "wavefront_alias" structure to a size
to                 15 include/sound/wavefront.h      of 22 bytes, corresponding to 16-bit alignment (as would have been
to                 18 include/sound/wavefront.h      The method used to do this here ("__attribute__((packed)") is
to                 21 include/sound/wavefront.h      All other wavefront_* types end up aligned to 32 bit values and
to                889 include/trace/events/afs.h 		     enum afs_call_state to,
to                892 include/trace/events/afs.h 	    TP_ARGS(call, from, to, ret, remote_abort),
to                897 include/trace/events/afs.h 		    __field(enum afs_call_state,	to		)
to                905 include/trace/events/afs.h 		    __entry->to = to;
to                912 include/trace/events/afs.h 		      __entry->from, __entry->to,
to                194 include/trace/events/cachefiles.h 		     struct dentry *to,
to                197 include/trace/events/cachefiles.h 	    TP_ARGS(obj, de, to, why),
to                203 include/trace/events/cachefiles.h 		    __field(struct dentry *,		to		)
to                210 include/trace/events/cachefiles.h 		    __entry->to		= to;
to                215 include/trace/events/cachefiles.h 		      __entry->obj, __entry->de, __entry->to,
to               1968 include/trace/events/ext4.h 	TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
to               1971 include/trace/events/ext4.h 	TP_ARGS(inode, from, to, reverse, found, found_blk),
to               1977 include/trace/events/ext4.h 		__field(	ext4_lblk_t,	to		)
to               1987 include/trace/events/ext4.h 		__entry->to		= to;
to               1997 include/trace/events/ext4.h 		  (unsigned) __entry->from, (unsigned) __entry->to,
to               2060 include/trace/events/ext4.h 		 ext4_lblk_t from, ext4_fsblk_t to,
to               2063 include/trace/events/ext4.h 	TP_ARGS(inode, ex, from, to, pc),
to               2069 include/trace/events/ext4.h 		__field(	ext4_lblk_t,	to	)
to               2082 include/trace/events/ext4.h 		__entry->to		= to;
to               2099 include/trace/events/ext4.h 		  (unsigned) __entry->to,
to                 70 include/trace/events/power.h 		u32 to,
to                 81 include/trace/events/power.h 		to,
to                 93 include/trace/events/power.h 		__field(u32, to)
to                105 include/trace/events/power.h 		__entry->to = to;
to                117 include/trace/events/power.h 		(unsigned long)__entry->to,
to                134 include/trace/events/xdp.h #define _trace_xdp_redirect(dev, xdp, to)		\
to                135 include/trace/events/xdp.h 	 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
to                137 include/trace/events/xdp.h #define _trace_xdp_redirect_err(dev, xdp, to, err)	\
to                138 include/trace/events/xdp.h 	 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
to                395 include/uapi/linux/if_link.h 	__u32 to;
to                 13 include/uapi/linux/iso_fs.h #define ISODCL(from, to) (to - from + 1)
to                 22 include/uapi/linux/netfilter/xt_connbytes.h 		__aligned_u64 to;	/* count to be matched */
to               1172 include/uapi/linux/perf_event.h 	__u64	to;
to                 95 include/uapi/linux/userfaultfd.h 			__u64	to;
to                 38 ipc/compat.c   int get_compat_ipc64_perm(struct ipc64_perm *to,
to                 44 ipc/compat.c   	to->uid = v.uid;
to                 45 ipc/compat.c   	to->gid = v.gid;
to                 46 ipc/compat.c   	to->mode = v.mode;
to                 50 ipc/compat.c   int get_compat_ipc_perm(struct ipc64_perm *to,
to                 56 ipc/compat.c   	to->uid = v.uid;
to                 57 ipc/compat.c   	to->gid = v.gid;
to                 58 ipc/compat.c   	to->mode = v.mode;
to                 62 ipc/compat.c   void to_compat_ipc64_perm(struct compat_ipc64_perm *to, struct ipc64_perm *from)
to                 64 ipc/compat.c   	to->key = from->key;
to                 65 ipc/compat.c   	to->uid = from->uid;
to                 66 ipc/compat.c   	to->gid = from->gid;
to                 67 ipc/compat.c   	to->cuid = from->cuid;
to                 68 ipc/compat.c   	to->cgid = from->cgid;
to                 69 ipc/compat.c   	to->mode = from->mode;
to                 70 ipc/compat.c   	to->seq = from->seq;
to                 73 ipc/compat.c   void to_compat_ipc_perm(struct compat_ipc_perm *to, struct ipc64_perm *from)
to                 75 ipc/compat.c   	to->key = from->key;
to                 76 ipc/compat.c   	SET_UID(to->uid, from->uid);
to                 77 ipc/compat.c   	SET_GID(to->gid, from->gid);
to                 78 ipc/compat.c   	SET_UID(to->cuid, from->cuid);
to                 79 ipc/compat.c   	SET_GID(to->cgid, from->cgid);
to                 80 ipc/compat.c   	to->mode = from->mode;
to                 81 ipc/compat.c   	to->seq = from->seq;
to               1207 ipc/sem.c      		time64_t to = sma->sems[i].sem_otime;
to               1209 ipc/sem.c      		if (to > res)
to               1210 ipc/sem.c      			res = to;
to                896 kernel/bpf/core.c 	struct bpf_insn *to = to_buff;
to                926 kernel/bpf/core.c 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
to                940 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
to                941 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to                942 kernel/bpf/core.c 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
to                954 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
to                955 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to                956 kernel/bpf/core.c 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
to                974 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
to                975 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to                976 kernel/bpf/core.c 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
to                994 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
to                995 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to                996 kernel/bpf/core.c 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
to               1001 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
to               1002 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to               1003 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
to               1004 kernel/bpf/core.c 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
to               1007 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
to               1008 kernel/bpf/core.c 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to               1010 kernel/bpf/core.c 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
to               1011 kernel/bpf/core.c 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
to               1018 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
to               1019 kernel/bpf/core.c 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
to               1020 kernel/bpf/core.c 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
to               1024 kernel/bpf/core.c 	return to - to_buff;
to               2102 kernel/bpf/core.c int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
to                 98 kernel/cgroup/cgroup-v1.c int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
to                106 kernel/cgroup/cgroup-v1.c 	if (cgroup_on_dfl(to))
to                109 kernel/cgroup/cgroup-v1.c 	ret = cgroup_migrate_vet_dst(to);
to                120 kernel/cgroup/cgroup-v1.c 		cgroup_migrate_add_src(link->cset, to, &mgctx);
to                145 kernel/cgroup/cgroup-v1.c 				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
to               1571 kernel/cgroup/cpuset.c 	nodemask_t		to;
to               1580 kernel/cgroup/cpuset.c 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
to               1586 kernel/cgroup/cpuset.c 							const nodemask_t *to)
to               1594 kernel/cgroup/cpuset.c 		mwork->to = *to;
to                374 kernel/cgroup/rdma.c 	argstr.to = value + len;
to               9394 kernel/events/core.c 			*args[0].to = 0;
to               9400 kernel/events/core.c 				*args[1].to = 0;
to                451 kernel/events/hw_breakpoint.c static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
to                454 kernel/events/hw_breakpoint.c 	to->bp_addr = from->bp_addr;
to                455 kernel/events/hw_breakpoint.c 	to->bp_type = from->bp_type;
to                456 kernel/events/hw_breakpoint.c 	to->bp_len  = from->bp_len;
to                457 kernel/events/hw_breakpoint.c 	to->disabled = from->disabled;
to               2805 kernel/futex.c 	struct hrtimer_sleeper timeout, *to;
to               2815 kernel/futex.c 	to = futex_setup_timer(abs_time, &timeout, flags,
to               2827 kernel/futex.c 	futex_wait_queue_me(hb, &q, to);
to               2835 kernel/futex.c 	if (to && !to->task)
to               2860 kernel/futex.c 	if (to) {
to               2861 kernel/futex.c 		hrtimer_cancel(&to->timer);
to               2862 kernel/futex.c 		destroy_hrtimer_on_stack(&to->timer);
to               2896 kernel/futex.c 	struct hrtimer_sleeper timeout, *to;
to               2910 kernel/futex.c 	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
to               3002 kernel/futex.c 	if (unlikely(to))
to               3003 kernel/futex.c 		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
to               3005 kernel/futex.c 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
to               3059 kernel/futex.c 	if (to) {
to               3060 kernel/futex.c 		hrtimer_cancel(&to->timer);
to               3061 kernel/futex.c 		destroy_hrtimer_on_stack(&to->timer);
to               3315 kernel/futex.c 	struct hrtimer_sleeper timeout, *to;
to               3332 kernel/futex.c 	to = futex_setup_timer(abs_time, &timeout, flags,
to               3368 kernel/futex.c 	futex_wait_queue_me(hb, &q, to);
to               3371 kernel/futex.c 	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
to               3415 kernel/futex.c 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
to               3470 kernel/futex.c 	if (to) {
to               3471 kernel/futex.c 		hrtimer_cancel(&to->timer);
to               3472 kernel/futex.c 		destroy_hrtimer_on_stack(&to->timer);
to               1849 kernel/locking/rtmutex.c 			       struct hrtimer_sleeper *to,
to               1857 kernel/locking/rtmutex.c 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
to                146 kernel/locking/rtmutex_common.h 			       struct hrtimer_sleeper *to,
to               1961 kernel/sched/core.c 			ktime_t to = NSEC_PER_SEC / HZ;
to               1964 kernel/sched/core.c 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
to                902 kernel/sched/psi.c void cgroup_move_task(struct task_struct *task, struct css_set *to)
to                913 kernel/sched/psi.c 		rcu_assign_pointer(task->cgroups, to);
to                931 kernel/sched/psi.c 	rcu_assign_pointer(task->cgroups, to);
to               3192 kernel/signal.c int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
to               3194 kernel/signal.c 	char __user *expansion = si_expansion(to);
to               3195 kernel/signal.c 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
to               3225 kernel/signal.c static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
to               3228 kernel/signal.c 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
to               3230 kernel/signal.c 	to->si_signo = signo;
to               3231 kernel/signal.c 	return post_copy_siginfo_from_user(to, from);
to               3234 kernel/signal.c int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
to               3236 kernel/signal.c 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
to               3238 kernel/signal.c 	return post_copy_siginfo_from_user(to, from);
to               3242 kernel/signal.c int copy_siginfo_to_user32(struct compat_siginfo __user *to,
to               3246 kernel/signal.c 	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
to               3248 kernel/signal.c int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
to               3327 kernel/signal.c 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
to               3333 kernel/signal.c static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
to               3336 kernel/signal.c 	clear_siginfo(to);
to               3337 kernel/signal.c 	to->si_signo = from->si_signo;
to               3338 kernel/signal.c 	to->si_errno = from->si_errno;
to               3339 kernel/signal.c 	to->si_code  = from->si_code;
to               3342 kernel/signal.c 		to->si_pid = from->si_pid;
to               3343 kernel/signal.c 		to->si_uid = from->si_uid;
to               3346 kernel/signal.c 		to->si_tid     = from->si_tid;
to               3347 kernel/signal.c 		to->si_overrun = from->si_overrun;
to               3348 kernel/signal.c 		to->si_int     = from->si_int;
to               3351 kernel/signal.c 		to->si_band = from->si_band;
to               3352 kernel/signal.c 		to->si_fd   = from->si_fd;
to               3355 kernel/signal.c 		to->si_addr = compat_ptr(from->si_addr);
to               3357 kernel/signal.c 		to->si_trapno = from->si_trapno;
to               3361 kernel/signal.c 		to->si_addr = compat_ptr(from->si_addr);
to               3363 kernel/signal.c 		to->si_trapno = from->si_trapno;
to               3365 kernel/signal.c 		to->si_addr_lsb = from->si_addr_lsb;
to               3368 kernel/signal.c 		to->si_addr = compat_ptr(from->si_addr);
to               3370 kernel/signal.c 		to->si_trapno = from->si_trapno;
to               3372 kernel/signal.c 		to->si_lower = compat_ptr(from->si_lower);
to               3373 kernel/signal.c 		to->si_upper = compat_ptr(from->si_upper);
to               3376 kernel/signal.c 		to->si_addr = compat_ptr(from->si_addr);
to               3378 kernel/signal.c 		to->si_trapno = from->si_trapno;
to               3380 kernel/signal.c 		to->si_pkey = from->si_pkey;
to               3383 kernel/signal.c 		to->si_pid    = from->si_pid;
to               3384 kernel/signal.c 		to->si_uid    = from->si_uid;
to               3385 kernel/signal.c 		to->si_status = from->si_status;
to               3388 kernel/signal.c 			to->si_utime = from->_sifields._sigchld_x32._utime;
to               3389 kernel/signal.c 			to->si_stime = from->_sifields._sigchld_x32._stime;
to               3393 kernel/signal.c 			to->si_utime = from->si_utime;
to               3394 kernel/signal.c 			to->si_stime = from->si_stime;
to               3398 kernel/signal.c 		to->si_pid = from->si_pid;
to               3399 kernel/signal.c 		to->si_uid = from->si_uid;
to               3400 kernel/signal.c 		to->si_int = from->si_int;
to               3403 kernel/signal.c 		to->si_call_addr = compat_ptr(from->si_call_addr);
to               3404 kernel/signal.c 		to->si_syscall   = from->si_syscall;
to               3405 kernel/signal.c 		to->si_arch      = from->si_arch;
to               3411 kernel/signal.c static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
to               3420 kernel/signal.c 	return post_copy_siginfo_from_user32(to, &from);
to               3423 kernel/signal.c int copy_siginfo_from_user32(struct kernel_siginfo *to,
to               3431 kernel/signal.c 	return post_copy_siginfo_from_user32(to, &from);
to               3444 kernel/signal.c 	ktime_t *to = NULL, timeout = KTIME_MAX;
to               3453 kernel/signal.c 		to = &timeout;
to               3477 kernel/signal.c 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
to                 45 kernel/time/clocksource.c clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
to                 65 kernel/time/clocksource.c 		tmp = (u64) to << sft;
to                588 kernel/trace/trace_kprobe.c static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
to                592 kernel/trace/trace_kprobe.c 	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
to                599 kernel/trace/trace_kprobe.c 	if (trace_kprobe_has_same_kprobe(to, tk)) {
to                606 kernel/trace/trace_kprobe.c 	ret = trace_probe_append(&tk->tp, &to->tp);
to                955 kernel/trace/trace_probe.c int trace_probe_append(struct trace_probe *tp, struct trace_probe *to)
to                963 kernel/trace/trace_probe.c 	tp->event = to->event;
to                964 kernel/trace/trace_probe.c 	list_add_tail(&tp->list, trace_probe_probe_list(to));
to                334 kernel/trace/trace_probe.h int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
to                438 kernel/trace/trace_uprobe.c static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
to                442 kernel/trace/trace_uprobe.c 	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
to                449 kernel/trace/trace_uprobe.c 	if (trace_uprobe_has_same_uprobe(to, tu)) {
to                456 kernel/trace/trace_uprobe.c 	ret = trace_probe_append(&tu->tp, &to->tp);
to               3374 kernel/workqueue.c static void copy_workqueue_attrs(struct workqueue_attrs *to,
to               3377 kernel/workqueue.c 	to->nice = from->nice;
to               3378 kernel/workqueue.c 	cpumask_copy(to->cpumask, from->cpumask);
to               3384 kernel/workqueue.c 	to->no_numa = from->no_numa;
to                465 lib/assoc_array.c 	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
to                613 lib/assoc_array.c 	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
to                750 lib/assoc_array.c 	edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
to                857 lib/assoc_array.c 		edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
to                874 lib/assoc_array.c 		edit->set[0].to = assoc_array_node_to_ptr(new_n0);
to                919 lib/assoc_array.c 		edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
to                930 lib/assoc_array.c 		edit->set_parent_slot[0].to = sc_slot;
to                932 lib/assoc_array.c 		edit->set[1].to = assoc_array_node_to_ptr(new_n0);
to               1133 lib/assoc_array.c 	edit->set[0].to = NULL;
to               1141 lib/assoc_array.c 		edit->set[1].to = NULL;
to               1243 lib/assoc_array.c 			edit->set[1].to = assoc_array_node_to_ptr(new_n0);
to               1291 lib/assoc_array.c 	edit->set[1].to = NULL;
to               1362 lib/assoc_array.c 			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
to               1372 lib/assoc_array.c 			*edit->set[i].ptr = edit->set[i].to;
to               1710 lib/assoc_array.c 	edit->set[0].to = new_root;
to                 19 lib/iomap_copy.c void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
to                 23 lib/iomap_copy.c 	u32 __iomem *dst = to;
to                 42 lib/iomap_copy.c void __ioread32_copy(void *to, const void __iomem *from, size_t count)
to                 44 lib/iomap_copy.c 	u32 *dst = to;
to                 63 lib/iomap_copy.c void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
to                 68 lib/iomap_copy.c 	u64 __iomem *dst = to;
to                 75 lib/iomap_copy.c 	__iowrite32_copy(to, from, count * 2);
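__iowrite32_copy()/__ioread32_copy() move data to and from MMIO in aligned 32-bit chunks, and their `count` is in 32-bit words, not bytes. A sketch of pushing a command buffer into device RAM; the names are assumptions for the example:

#include <linux/io.h>

static void demo_push_cmds(void __iomem *dev_ram, const u32 *cmds,
			   size_t n_words)
{
	/* count is in 32-bit units, so pass the word count directly. */
	__iowrite32_copy(dev_ram, cmds, n_words);
}
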
to                138 lib/iov_iter.c static int copyout(void __user *to, const void *from, size_t n)
to                140 lib/iov_iter.c 	if (access_ok(to, n)) {
to                142 lib/iov_iter.c 		n = raw_copy_to_user(to, from, n);
to                147 lib/iov_iter.c static int copyin(void *to, const void __user *from, size_t n)
to                150 lib/iov_iter.c 		kasan_check_write(to, n);
to                151 lib/iov_iter.c 		n = raw_copy_from_user(to, from, n);
to                246 lib/iov_iter.c 	void *kaddr, *to;
to                263 lib/iov_iter.c 		to = kaddr + offset;
to                266 lib/iov_iter.c 		left = copyin(to, buf, copy);
to                269 lib/iov_iter.c 		to += copy;
to                276 lib/iov_iter.c 			left = copyin(to, buf, copy);
to                279 lib/iov_iter.c 			to += copy;
to                286 lib/iov_iter.c 		offset = to - kaddr;
to                294 lib/iov_iter.c 	to = kaddr + offset;
to                295 lib/iov_iter.c 	left = copyin(to, buf, copy);
to                298 lib/iov_iter.c 	to += copy;
to                304 lib/iov_iter.c 		left = copyin(to, buf, copy);
to                307 lib/iov_iter.c 		to += copy;
to                457 lib/iov_iter.c static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
to                460 lib/iov_iter.c 	memcpy(to, from + offset, len);
to                466 lib/iov_iter.c 	char *to = kmap_atomic(page);
to                467 lib/iov_iter.c 	memcpy(to + offset, from, len);
to                468 lib/iov_iter.c 	kunmap_atomic(to);
to                565 lib/iov_iter.c static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
to                568 lib/iov_iter.c 	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
to                622 lib/iov_iter.c static int copyout_mcsafe(void __user *to, const void *from, size_t n)
to                624 lib/iov_iter.c 	if (access_ok(to, n)) {
to                626 lib/iov_iter.c 		n = copy_to_user_mcsafe((__force void *) to, from, n);
to                635 lib/iov_iter.c 	char *to;
to                637 lib/iov_iter.c 	to = kmap_atomic(page);
to                638 lib/iov_iter.c 	ret = memcpy_mcsafe(to + offset, from, len);
to                639 lib/iov_iter.c 	kunmap_atomic(to);
to                739 lib/iov_iter.c 	char *to = addr;
to                747 lib/iov_iter.c 		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
to                748 lib/iov_iter.c 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
to                750 lib/iov_iter.c 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
to                759 lib/iov_iter.c 	char *to = addr;
to                770 lib/iov_iter.c 		if (copyin((to += v.iov_len) - v.iov_len,
to                774 lib/iov_iter.c 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
to                776 lib/iov_iter.c 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
to                786 lib/iov_iter.c 	char *to = addr;
to                792 lib/iov_iter.c 		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
to                794 lib/iov_iter.c 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
to                796 lib/iov_iter.c 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
to                820 lib/iov_iter.c 	char *to = addr;
to                826 lib/iov_iter.c 		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
to                828 lib/iov_iter.c 		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
to                830 lib/iov_iter.c 		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
to                841 lib/iov_iter.c 	char *to = addr;
to                849 lib/iov_iter.c 		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
to                853 lib/iov_iter.c 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
to                855 lib/iov_iter.c 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
to               1407 lib/iov_iter.c 	char *to = addr;
to               1418 lib/iov_iter.c 					       (to += v.iov_len) - v.iov_len,
to               1427 lib/iov_iter.c 		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
to               1433 lib/iov_iter.c 		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
to               1447 lib/iov_iter.c 	char *to = addr;
to               1460 lib/iov_iter.c 					       (to += v.iov_len) - v.iov_len,
to               1469 lib/iov_iter.c 		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
to               1475 lib/iov_iter.c 		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
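The iov_iter paths above are what the generic copy_to_iter()/copy_from_iter() helpers expand to for iovec-, bvec- and kvec-backed iterators, with the csum and mcsafe variants layered on the same walk. A sketch of a read_iter handler that hands a kernel buffer to whatever iterator the caller supplied; the message and the fact that ki_pos is ignored are simplifications for the example:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/kernel.h>

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	static const char msg[] = "hello from the kernel\n";
	size_t len = min(iov_iter_count(to), sizeof(msg));

	/* copy_to_iter() returns the number of bytes actually copied. */
	return copy_to_iter(msg, len, to);
}
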
to                236 lib/kfifo.c    static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
to                252 lib/kfifo.c    	ret = copy_to_user(to, fifo->data + off, l);
to                256 lib/kfifo.c    		ret = copy_to_user(to + l, fifo->data, len - l);
to                270 lib/kfifo.c    int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
to                284 lib/kfifo.c    	ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
to                523 lib/kfifo.c    int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
to                538 lib/kfifo.c    	ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
to                 65 lib/parser.c   			args[argc].to = s + len;
to                 69 lib/parser.c   			simple_strtol(s, &args[argc].to, 0);
to                 72 lib/parser.c   			simple_strtoul(s, &args[argc].to, 0);
to                 75 lib/parser.c   			simple_strtoul(s, &args[argc].to, 8);
to                 78 lib/parser.c   			simple_strtoul(s, &args[argc].to, 16);
to                 80 lib/parser.c   			if (args[argc].to == args[argc].from)
to                 86 lib/parser.c   		s = args[argc].to;
to                301 lib/parser.c   	size_t ret = src->to - src->from;
to                322 lib/parser.c   	return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
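In the parser each substring_t records the matched span as from/to pointers and the match_* helpers convert that span. A sketch of mount-option style parsing; the token table and option are invented for the example:

#include <linux/parser.h>

enum { Opt_size, Opt_err };		/* hypothetical options */

static const match_table_t demo_tokens = {
	{ Opt_size,	"size=%u" },
	{ Opt_err,	NULL }
};

static int demo_parse_size(char *opt, unsigned int *size)
{
	substring_t args[MAX_OPT_ARGS];
	int val;

	if (match_token(opt, demo_tokens, args) != Opt_size)
		return -EINVAL;

	/* match_int() parses the span between args[0].from and args[0].to. */
	if (match_int(&args[0], &val))
		return -EINVAL;

	*size = val;
	return 0;
}
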
to                316 lib/string_helpers.c 	unsigned char to;
to                320 lib/string_helpers.c 		to = 'n';
to                323 lib/string_helpers.c 		to = 'r';
to                326 lib/string_helpers.c 		to = 't';
to                329 lib/string_helpers.c 		to = 'v';
to                332 lib/string_helpers.c 		to = 'f';
to                342 lib/string_helpers.c 		*out = to;
to                352 lib/string_helpers.c 	unsigned char to;
to                356 lib/string_helpers.c 		to = '\\';
to                359 lib/string_helpers.c 		to = 'a';
to                362 lib/string_helpers.c 		to = 'e';
to                372 lib/string_helpers.c 		*out = to;
to                  8 lib/usercopy.c unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
to                 13 lib/usercopy.c 		kasan_check_write(to, n);
to                 14 lib/usercopy.c 		res = raw_copy_from_user(to, from, n);
to                 17 lib/usercopy.c 		memset(to + (n - res), 0, res);
to                 24 lib/usercopy.c unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
to                 27 lib/usercopy.c 	if (likely(access_ok(to, n))) {
to                 29 lib/usercopy.c 		n = raw_copy_to_user(to, from, n);
to                245 mm/hugetlb.c   	long to;
to                271 mm/hugetlb.c   		if (f <= rg->to)
to                289 mm/hugetlb.c   		nrg->to = t;
to                311 mm/hugetlb.c   		if (rg->to > t)
to                312 mm/hugetlb.c   			t = rg->to;
to                318 mm/hugetlb.c   			add -= (rg->to - rg->from);
to                326 mm/hugetlb.c   	add += t - nrg->to;		/* Added to end of region */
to                327 mm/hugetlb.c   	nrg->to = t;
to                395 mm/hugetlb.c   		if (f <= rg->to)
to                410 mm/hugetlb.c   			nrg->to   = f;
to                435 mm/hugetlb.c   		if (rg->to > t) {
to                436 mm/hugetlb.c   			chg += rg->to - t;
to                437 mm/hugetlb.c   			t = rg->to;
to                439 mm/hugetlb.c   		chg -= rg->to - rg->from;
to                502 mm/hugetlb.c   		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
to                508 mm/hugetlb.c   		if (f > rg->from && t < rg->to) { /* Must split region */
to                534 mm/hugetlb.c   			nrg->to = rg->to;
to                538 mm/hugetlb.c   			rg->to = f;
to                545 mm/hugetlb.c   		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
to                546 mm/hugetlb.c   			del += rg->to - rg->from;
to                556 mm/hugetlb.c   			del += rg->to - f;
to                557 mm/hugetlb.c   			rg->to = f;
to                604 mm/hugetlb.c   		if (rg->to <= f)
to                610 mm/hugetlb.c   		seg_to = min(rg->to, t);
to               4646 mm/hugetlb.c   					long from, long to,
to               4657 mm/hugetlb.c   	if (from > to) {
to               4684 mm/hugetlb.c   		chg = region_chg(resv_map, from, to);
to               4691 mm/hugetlb.c   		chg = to - from;
to               4736 mm/hugetlb.c   		long add = region_add(resv_map, from, to);
to               4758 mm/hugetlb.c   			region_abort(resv_map, from, to);
to                 73 mm/kasan/quarantine.c static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
to                 78 mm/kasan/quarantine.c 	if (qlist_empty(to)) {
to                 79 mm/kasan/quarantine.c 		*to = *from;
to                 84 mm/kasan/quarantine.c 	to->tail->next = from->head;
to                 85 mm/kasan/quarantine.c 	to->tail = from->tail;
to                 86 mm/kasan/quarantine.c 	to->bytes += from->bytes;
to                265 mm/kasan/quarantine.c 				   struct qlist_head *to,
to                280 mm/kasan/quarantine.c 			qlist_put(to, curr, obj_cache->size);
to                192 mm/memcontrol.c 	struct mem_cgroup *to;
to               1358 mm/memcontrol.c 	struct mem_cgroup *to;
to               1366 mm/memcontrol.c 	to = mc.to;
to               1371 mm/memcontrol.c 		mem_cgroup_is_descendant(to, memcg);
to               3146 mm/memcontrol.c 				struct mem_cgroup *from, struct mem_cgroup *to)
to               3151 mm/memcontrol.c 	new_id = mem_cgroup_id(to);
to               3155 mm/memcontrol.c 		mod_memcg_state(to, MEMCG_SWAP, 1);
to               3162 mm/memcontrol.c 				struct mem_cgroup *from, struct mem_cgroup *to)
to               5343 mm/memcontrol.c 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
to               5351 mm/memcontrol.c 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
to               5486 mm/memcontrol.c 				   struct mem_cgroup *to)
to               5495 mm/memcontrol.c 	VM_BUG_ON(from == to);
to               5515 mm/memcontrol.c 	to_vec = mem_cgroup_lruvec(pgdat, to);
to               5550 mm/memcontrol.c 	page->mem_cgroup = to;
to               5557 mm/memcontrol.c 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
to               5558 mm/memcontrol.c 	memcg_check_events(to, page);
to               5741 mm/memcontrol.c 	struct mem_cgroup *to = mc.to;
to               5745 mm/memcontrol.c 		cancel_charge(mc.to, mc.precharge);
to               5768 mm/memcontrol.c 		if (!mem_cgroup_is_root(mc.to))
to               5769 mm/memcontrol.c 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
to               5771 mm/memcontrol.c 		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
to               5772 mm/memcontrol.c 		css_put_many(&mc.to->css, mc.moved_swap);
to               5777 mm/memcontrol.c 	memcg_oom_recover(to);
to               5793 mm/memcontrol.c 	mc.to = NULL;
to               5848 mm/memcontrol.c 		VM_BUG_ON(mc.to);
to               5856 mm/memcontrol.c 		mc.to = memcg;
to               5872 mm/memcontrol.c 	if (mc.to)
to               5899 mm/memcontrol.c 							     mc.from, mc.to)) {
to               5909 mm/memcontrol.c 						     mc.from, mc.to)) {
to               5948 mm/memcontrol.c 						mc.from, mc.to)) {
to               5960 mm/memcontrol.c 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
to               6028 mm/memcontrol.c 	if (mc.to) {
to               1060 mm/mempolicy.c 		     const nodemask_t *to, int flags)
to               1126 mm/mempolicy.c 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
to               1127 mm/mempolicy.c 						(node_isset(s, *to)))
to               1130 mm/mempolicy.c 			d = node_remap(s, *from, *to);
to               1206 mm/mempolicy.c 		     const nodemask_t *to, int flags)
to                532 mm/nommu.c     static void free_page_series(unsigned long from, unsigned long to)
to                534 mm/nommu.c     	for (; from < to; from += PAGE_SIZE) {
to               1448 mm/nommu.c     		      unsigned long from, unsigned long to)
to               1458 mm/nommu.c     		vma->vm_start = to;
to               1468 mm/nommu.c     		to = region->vm_top;
to               1471 mm/nommu.c     		region->vm_start = to;
to               1476 mm/nommu.c     	free_page_series(from, to);
to               2529 mm/shmem.c     static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
to               2546 mm/shmem.c     	if (!iter_is_iovec(to))
to               2618 mm/shmem.c     		ret = copy_page_to_iter(page, offset, nr, to);
to               2625 mm/shmem.c     		if (!iov_iter_count(to))
to                574 mm/slab.c      static int transfer_objects(struct array_cache *to,
to                578 mm/slab.c      	int nr = min3(from->avail, max, to->limit - to->avail);
to                583 mm/slab.c      	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
to                587 mm/slab.c      	to->avail += nr;
to                727 mm/slub.c      						void *from, void *to)
to                729 mm/slub.c      	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
to                730 mm/slub.c      	memset(from, data, to - from);
to                865 mm/truncate.c  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
to                872 mm/truncate.c  	WARN_ON(to > inode->i_size);
to                874 mm/truncate.c  	if (from >= to || bsize == PAGE_SIZE)
to                878 mm/truncate.c  	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
to                122 net/8021q/vlan_netlink.c 			vlan_dev_set_ingress_priority(dev, m->to, m->from);
to                128 net/8021q/vlan_netlink.c 			err = vlan_dev_set_egress_priority(dev, m->from, m->to);
to                241 net/8021q/vlan_netlink.c 			m.to   = vlan->ingress_priority_map[i];
to                261 net/8021q/vlan_netlink.c 				m.to   = (pm->vlan_qos >> 13) & 0x7;
to               1550 net/9p/client.c p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
to               1558 net/9p/client.c 		   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
to               1560 net/9p/client.c 	while (iov_iter_count(to)) {
to               1561 net/9p/client.c 		int count = iov_iter_count(to);
to               1578 net/9p/client.c 			req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
to               1610 net/9p/client.c 			int n = copy_to_iter(dataptr, count, to);
to               1619 net/9p/client.c 			iov_iter_advance(to, count);
to               2074 net/9p/client.c 	struct iov_iter to;
to               2076 net/9p/client.c 	iov_iter_kvec(&to, READ, &kv, 1, count);
to               2097 net/9p/client.c 		req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
to                 87 net/appletalk/ddp.c static struct sock *atalk_search_socket(struct sockaddr_at *to,
to                 96 net/appletalk/ddp.c 		if (to->sat_port != at->src_port)
to                 99 net/appletalk/ddp.c 		if (to->sat_addr.s_net == ATADDR_ANYNET &&
to                100 net/appletalk/ddp.c 		    to->sat_addr.s_node == ATADDR_BCAST)
to                103 net/appletalk/ddp.c 		if (to->sat_addr.s_net == at->src_net &&
to                104 net/appletalk/ddp.c 		    (to->sat_addr.s_node == at->src_node ||
to                105 net/appletalk/ddp.c 		     to->sat_addr.s_node == ATADDR_BCAST ||
to                106 net/appletalk/ddp.c 		     to->sat_addr.s_node == ATADDR_ANYNODE))
to                111 net/appletalk/ddp.c 		if (to->sat_addr.s_node == ATADDR_ANYNODE &&
to                112 net/appletalk/ddp.c 		    to->sat_addr.s_net != ATADDR_ANYNET &&
to                114 net/appletalk/ddp.c 			to->sat_addr.s_node = atif->address.s_node;
to                 88 net/atm/atm_misc.c void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
to                 90 net/atm/atm_misc.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
to                 96 net/atm/atm_misc.c void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
to                 98 net/atm/atm_misc.c #define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
to                162 net/atm/resources.c     struct atm_aal_stats *to)
to                164 net/atm/resources.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
to                170 net/atm/resources.c     struct atm_aal_stats *to)
to                172 net/atm/resources.c #define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
to                619 net/batman-adv/main.c 	unsigned int to = skb->len;
to                627 net/batman-adv/main.c 	skb_prepare_seq_read(skb, from, to, &st);
to               1484 net/bluetooth/hci_request.c 		int to;
to               1508 net/bluetooth/hci_request.c 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
to               1511 net/bluetooth/hci_request.c 					   &adv_instance->rpa_expired_cb, to);
to               1514 net/bluetooth/hci_request.c 					   &hdev->rpa_expired, to);
to               1909 net/bluetooth/hci_request.c 		int to;
to               1925 net/bluetooth/hci_request.c 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
to               1926 net/bluetooth/hci_request.c 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
to               1320 net/bluetooth/mgmt.c 		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
to               1321 net/bluetooth/mgmt.c 		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
to               1417 net/bluetooth/mgmt.c 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
to               1419 net/bluetooth/mgmt.c 					   &hdev->discov_off, to);
to                194 net/bpf/test_run.c static inline bool range_is_zero(void *buf, size_t from, size_t to)
to                196 net/bpf/test_run.c 	return !memchr_inv((u8 *)buf + from, 0, to - from);
to                 72 net/bridge/br_forward.c static void __br_forward(const struct net_bridge_port *to,
to                 80 net/bridge/br_forward.c 	vg = nbp_vlan_group_rcu(to);
to                 81 net/bridge/br_forward.c 	skb = br_handle_vlan(to->br, to, vg, skb);
to                 86 net/bridge/br_forward.c 	skb->dev = to->dev;
to                 96 net/bridge/br_forward.c 		if (unlikely(netpoll_tx_running(to->br->dev))) {
to                101 net/bridge/br_forward.c 				br_netpoll_send_skb(to, skb);
to                138 net/bridge/br_forward.c void br_forward(const struct net_bridge_port *to,
to                141 net/bridge/br_forward.c 	if (unlikely(!to))
to                145 net/bridge/br_forward.c 	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
to                148 net/bridge/br_forward.c 		backup_port = rcu_dereference(to->backup_port);
to                151 net/bridge/br_forward.c 		to = backup_port;
to                154 net/bridge/br_forward.c 	if (should_deliver(to, skb)) {
to                156 net/bridge/br_forward.c 			deliver_clone(to, skb, local_orig);
to                158 net/bridge/br_forward.c 			__br_forward(to, skb, local_orig);
to                599 net/bridge/br_private.h void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
to                606 net/bridge/br_private.h static inline bool br_skb_isolated(const struct net_bridge_port *to,
to                610 net/bridge/br_private.h 	       (to->flags & BR_ISOLATED);
to                176 net/caif/cfpkt_skbuff.c 	u8 *to;
to                204 net/caif/cfpkt_skbuff.c 	to = pskb_put(skb, lastskb, len);
to                206 net/caif/cfpkt_skbuff.c 		memcpy(to, data, len);
to                219 net/caif/cfpkt_skbuff.c 	u8 *to;
to                236 net/caif/cfpkt_skbuff.c 	to = skb_push(skb, len);
to                237 net/caif/cfpkt_skbuff.c 	memcpy(to, data, len);
to                228 net/can/gw.c   static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
to                244 net/can/gw.c   	    to >= -dlen && to < dlen &&
to                262 net/can/gw.c   	int to = calc_idx(xor->to_idx, cf->len);
to                267 net/can/gw.c   	if (from < 0 || to < 0 || res < 0)
to                270 net/can/gw.c   	if (from <= to) {
to                271 net/can/gw.c   		for (i = from; i <= to; i++)
to                274 net/can/gw.c   		for (i = from; i >= to; i--)
to                307 net/can/gw.c   	int to = calc_idx(crc8->to_idx, cf->len);
to                312 net/can/gw.c   	if (from < 0 || to < 0 || res < 0)
to                315 net/can/gw.c   	if (from <= to) {
to                432 net/ceph/ceph_common.c 					     argstr[0].to,
to                448 net/ceph/ceph_common.c 					      argstr[0].to-argstr[0].from,
to               2412 net/ceph/osdmap.c 			int to = pg->pg_upmap_items.from_to[i][1];
to               2420 net/ceph/osdmap.c 				if (osd == to) {
to               2426 net/ceph/osdmap.c 				    !(to != CRUSH_ITEM_NONE &&
to               2427 net/ceph/osdmap.c 				      to < osdmap->max_osd &&
to               2428 net/ceph/osdmap.c 				      osdmap->osd_weight[to] == 0)) {
to               2433 net/ceph/osdmap.c 				raw->osds[pos] = to;
to                417 net/core/datagram.c 			       struct iov_iter *to, int len, bool fault_short,
to                430 net/core/datagram.c 				    skb->data + offset, copy, data, to);
to                454 net/core/datagram.c 					copy, data, to);
to                475 net/core/datagram.c 						to, copy, fault_short, cb, data))
to                492 net/core/datagram.c 	iov_iter_revert(to, offset - start_off);
to                496 net/core/datagram.c 	if (fault_short || iov_iter_count(to))
to                512 net/core/datagram.c 			   struct iov_iter *to, int len,
to                515 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, true,
to                534 net/core/datagram.c 			   struct iov_iter *to, int len)
to                537 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, false,
to                700 net/core/datagram.c 				      struct iov_iter *to, int len,
to                703 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, true,
to                633 net/core/dev_addr_lists.c int dev_uc_sync(struct net_device *to, struct net_device *from)
to                637 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                640 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                641 net/core/dev_addr_lists.c 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
to                643 net/core/dev_addr_lists.c 		__dev_set_rx_mode(to);
to                644 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
to                663 net/core/dev_addr_lists.c int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
to                667 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                670 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                671 net/core/dev_addr_lists.c 	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
to                673 net/core/dev_addr_lists.c 		__dev_set_rx_mode(to);
to                674 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
to                688 net/core/dev_addr_lists.c void dev_uc_unsync(struct net_device *to, struct net_device *from)
to                690 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                694 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                695 net/core/dev_addr_lists.c 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
to                696 net/core/dev_addr_lists.c 	__dev_set_rx_mode(to);
to                697 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
to                854 net/core/dev_addr_lists.c int dev_mc_sync(struct net_device *to, struct net_device *from)
to                858 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                861 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                862 net/core/dev_addr_lists.c 	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
to                864 net/core/dev_addr_lists.c 		__dev_set_rx_mode(to);
to                865 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
to                884 net/core/dev_addr_lists.c int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
to                888 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                891 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                892 net/core/dev_addr_lists.c 	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
to                894 net/core/dev_addr_lists.c 		__dev_set_rx_mode(to);
to                895 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
to                909 net/core/dev_addr_lists.c void dev_mc_unsync(struct net_device *to, struct net_device *from)
to                911 net/core/dev_addr_lists.c 	if (to->addr_len != from->addr_len)
to                915 net/core/dev_addr_lists.c 	netif_addr_lock(to);
to                916 net/core/dev_addr_lists.c 	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
to                917 net/core/dev_addr_lists.c 	__dev_set_rx_mode(to);
to                918 net/core/dev_addr_lists.c 	netif_addr_unlock(to);
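In the dev_addr_lists.c helpers above the argument order is (to, from): addresses are synced from the second device's list into the first device's list, and __dev_set_rx_mode() then reprograms the destination's hardware filter. A hedged usage sketch for a stacked device's .ndo_set_rx_mode handler; the example_* names are illustrative and not taken from the listing:

	/* Hedged sketch: an upper device (e.g. a VLAN-style device)
	 * propagating its address filters down to its lower device. */
	static void example_set_rx_mode(struct net_device *upper)
	{
		/* example_get_lower() is a hypothetical accessor for the lower dev */
		struct net_device *lower = example_get_lower(upper);

		dev_uc_sync(lower, upper);	/* sync upper's unicast list into lower */
		dev_mc_sync(lower, upper);	/* sync upper's multicast list into lower */
	}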
to                560 net/core/ethtool.c static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
to                568 net/core/ethtool.c 	memcpy(&to->base, &link_usettings.base, sizeof(to->base));
to                569 net/core/ethtool.c 	bitmap_from_arr32(to->link_modes.supported,
to                572 net/core/ethtool.c 	bitmap_from_arr32(to->link_modes.advertising,
to                575 net/core/ethtool.c 	bitmap_from_arr32(to->link_modes.lp_advertising,
to                587 net/core/ethtool.c store_link_ksettings_for_user(void __user *to,
to                603 net/core/ethtool.c 	if (copy_to_user(to, &link_usettings, sizeof(link_usettings)))
to               1702 net/core/filter.c 	   void *, to, u32, len)
to               1709 net/core/filter.c 	ptr = skb_header_pointer(skb, offset, len, to);
to               1712 net/core/filter.c 	if (ptr != to)
to               1713 net/core/filter.c 		memcpy(to, ptr, len);
to               1717 net/core/filter.c 	memset(to, 0, len);
to               1733 net/core/filter.c 	   void *, to, u32, len)
to               1743 net/core/filter.c 	ptr = skb_header_pointer(ctx->skb, offset, len, to);
to               1746 net/core/filter.c 	if (ptr != to)
to               1747 net/core/filter.c 		memcpy(to, ptr, len);
to               1751 net/core/filter.c 	memset(to, 0, len);
to               1766 net/core/filter.c 	   u32, offset, void *, to, u32, len, u32, start_header)
to               1788 net/core/filter.c 		memcpy(to, ptr, len);
to               1793 net/core/filter.c 	memset(to, 0, len);
to               1874 net/core/filter.c 	   u64, from, u64, to, u64, flags)
to               1891 net/core/filter.c 		csum_replace_by_diff(ptr, to);
to               1894 net/core/filter.c 		csum_replace2(ptr, from, to);
to               1897 net/core/filter.c 		csum_replace4(ptr, from, to);
to               1918 net/core/filter.c 	   u64, from, u64, to, u64, flags)
to               1942 net/core/filter.c 		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
to               1945 net/core/filter.c 		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
to               1948 net/core/filter.c 		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
to               1971 net/core/filter.c 	   __be32 *, to, u32, to_size, __wsum, seed)
to               1992 net/core/filter.c 		sp->diff[j] = to[i];
to               2225 net/core/filter.c 	u8 *raw, *to, *from;
to               2284 net/core/filter.c 		to = raw + poffset;
to               2286 net/core/filter.c 		memcpy(to, from, len);
to               2350 net/core/filter.c 	u8 *raw, *to, *from;
to               2402 net/core/filter.c 			to = raw + front + len;
to               2404 net/core/filter.c 			memcpy(to, from, back);
to               2574 net/core/filter.c 				u8 *to, *from;
to               2585 net/core/filter.c 				to = page_address(page);
to               2586 net/core/filter.c 				memcpy(to, from, a);
to               2587 net/core/filter.c 				memcpy(to + a, from + a + pop, b);
to               3826 net/core/filter.c BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
to               3831 net/core/filter.c 	void *to_orig = to;
to               3855 net/core/filter.c 			to = (struct bpf_tunnel_key *)compat;
to               3862 net/core/filter.c 	to->tunnel_id = be64_to_cpu(info->key.tun_id);
to               3863 net/core/filter.c 	to->tunnel_tos = info->key.tos;
to               3864 net/core/filter.c 	to->tunnel_ttl = info->key.ttl;
to               3865 net/core/filter.c 	to->tunnel_ext = 0;
to               3868 net/core/filter.c 		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
to               3869 net/core/filter.c 		       sizeof(to->remote_ipv6));
to               3870 net/core/filter.c 		to->tunnel_label = be32_to_cpu(info->key.label);
to               3872 net/core/filter.c 		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
to               3873 net/core/filter.c 		memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
to               3874 net/core/filter.c 		to->tunnel_label = 0;
to               3878 net/core/filter.c 		memcpy(to_orig, to, size);
to               3896 net/core/filter.c BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
to               3911 net/core/filter.c 	ip_tunnel_info_opts_get(to, info);
to               3913 net/core/filter.c 		memset(to + info->options_len, 0, size - info->options_len);
to               3917 net/core/filter.c 	memset(to, 0, size);
to               4554 net/core/filter.c 	   struct bpf_xfrm_state *, to, u32, size, u64, flags)
to               4567 net/core/filter.c 	to->reqid = x->props.reqid;
to               4568 net/core/filter.c 	to->spi = x->id.spi;
to               4569 net/core/filter.c 	to->family = x->props.family;
to               4570 net/core/filter.c 	to->ext = 0;
to               4572 net/core/filter.c 	if (to->family == AF_INET6) {
to               4573 net/core/filter.c 		memcpy(to->remote_ipv6, x->props.saddr.a6,
to               4574 net/core/filter.c 		       sizeof(to->remote_ipv6));
to               4576 net/core/filter.c 		to->remote_ipv4 = x->props.saddr.a4;
to               4577 net/core/filter.c 		memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
to               4582 net/core/filter.c 	memset(to, 0, size);
to               8772 net/core/filter.c 	   void *, to, u32, len)
to               8774 net/core/filter.c 	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
to               8789 net/core/filter.c 	   void *, to, u32, len, u32, start_header)
to               8791 net/core/filter.c 	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
to               2188 net/core/skbuff.c int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to               2201 net/core/skbuff.c 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
to               2205 net/core/skbuff.c 		to     += copy;
to               2227 net/core/skbuff.c 				memcpy(to + copied, vaddr + p_off, p_len);
to               2234 net/core/skbuff.c 			to     += copy;
to               2248 net/core/skbuff.c 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
to               2253 net/core/skbuff.c 			to     += copy;
to               2727 net/core/skbuff.c 				    u8 *to, int len, __wsum csum)
to               2738 net/core/skbuff.c 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
to               2743 net/core/skbuff.c 		to     += copy;
to               2768 net/core/skbuff.c 								  to + copied,
to               2778 net/core/skbuff.c 			to     += copy;
to               2795 net/core/skbuff.c 						       to, copy, 0);
to               2800 net/core/skbuff.c 			to     += copy;
to               2938 net/core/skbuff.c skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
to               2949 net/core/skbuff.c 	if (len <= skb_tailroom(to))
to               2950 net/core/skbuff.c 		return skb_copy_bits(from, 0, skb_put(to, len), len);
to               2953 net/core/skbuff.c 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
to               2962 net/core/skbuff.c 			__skb_fill_page_desc(to, 0, page, offset, plen);
to               2969 net/core/skbuff.c 	to->truesize += len + plen;
to               2970 net/core/skbuff.c 	to->len += len + plen;
to               2971 net/core/skbuff.c 	to->data_len += len + plen;
to               2977 net/core/skbuff.c 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
to               2984 net/core/skbuff.c 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
to               2985 net/core/skbuff.c 		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
to               2987 net/core/skbuff.c 		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
to               2989 net/core/skbuff.c 		skb_frag_ref(to, j);
to               2992 net/core/skbuff.c 	skb_shinfo(to)->nr_frags = j;
to               2998 net/core/skbuff.c void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
to               3010 net/core/skbuff.c 	skb_copy_from_linear_data(skb, to, csstart);
to               3014 net/core/skbuff.c 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
to               3020 net/core/skbuff.c 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
to               3301 net/core/skbuff.c 	int from, to, merge, todo;
to               3313 net/core/skbuff.c 	to = skb_shinfo(tgt)->nr_frags;
to               3319 net/core/skbuff.c 	if (!to ||
to               3320 net/core/skbuff.c 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
to               3324 net/core/skbuff.c 		merge = to - 1;
to               3348 net/core/skbuff.c 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
to               3355 net/core/skbuff.c 		if (to == MAX_SKB_FRAGS)
to               3359 net/core/skbuff.c 		fragto = &skb_shinfo(tgt)->frags[to];
to               3365 net/core/skbuff.c 			to++;
to               3377 net/core/skbuff.c 			to++;
to               3383 net/core/skbuff.c 	skb_shinfo(tgt)->nr_frags = to;
to               3394 net/core/skbuff.c 	to = 0;
to               3396 net/core/skbuff.c 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
to               3397 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags = to;
to               3430 net/core/skbuff.c 			  unsigned int to, struct skb_seq_state *st)
to               3433 net/core/skbuff.c 	st->upper_offset = to;
to               3573 net/core/skbuff.c 			   unsigned int to, struct ts_config *config)
to               3581 net/core/skbuff.c 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
to               3584 net/core/skbuff.c 	return (ret <= to - from ? ret : UINT_MAX);
to               5026 net/core/skbuff.c bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
to               5034 net/core/skbuff.c 	if (skb_cloned(to))
to               5037 net/core/skbuff.c 	if (len <= skb_tailroom(to)) {
to               5039 net/core/skbuff.c 			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
to               5044 net/core/skbuff.c 	to_shinfo = skb_shinfo(to);
to               5048 net/core/skbuff.c 	if (skb_zcopy(to) || skb_zcopy(from))
to               5067 net/core/skbuff.c 		skb_fill_page_desc(to, to_shinfo->nr_frags,
to               5094 net/core/skbuff.c 	to->truesize += delta;
to               5095 net/core/skbuff.c 	to->len += len;
to               5096 net/core/skbuff.c 	to->data_len += len;
to                362 net/core/skmsg.c 	void *to;
to                377 net/core/skmsg.c 		to = sg_virt(sge) + msg->sg.copybreak;
to                380 net/core/skmsg.c 			ret = copy_from_iter_nocache(to, copy, from);
to                382 net/core/skmsg.c 			ret = copy_from_iter(to, copy, from);
to                426 net/core/utils.c 			      __be32 from, __be32 to, bool pseudohdr)
to                429 net/core/utils.c 		csum_replace4(sum, from, to);
to                433 net/core/utils.c 					      (__force __wsum)to);
to                437 net/core/utils.c 					   (__force __wsum)to));
to                459 net/core/utils.c 			       const __be32 *from, const __be32 *to,
to                464 net/core/utils.c 		to[0], to[1], to[2], to[3],
to                550 net/dccp/feat.c int dccp_feat_clone_list(struct list_head const *from, struct list_head *to)
to                554 net/dccp/feat.c 	INIT_LIST_HEAD(to);
to                559 net/dccp/feat.c 		list_add_tail(&new->node, to);
to                564 net/dccp/feat.c 	dccp_feat_list_purge(to);
to                127 net/dccp/feat.h void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
to                265 net/dccp/options.c void dccp_encode_value_var(const u64 value, u8 *to, const u8 len)
to                268 net/dccp/options.c 		*to++ = (value & 0xFF0000000000ull) >> 40;
to                270 net/dccp/options.c 		*to++ = (value & 0xFF00000000ull) >> 32;
to                272 net/dccp/options.c 		*to++ = (value & 0xFF000000) >> 24;
to                274 net/dccp/options.c 		*to++ = (value & 0xFF0000) >> 16;
to                276 net/dccp/options.c 		*to++ = (value & 0xFF00) >> 8;
to                278 net/dccp/options.c 		*to++ = (value & 0xFF);
to                291 net/dccp/options.c 	unsigned char *to;
to                298 net/dccp/options.c 	to    = skb_push(skb, len + 2);
to                299 net/dccp/options.c 	*to++ = option;
to                300 net/dccp/options.c 	*to++ = len + 2;
to                302 net/dccp/options.c 	memcpy(to, value, len);
to                356 net/dccp/options.c 	unsigned char *to;
to                377 net/dccp/options.c 	to    = skb_push(skb, len);
to                378 net/dccp/options.c 	*to++ = DCCPO_TIMESTAMP_ECHO;
to                379 net/dccp/options.c 	*to++ = len;
to                381 net/dccp/options.c 	memcpy(to, &tstamp_echo, 4);
to                382 net/dccp/options.c 	to += 4;
to                386 net/dccp/options.c 		memcpy(to, &var16, 2);
to                389 net/dccp/options.c 		memcpy(to, &var32, 4);
to                406 net/dccp/options.c 	unsigned char *to;
to                428 net/dccp/options.c 	to   = skb_push(skb, len);
to                445 net/dccp/options.c 		*to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
to                446 net/dccp/options.c 		*to++ = copylen + 2;
to                452 net/dccp/options.c 			memcpy(to, from, tailsize);
to                453 net/dccp/options.c 			to	+= tailsize;
to                459 net/dccp/options.c 		memcpy(to, from, copylen);
to                461 net/dccp/options.c 		to   += copylen;
to                503 net/dccp/options.c 	u8 tot_len, *to;
to                521 net/dccp/options.c 	to    = skb_push(skb, tot_len);
to                522 net/dccp/options.c 	*to++ = type;
to                523 net/dccp/options.c 	*to++ = tot_len;
to                524 net/dccp/options.c 	*to++ = feat;
to                527 net/dccp/options.c 		*to++ = *val;
to                529 net/dccp/options.c 		memcpy(to, val, len);
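The dccp_encode_value_var() fragments from net/dccp/options.c above write the low bytes of a 64-bit value into a caller-chosen number of octets, most-significant byte first. A compact equivalent sketch of that big-endian variable-length encoding, written as a loop rather than the kernel's switch (types assume <linux/types.h>):

	/* Hedged sketch: emit the low 'len' bytes of 'value', MSB first. */
	static void example_encode_value_var(u64 value, u8 *to, u8 len)
	{
		while (len--)
			*to++ = (value >> (8 * len)) & 0xff;
	}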
to                347 net/ipv4/icmp.c static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
to                355 net/ipv4/icmp.c 				      to, len, 0);
to                544 net/ipv4/ip_output.c static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to                546 net/ipv4/ip_output.c 	to->pkt_type = from->pkt_type;
to                547 net/ipv4/ip_output.c 	to->priority = from->priority;
to                548 net/ipv4/ip_output.c 	to->protocol = from->protocol;
to                549 net/ipv4/ip_output.c 	to->skb_iif = from->skb_iif;
to                550 net/ipv4/ip_output.c 	skb_dst_drop(to);
to                551 net/ipv4/ip_output.c 	skb_dst_copy(to, from);
to                552 net/ipv4/ip_output.c 	to->dev = from->dev;
to                553 net/ipv4/ip_output.c 	to->mark = from->mark;
to                555 net/ipv4/ip_output.c 	skb_copy_hash(to, from);
to                558 net/ipv4/ip_output.c 	to->tc_index = from->tc_index;
to                560 net/ipv4/ip_output.c 	nf_copy(to, from);
to                561 net/ipv4/ip_output.c 	skb_ext_copy(to, from);
to                563 net/ipv4/ip_output.c 	to->ipvs_property = from->ipvs_property;
to                565 net/ipv4/ip_output.c 	skb_copy_secmark(to, from);
to                613 net/ipv4/ip_output.c 	struct sk_buff *to = iter->frag;
to                616 net/ipv4/ip_output.c 	IPCB(to)->flags = IPCB(skb)->flags;
to                619 net/ipv4/ip_output.c 		ip_options_fragment(to);
to                666 net/ipv4/ip_output.c static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
to                670 net/ipv4/ip_output.c 	IPCB(to)->flags = IPCB(from)->flags;
to                925 net/ipv4/ip_output.c ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
to                930 net/ipv4/ip_output.c 		if (!copy_from_iter_full(to, len, &msg->msg_iter))
to                934 net/ipv4/ip_output.c 		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
to                958 net/ipv4/ip_output.c 			    int getfrag(void *from, char *to, int offset,
to               1297 net/ipv4/ip_output.c 		   int getfrag(void *from, char *to, int offset, int len,
to               1607 net/ipv4/ip_output.c 			    int getfrag(void *from, char *to, int offset,
to               1642 net/ipv4/ip_output.c static int ip_reply_glue_bits(void *dptr, char *to, int offset,
to               1647 net/ipv4/ip_output.c 	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
to                 64 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	__be32 to;
to                 74 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		memcpy(&s[7], &ctx->to, 4);
to                 84 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		memcpy(&s[4], &ctx->to, 4);
to                116 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 			 (void *)&ctx->from, (void *)&ctx->to);
to                120 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		*pdata = ctx->to;
to                137 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		ctx.to = ct->tuplehash[!dir].tuple.dst.u3.ip;
to                140 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		ctx.to = ct->tuplehash[dir].tuple.dst.u3.ip;
to                143 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	if (ctx.from == ctx.to)
to                597 net/ipv4/ping.c int ping_getfrag(void *from, char *to,
to                606 net/ipv4/ping.c 		if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr),
to                613 net/ipv4/ping.c 		if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
to                467 net/ipv4/raw.c static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
to                476 net/ipv4/raw.c 			memcpy(to, rfv->hdr.c + offset, copy);
to                481 net/ipv4/raw.c 							  to, copy, 0),
to                486 net/ipv4/raw.c 		to += copy;
to                495 net/ipv4/raw.c 	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
to               1378 net/ipv4/tcp_input.c int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
to               1386 net/ipv4/tcp_input.c 	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
to               1388 net/ipv4/tcp_input.c 	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
to               1390 net/ipv4/tcp_input.c 	return skb_shift(to, from, shiftlen);
to               4416 net/ipv4/tcp_input.c 			     struct sk_buff *to,
to               4425 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
to               4429 net/ipv4/tcp_input.c 	if (from->decrypted != to->decrypted)
to               4433 net/ipv4/tcp_input.c 	if (!skb_try_coalesce(to, from, fragstolen, &delta))
to               4439 net/ipv4/tcp_input.c 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
to               4440 net/ipv4/tcp_input.c 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
to               4441 net/ipv4/tcp_input.c 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
to               4444 net/ipv4/tcp_input.c 		TCP_SKB_CB(to)->has_rxtstamp = true;
to               4445 net/ipv4/tcp_input.c 		to->tstamp = from->tstamp;
to               4446 net/ipv4/tcp_input.c 		skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
to               4453 net/ipv4/tcp_input.c 			     struct sk_buff *to,
to               4457 net/ipv4/tcp_input.c 	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
to               4461 net/ipv4/tcp_input.c 		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
to               4464 net/ipv4/tcp_input.c 		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
to               2856 net/ipv4/tcp_output.c static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
to               2860 net/ipv4/tcp_output.c 	struct sk_buff *skb = to, *tmp;
to               2872 net/ipv4/tcp_output.c 		if (!tcp_skb_can_collapse_to(to))
to               2888 net/ipv4/tcp_output.c 		if (!tcp_collapse_retrans(sk, to))
to                327 net/ipv6/exthdrs.c 	__be32 from, to;
to                340 net/ipv6/exthdrs.c 	to = *(__be32 *)hdr;
to                344 net/ipv6/exthdrs.c 	update_csum_diff4(skb, from, to);
to                294 net/ipv6/icmp.c static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
to                301 net/ipv6/icmp.c 				      to, len, csum);
to                 84 net/ipv6/ila/ila.h static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
to                 87 net/ipv6/ila/ila.h 		~from[0], ~from[1], to[0], to[1],
to                579 net/ipv6/ip6_output.c static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to                581 net/ipv6/ip6_output.c 	to->pkt_type = from->pkt_type;
to                582 net/ipv6/ip6_output.c 	to->priority = from->priority;
to                583 net/ipv6/ip6_output.c 	to->protocol = from->protocol;
to                584 net/ipv6/ip6_output.c 	skb_dst_drop(to);
to                585 net/ipv6/ip6_output.c 	skb_dst_set(to, dst_clone(skb_dst(from)));
to                586 net/ipv6/ip6_output.c 	to->dev = from->dev;
to                587 net/ipv6/ip6_output.c 	to->mark = from->mark;
to                589 net/ipv6/ip6_output.c 	skb_copy_hash(to, from);
to                592 net/ipv6/ip6_output.c 	to->tc_index = from->tc_index;
to                594 net/ipv6/ip6_output.c 	nf_copy(to, from);
to                595 net/ipv6/ip6_output.c 	skb_ext_copy(to, from);
to                596 net/ipv6/ip6_output.c 	skb_copy_secmark(to, from);
to               1318 net/ipv6/ip6_output.c 			     int getfrag(void *from, char *to, int offset,
to               1657 net/ipv6/ip6_output.c 		    int getfrag(void *from, char *to, int offset, int len,
to               1844 net/ipv6/ip6_output.c 			     int getfrag(void *from, char *to, int offset,
to                735 net/ipv6/raw.c static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
to                744 net/ipv6/raw.c 			memcpy(to, rfv->c + offset, copy);
to                749 net/ipv6/raw.c 							  to, copy, 0),
to                754 net/ipv6/raw.c 		to += copy;
to                763 net/ipv6/raw.c 	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
to               1560 net/mac80211/trace.h #define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from
to               1216 net/netfilter/ipset/ip_set_core.c 	struct ip_set *from, *to;
to               1230 net/netfilter/ipset/ip_set_core.c 	to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
to               1232 net/netfilter/ipset/ip_set_core.c 	if (!to)
to               1239 net/netfilter/ipset/ip_set_core.c 	if (!(from->type->features == to->type->features &&
to               1240 net/netfilter/ipset/ip_set_core.c 	      from->family == to->family))
to               1245 net/netfilter/ipset/ip_set_core.c 	if (from->ref_netlink || to->ref_netlink) {
to               1251 net/netfilter/ipset/ip_set_core.c 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
to               1252 net/netfilter/ipset/ip_set_core.c 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
to               1254 net/netfilter/ipset/ip_set_core.c 	swap(from->ref, to->ref);
to               1255 net/netfilter/ipset/ip_set_core.c 	ip_set(inst, from_id) = to;
to                172 net/netfilter/ipset/pfxlen.c ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr)
to                181 net/netfilter/ipset/pfxlen.c 		if (!after(last, to)) {
to                424 net/netfilter/ipvs/ip_vs_ftp.c 	union nf_inet_addr to;
to                496 net/netfilter/ipvs/ip_vs_ftp.c 				   &to, &port, cp->af,
to                499 net/netfilter/ipvs/ip_vs_ftp.c 		IP_VS_DBG(7, "PORT %pI4:%u detected\n", &to.ip, ntohs(port));
to                504 net/netfilter/ipvs/ip_vs_ftp.c 			  &to.ip, ntohs(port), &cp->vaddr.ip,
to                510 net/netfilter/ipvs/ip_vs_ftp.c 					  &to, &port, cp->af,
to                514 net/netfilter/ipvs/ip_vs_ftp.c 			      IP_VS_DBG_ADDR(cp->af, &to), ntohs(port));
to                519 net/netfilter/ipvs/ip_vs_ftp.c 			      IP_VS_DBG_ADDR(cp->af, &to), ntohs(port),
to                532 net/netfilter/ipvs/ip_vs_ftp.c 				      ipvsh->protocol, &to, port, &cp->vaddr,
to                 92 net/netfilter/xt_connbytes.c 	if (sinfo->count.to >= sinfo->count.from)
to                 93 net/netfilter/xt_connbytes.c 		return what <= sinfo->count.to && what >= sinfo->count.from;
to                 95 net/netfilter/xt_connbytes.c 		return what < sinfo->count.to || what > sinfo->count.from;
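The xt_connbytes test above packs two cases into one (from, to) pair: when to >= from it is an ordinary inclusive range, and when the bounds are swapped it matches everything outside [to, from]. A minimal restatement of that check:

	/* Hedged restatement of the range test shown above. */
	static bool example_in_count_range(u64 what, u64 from, u64 to)
	{
		if (to >= from)			/* normal inclusive range */
			return what >= from && what <= to;
		return what < to || what > from;	/* inverted bounds: match outside */
	}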
to                139 net/netfilter/xt_hashlimit.c cfg_copy(struct hashlimit_cfg3 *to, const void *from, int revision)
to                144 net/netfilter/xt_hashlimit.c 		to->mode = cfg->mode;
to                145 net/netfilter/xt_hashlimit.c 		to->avg = cfg->avg;
to                146 net/netfilter/xt_hashlimit.c 		to->burst = cfg->burst;
to                147 net/netfilter/xt_hashlimit.c 		to->size = cfg->size;
to                148 net/netfilter/xt_hashlimit.c 		to->max = cfg->max;
to                149 net/netfilter/xt_hashlimit.c 		to->gc_interval = cfg->gc_interval;
to                150 net/netfilter/xt_hashlimit.c 		to->expire = cfg->expire;
to                151 net/netfilter/xt_hashlimit.c 		to->srcmask = cfg->srcmask;
to                152 net/netfilter/xt_hashlimit.c 		to->dstmask = cfg->dstmask;
to                156 net/netfilter/xt_hashlimit.c 		to->mode = cfg->mode;
to                157 net/netfilter/xt_hashlimit.c 		to->avg = cfg->avg;
to                158 net/netfilter/xt_hashlimit.c 		to->burst = cfg->burst;
to                159 net/netfilter/xt_hashlimit.c 		to->size = cfg->size;
to                160 net/netfilter/xt_hashlimit.c 		to->max = cfg->max;
to                161 net/netfilter/xt_hashlimit.c 		to->gc_interval = cfg->gc_interval;
to                162 net/netfilter/xt_hashlimit.c 		to->expire = cfg->expire;
to                163 net/netfilter/xt_hashlimit.c 		to->srcmask = cfg->srcmask;
to                164 net/netfilter/xt_hashlimit.c 		to->dstmask = cfg->dstmask;
to                166 net/netfilter/xt_hashlimit.c 		memcpy(to, from, sizeof(struct hashlimit_cfg3));
to                 84 net/nfc/digital_dep.c 	u8 to;
to                438 net/nfc/digital_dep.c 	wt = DIGITAL_ATR_RES_TO_WT(atr_res->to);
to               1515 net/nfc/digital_dep.c 	atr_res->to = DIGITAL_NFC_DEP_TG_MAX_WT;
to               2942 net/openvswitch/flow_netlink.c 	struct nlattr *to;
to               2944 net/openvswitch/flow_netlink.c 	to = reserve_sfa_size(sfa, from->nla_len, log);
to               2945 net/openvswitch/flow_netlink.c 	if (IS_ERR(to))
to               2946 net/openvswitch/flow_netlink.c 		return PTR_ERR(to);
to               2948 net/openvswitch/flow_netlink.c 	memcpy(to, from, totlen);
to                133 net/qrtr/qrtr.c 			      struct sockaddr_qrtr *to);
to                136 net/qrtr/qrtr.c 			      struct sockaddr_qrtr *to);
to                177 net/qrtr/qrtr.c 			     struct sockaddr_qrtr *to)
to                188 net/qrtr/qrtr.c 	if (to->sq_port == QRTR_PORT_CTRL) {
to                192 net/qrtr/qrtr.c 		hdr->dst_node_id = cpu_to_le32(to->sq_node);
to                193 net/qrtr/qrtr.c 		hdr->dst_port_id = cpu_to_le32(to->sq_port);
to                507 net/qrtr/qrtr.c 	struct sockaddr_qrtr to;
to                509 net/qrtr/qrtr.c 	to.sq_family = AF_QIPCRTR;
to                510 net/qrtr/qrtr.c 	to.sq_node = QRTR_NODE_BCAST;
to                511 net/qrtr/qrtr.c 	to.sq_port = QRTR_PORT_CTRL;
to                521 net/qrtr/qrtr.c 				   &to);
to                671 net/qrtr/qrtr.c 			      struct sockaddr_qrtr *to)
to                676 net/qrtr/qrtr.c 	ipc = qrtr_port_lookup(to->sq_port);
to                700 net/qrtr/qrtr.c 			      struct sockaddr_qrtr *to)
to                710 net/qrtr/qrtr.c 		qrtr_node_enqueue(node, skbn, type, from, to);
to                714 net/qrtr/qrtr.c 	qrtr_local_enqueue(NULL, skb, type, from, to);
to                 49 net/qrtr/tun.c static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                 66 net/qrtr/tun.c 	count = min_t(size_t, iov_iter_count(to), skb->len);
to                 67 net/qrtr/tun.c 	if (copy_to_iter(skb->data, count, to) != count)
to                405 net/rds/ib.h   int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
to                 80 net/rds/ib_recv.c 				    struct list_head *to)
to                 84 net/rds/ib_recv.c 	list_splice_tail(from_last, to);
to                 85 net/rds/ib_recv.c 	list_add_tail(from_last, to);
to                532 net/rds/ib_recv.c int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
to                546 net/rds/ib_recv.c 	while (iov_iter_count(to) && copied < len) {
to                552 net/rds/ib_recv.c 		to_copy = min_t(unsigned long, iov_iter_count(to),
to                561 net/rds/ib_recv.c 					to);
to                466 net/rds/message.c int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
to                483 net/rds/message.c 	while (iov_iter_count(to) && copied < len) {
to                484 net/rds/message.c 		to_copy = min_t(unsigned long, iov_iter_count(to),
to                490 net/rds/message.c 					to_copy, to);
to                562 net/rds/rds.h  	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
to                863 net/rds/rds.h  int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
to                 84 net/rds/tcp.h  int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
to                 62 net/rds/tcp_recv.c int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
to                 68 net/rds/tcp_recv.c 	if (!iov_iter_count(to))
to                 76 net/rds/tcp_recv.c 			to_copy = iov_iter_count(to);
to                 79 net/rds/tcp_recv.c 			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
to                 85 net/rds/tcp_recv.c 			if (!iov_iter_count(to))
to                658 net/rxrpc/af_rxrpc.c 			rx->service_upgrade.to = service_upgrade[1];
to                155 net/rxrpc/ar-internal.h 		u16		to;		/* service ID to upgrade to */
to                178 net/rxrpc/conn_service.c 		conn->service_id = rx->service_upgrade.to;
to                198 net/rxrpc/input.c static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
to                207 net/rxrpc/input.c 		call->acks_lowest_nak = to;
to                208 net/rxrpc/input.c 	} else if (before_eq(call->acks_lowest_nak, to)) {
to                210 net/rxrpc/input.c 		call->acks_lowest_nak = to;
to                215 net/rxrpc/input.c 	while (before(call->tx_hard_ack, to)) {
to                387 net/sched/cls_route.c 	u32 id = 0, to = 0, nhandle = 0x8000;
to                400 net/sched/cls_route.c 		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
to                401 net/sched/cls_route.c 		if (to > 0xFF)
to                403 net/sched/cls_route.c 		nhandle = to;
to                446 net/sched/cls_route.c 		f->id = to;
to                449 net/sched/cls_route.c 		f->id = to | id<<16;
to                 32 net/sched/em_text.c 	int from, to;
to                 37 net/sched/em_text.c 	to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
to                 38 net/sched/em_text.c 	to += tm->to_offset;
to                 40 net/sched/em_text.c 	return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
to               1514 net/sctp/sm_sideeffect.c 			timer = &asoc->timers[cmd->obj.to];
to               1521 net/sctp/sm_sideeffect.c 			timer = &asoc->timers[cmd->obj.to];
to               1522 net/sctp/sm_sideeffect.c 			timeout = asoc->timeouts[cmd->obj.to];
to               1539 net/sctp/sm_sideeffect.c 			timer = &asoc->timers[cmd->obj.to];
to               1540 net/sctp/sm_sideeffect.c 			timeout = asoc->timeouts[cmd->obj.to];
to               1546 net/sctp/sm_sideeffect.c 			timer = &asoc->timers[cmd->obj.to];
to               6169 net/sctp/socket.c 	void __user *to;
to               6187 net/sctp/socket.c 	to = optval + offsetof(struct sctp_getaddrs, addrs);
to               6197 net/sctp/socket.c 		if (copy_to_user(to, &temp, addrlen))
to               6199 net/sctp/socket.c 		to += addrlen;
to               6206 net/sctp/socket.c 	bytes_copied = ((char __user *)to) - optval;
to               6213 net/sctp/socket.c static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
to               6245 net/sctp/socket.c 		memcpy(to, &temp, addrlen);
to               6247 net/sctp/socket.c 		to += addrlen;
to               6266 net/sctp/socket.c 	void __user *to;
to               6297 net/sctp/socket.c 	to = optval + offsetof(struct sctp_getaddrs, addrs);
to               6342 net/sctp/socket.c 	if (copy_to_user(to, addrs, bytes_copied)) {
to               6990 net/sctp/socket.c 	char __user *to;
to               6998 net/sctp/socket.c 	to = p->gauth_chunks;
to               7015 net/sctp/socket.c 	if (copy_to_user(to, ch->chunks, num_chunks))
to               7035 net/sctp/socket.c 	char __user *to;
to               7043 net/sctp/socket.c 	to = p->gauth_chunks;
to               7065 net/sctp/socket.c 	if (copy_to_user(to, ch->chunks, num_chunks))
to                112 net/socket.c   static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
to                950 net/socket.c   static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
to                954 net/socket.c   	struct msghdr msg = {.msg_iter = *to,
to                964 net/socket.c   	if (!iov_iter_count(to))	/* Match SYS5 behaviour */
to                968 net/socket.c   	*to = msg.msg_iter;
to                 31 net/sunrpc/socklib.c xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
to                 35 net/sunrpc/socklib.c 	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
to                 50 net/sunrpc/socklib.c static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
to                 58 net/sunrpc/socklib.c 	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
to                589 net/sunrpc/xprt.c 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
to                592 net/sunrpc/xprt.c 	if (to->to_exponential)
to                593 net/sunrpc/xprt.c 		majortimeo <<= to->to_retries;
to                595 net/sunrpc/xprt.c 		majortimeo += to->to_increment * to->to_retries;
to                596 net/sunrpc/xprt.c 	if (majortimeo > to->to_maxval || majortimeo == 0)
to                597 net/sunrpc/xprt.c 		majortimeo = to->to_maxval;
to                627 net/sunrpc/xprt.c 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
to                631 net/sunrpc/xprt.c 		if (to->to_exponential)
to                634 net/sunrpc/xprt.c 			req->rq_timeout += to->to_increment;
to                635 net/sunrpc/xprt.c 		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
to                636 net/sunrpc/xprt.c 			req->rq_timeout = to->to_maxval;
to                639 net/sunrpc/xprt.c 		req->rq_timeout = to->to_initval;
to                644 net/sunrpc/xprt.c 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
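As a worked example of the sunrpc timeout math above (illustrative numbers, assuming the calculation starts from the request's base timeout of to_initval): with to_initval = 5s, to_retries = 3 and to_increment = 5s, the non-exponential case grows the major timeout to 5 + 5 * 3 = 20s, whereas to_exponential instead gives 5 << 3 = 40s; either result is then clamped to to_maxval.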
to                510 net/sunrpc/xprtrdma/transport.c 		struct rpc_timeout to;
to                513 net/sunrpc/xprtrdma/transport.c 		to = *xprt->timeout;
to                517 net/sunrpc/xprtrdma/transport.c 		to.to_initval = initval;
to                518 net/sunrpc/xprtrdma/transport.c 		to.to_maxval = initval;
to                519 net/sunrpc/xprtrdma/transport.c 		r_xprt->rx_timeout = to;
to               2253 net/sunrpc/xprtsock.c 	struct rpc_timeout to;
to               2260 net/sunrpc/xprtsock.c 		memcpy(&to, xprt->timeout, sizeof(to));
to               2261 net/sunrpc/xprtsock.c 		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
to               2265 net/sunrpc/xprtsock.c 		to.to_initval = initval;
to               2266 net/sunrpc/xprtsock.c 		to.to_maxval = initval;
to               2267 net/sunrpc/xprtsock.c 		memcpy(&transport->tcp_timeout, &to,
to                246 net/tipc/link.c static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
to               1126 net/tipc/link.c 				u16 from, u16 to, struct sk_buff_head *xmitq)
to               1136 net/tipc/link.c 	if (less(to, from))
to               1139 net/tipc/link.c 	trace_tipc_link_retrans(r, from, to, &l->transmq);
to               1148 net/tipc/link.c 		if (more(msg_seqno(hdr), to))
to               2167 net/tipc/link.c 	u16 to = from + msg_bc_gap(hdr) - 1;
to               2189 net/tipc/link.c 	rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
to               2271 net/tipc/link.c 	u16 to = msg_bcgap_to(hdr);
to               2272 net/tipc/link.c 	u16 peers_snd_nxt = to + 1;
to               2285 net/tipc/link.c 		rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
to                291 net/tipc/trace.h 		__field(u16, to)
to                300 net/tipc/trace.h 		__entry->to = t;
to                307 net/tipc/trace.h 		  __entry->name, __entry->from, __entry->to,
to                320 net/tipc/trace.h 		  __entry->name, __entry->from, __entry->to,
to                546 net/tls/tls_sw.c 				 struct tls_rec **to, struct sk_msg *msg_opl,
to                626 net/tls/tls_sw.c 	*to = new;
to                630 net/tls/tls_sw.c static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
to                634 net/tls/tls_sw.c 	struct sk_msg *msg_opl = &to->msg_plaintext;
to                657 net/tls/tls_sw.c 	sk_msg_free(sk, &to->msg_encrypted);
to                658 net/tls/tls_sw.c 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
to               1327 net/tls/tls_sw.c 			       struct scatterlist *to,
to               1358 net/tls/tls_sw.c 			sg_set_page(&to[num_elem],
to               1360 net/tls/tls_sw.c 			sg_unmark_end(&to[num_elem]);
to               1372 net/tls/tls_sw.c 		sg_mark_end(&to[num_elem - 1]);
to                188 samples/vfs/test-statx.c static void dump_hex(unsigned long long *data, int from, int to)
to                193 samples/vfs/test-statx.c 	to = (to + 7) / 8;
to                195 samples/vfs/test-statx.c 	for (offset = from; offset < to; offset++) {
to               1469 scripts/mod/modpost.c 	const char *to, *to_p;
to               1476 scripts/mod/modpost.c 	get_pretty_name(to_is_func, &to, &to_p);
to               1480 scripts/mod/modpost.c 	     modname, fromsec, fromaddr, from, fromsym, from_p, to, tosec,
to               1493 scripts/mod/modpost.c 		to, prl_to, tosym, to_p,
to               1506 scripts/mod/modpost.c 		fromsym, to, prl_to, tosym, to_p);
to               1517 scripts/mod/modpost.c 		fromsym, to, to, tosym, to_p, prl_to, tosym);
to               1528 scripts/mod/modpost.c 		fromsym, to, prl_to, tosym, to_p);
to               1543 scripts/mod/modpost.c 		to, prl_to, tosym, to_p,
to               1560 scripts/mod/modpost.c 		to, prl_to, tosym, to_p,
to               1577 scripts/mod/modpost.c 		to, prl_to, tosym, to_p,
to               1604 scripts/mod/modpost.c 	Elf_Sym *to;
to               1616 scripts/mod/modpost.c 	to = find_elf_symbol(elf, r->r_addend, sym);
to               1617 scripts/mod/modpost.c 	tosym = sym_name(elf, to);
to               1625 scripts/mod/modpost.c 				    is_function(to));
to                679 security/security.c 				struct task_struct *to)
to                681 security/security.c 	return call_int_hook(binder_transaction, 0, from, to);
to                685 security/security.c 				    struct task_struct *to)
to                687 security/security.c 	return call_int_hook(binder_transfer_binder, 0, from, to);
to                691 security/security.c 				  struct task_struct *to, struct file *file)
to                693 security/security.c 	return call_int_hook(binder_transfer_file, 0, from, to, file);
to               2056 security/selinux/hooks.c 				      struct task_struct *to)
to               2060 security/selinux/hooks.c 	u32 tosid = task_sid(to);
to               2077 security/selinux/hooks.c 					  struct task_struct *to)
to               2080 security/selinux/hooks.c 	u32 tosid = task_sid(to);
to               2088 security/selinux/hooks.c 					struct task_struct *to,
to               2091 security/selinux/hooks.c 	u32 sid = task_sid(to);
to               2625 security/selinux/hooks.c 	char *to = options;
to               2662 security/selinux/hooks.c 			if (to != from)
to               2663 security/selinux/hooks.c 				memmove(to, from, len);
to               2664 security/selinux/hooks.c 			to += len;
to               2671 security/selinux/hooks.c 	*to = '\0';
to                721 security/smack/smack_lsm.c 	char *from = options, *to = options;
to                750 security/smack/smack_lsm.c 			if (to != from)
to                751 security/smack/smack_lsm.c 				memmove(to, from, len);
to                752 security/smack/smack_lsm.c 			to += len;
to                759 security/smack/smack_lsm.c 	*to = '\0';
to               3110 sound/core/pcm_native.c static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
to               3127 sound/core/pcm_native.c 	if (!iter_is_iovec(to))
to               3129 sound/core/pcm_native.c 	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
to               3131 sound/core/pcm_native.c 	if (!frame_aligned(runtime, to->iov->iov_len))
to               3133 sound/core/pcm_native.c 	frames = bytes_to_samples(runtime, to->iov->iov_len);
to               3134 sound/core/pcm_native.c 	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
to               3137 sound/core/pcm_native.c 	for (i = 0; i < to->nr_segs; ++i)
to               3138 sound/core/pcm_native.c 		bufs[i] = to->iov[i].iov_base;
to                108 sound/sh/aica.c 	u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi);
to                118 sound/sh/aica.c 		writel(val, to);
to                121 sound/sh/aica.c 		to++;
to                382 sound/soc/codecs/mt6358.c static void headset_volume_ramp(struct mt6358_priv *priv, int from, int to)
to                386 sound/soc/codecs/mt6358.c 	if (!is_valid_hp_pga_idx(from) || !is_valid_hp_pga_idx(to))
to                388 sound/soc/codecs/mt6358.c 			 __func__, from, to);
to                391 sound/soc/codecs/mt6358.c 		 __func__, from, to);
to                393 sound/soc/codecs/mt6358.c 	if (to > from)
to                394 sound/soc/codecs/mt6358.c 		offset = to - from;
to                396 sound/soc/codecs/mt6358.c 		offset = from - to;
to                399 sound/soc/codecs/mt6358.c 		if (to > from)
to                313 sound/soc/codecs/nau8825.c 	unsigned int value, volume, ramp_up, from, to;
to                320 sound/soc/codecs/nau8825.c 		to = vol_to;
to                324 sound/soc/codecs/nau8825.c 		to = vol_from;
to                327 sound/soc/codecs/nau8825.c 	if (to > NAU8825_HP_VOL_MIN)
to                328 sound/soc/codecs/nau8825.c 		to = NAU8825_HP_VOL_MIN;
to                330 sound/soc/codecs/nau8825.c 	for (volume = from; volume < to; volume += step) {
to                334 sound/soc/codecs/nau8825.c 			value = to - volume + from;
to                341 sound/soc/codecs/nau8825.c 		value = to;
to                 48 sound/soc/pxa/pxa-ssp.c 	uint32_t	to;
to                146 sound/soc/pxa/pxa-ssp.c 	priv->to  = __raw_readl(ssp->mmio_base + SSTO);
to                165 sound/soc/pxa/pxa-ssp.c 	__raw_writel(priv->to,  ssp->mmio_base + SSTO);
to                 80 sound/soc/samsung/s3c24xx_simtec.c static void spk_unmute_state(int to)
to                 82 sound/soc/samsung/s3c24xx_simtec.c 	pr_debug("%s: to=%d\n", __func__, to);
to                 84 sound/soc/samsung/s3c24xx_simtec.c 	spk_unmute = to;
to                 85 sound/soc/samsung/s3c24xx_simtec.c 	gpio_set_value(pdata->amp_gpio, to);
to                 88 sound/soc/samsung/s3c24xx_simtec.c 	if (to && pdata->amp_gain[0] > 0)
to                139 sound/soc/sh/hac.c 	unsigned int i, to;
to                148 sound/soc/sh/hac.c 		for (to = TMO_E3;
to                149 sound/soc/sh/hac.c 		     to && !(HACREG(HACTSR) & TSR_CMDAMT);
to                150 sound/soc/sh/hac.c 		     --to)
to                167 sound/soc/sh/hac.c 	unsigned int i, to;
to                178 sound/soc/sh/hac.c 		for (to = TMO_E1;
to                179 sound/soc/sh/hac.c 		     to && !(HACREG(HACTSR) & (TSR_CMDAMT|TSR_CMDDMT));
to                180 sound/soc/sh/hac.c 		     --to)
to                184 sound/soc/sh/hac.c 		if (to)
to                 42 sound/usb/usx2y/usbusx2yaudio.c 					   nrpacks set to 1, you might as well comment 
to                321 sound/xen/xen_snd_front.c 	int to = 100;
to                338 sound/xen/xen_snd_front.c 	       --to)
to                341 sound/xen/xen_snd_front.c 	if (!to) {
to                395 tools/include/uapi/linux/if_link.h 	__u32 to;
to               1172 tools/include/uapi/linux/perf_event.h 	__u64	to;
to                 22 tools/lib/subcmd/run-command.c static inline void dup_devnull(int to)
to                 25 tools/lib/subcmd/run-command.c 	dup2(fd, to);
to                 51 tools/perf/arch/x86/util/intel-bts.c 	u64 to;
to                191 tools/perf/bench/epoll-wait.c 	int to = nonblocking? 0 : -1;
to                209 tools/perf/bench/epoll-wait.c 			ret = epoll_wait(efd, &ev, 1, to);
to                  9 tools/perf/bench/mem-memcpy-x86-64-lib.c unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
to                 11 tools/perf/bench/mem-memcpy-x86-64-lib.c unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
to                 13 tools/perf/bench/mem-memcpy-x86-64-lib.c 	for (; len; --len, to++, from++) {
to                 18 tools/perf/bench/mem-memcpy-x86-64-lib.c 		unsigned long rem = __memcpy_mcsafe(to, from, 1);
to                149 tools/perf/builtin-annotate.c 		prev = &bi[i].to;
to                172 tools/perf/builtin-annotate.c 	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
to                 55 tools/perf/builtin-buildid-cache.c 	char to[PATH_MAX];
to                 61 tools/perf/builtin-buildid-cache.c 	scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
to                 72 tools/perf/builtin-buildid-cache.c 	if (kallsyms__get_function_start(to, name, &addr2))
to                 82 tools/perf/builtin-buildid-cache.c 	char to[PATH_MAX];
to                100 tools/perf/builtin-buildid-cache.c 		scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
to                104 tools/perf/builtin-buildid-cache.c 		if (!compare_proc_modules(from, to) &&
to                259 tools/perf/builtin-record.c static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
to                261 tools/perf/builtin-record.c 	struct record_aio *aio = to;
to                486 tools/perf/builtin-record.c static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
to                488 tools/perf/builtin-record.c 	struct record *rec = to;
to                156 tools/perf/builtin-report.c 		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
to                190 tools/perf/builtin-report.c 			  bi->from.addr, bi->to.addr);
to                199 tools/perf/builtin-report.c 	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
to                739 tools/perf/builtin-script.c 	u64 i, from, to;
to                747 tools/perf/builtin-script.c 		to   = br->entries[i].to;
to                753 tools/perf/builtin-script.c 			thread__find_map_fb(thread, sample->cpumode, to, &alt);
to                763 tools/perf/builtin-script.c 		printed += fprintf(fp, "/0x%"PRIx64, to);
to                786 tools/perf/builtin-script.c 	u64 i, from, to;
to                797 tools/perf/builtin-script.c 		to   = br->entries[i].to;
to                800 tools/perf/builtin-script.c 		thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
to                831 tools/perf/builtin-script.c 	u64 i, from, to;
to                842 tools/perf/builtin-script.c 		to   = br->entries[i].to;
to                848 tools/perf/builtin-script.c 		if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
to                850 tools/perf/builtin-script.c 			to = map__map_ip(alt.map, to);
to                858 tools/perf/builtin-script.c 		printed += fprintf(fp, "/0x%"PRIx64, to);
to               1048 tools/perf/builtin-script.c 		if (br->entries[i].from || br->entries[i].to)
to               1051 tools/perf/builtin-script.c 				 br->entries[i].to);
to               1052 tools/perf/builtin-script.c 		start = br->entries[i + 1].to;
to               1108 tools/perf/builtin-script.c 	start = br->entries[0].to;
to               1043 tools/perf/builtin-timechart.c 		int from = 0, to = 0;
to               1057 tools/perf/builtin-timechart.c 						if (p->pid == we->wakee && !to) {
to               1058 tools/perf/builtin-timechart.c 							to = c->Y;
to               1070 tools/perf/builtin-timechart.c 					if (p->pid == we->wakee && !to) {
to               1071 tools/perf/builtin-timechart.c 						to = c->Y;
to               1090 tools/perf/builtin-timechart.c 			svg_interrupt(we->time, to, we->backtrace);
to               1091 tools/perf/builtin-timechart.c 		else if (from && to && abs(from - to) == 1)
to               1092 tools/perf/builtin-timechart.c 			svg_wakeline(we->time, from, to, we->backtrace);
to               1094 tools/perf/builtin-timechart.c 			svg_partial_wakeline(we->time, from, task_from, to,
to                152 tools/perf/ui/browsers/annotate.c 	unsigned int from, to;
to                195 tools/perf/ui/browsers/annotate.c 		to = target->idx_asm;
to                198 tools/perf/ui/browsers/annotate.c 		to = (u64)target->idx;
to                206 tools/perf/ui/browsers/annotate.c 				 from, to);
to                212 tools/perf/ui/browsers/annotate.c 				       to > from ? true : false);
to               3116 tools/perf/ui/browsers/hists.c 			if (bi->to.sym != bi->from.sym)
to               3120 tools/perf/ui/browsers/hists.c 							bi->to.map,
to               3121 tools/perf/ui/browsers/hists.c 							bi->to.sym);
to                 19 tools/perf/util/branch.c 		       u64 from, u64 to)
to                 27 tools/perf/util/branch.c 		if (to > from)
to                 33 tools/perf/util/branch.c 	if (cross_area(from, to, AREA_2M))
to                 35 tools/perf/util/branch.c 	else if (cross_area(from, to, AREA_4K))
to                 28 tools/perf/util/branch.h 	struct addr_map_symbol to;
to                 36 tools/perf/util/branch.h 	u64			to;
to                 55 tools/perf/util/branch.h 		       u64 from, u64 to);
to                 14 tools/perf/util/copyfile.c static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
to                 28 tools/perf/util/copyfile.c 	to_fp = fopen(to, "w");
to                 73 tools/perf/util/copyfile.c static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
to                 90 tools/perf/util/copyfile.c 	if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
to                126 tools/perf/util/copyfile.c 		err = link(tmp, to);
to                133 tools/perf/util/copyfile.c int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
to                135 tools/perf/util/copyfile.c 	return copyfile_mode_ns(from, to, 0755, nsi);
to                138 tools/perf/util/copyfile.c int copyfile_mode(const char *from, const char *to, mode_t mode)
to                140 tools/perf/util/copyfile.c 	return copyfile_mode_ns(from, to, mode, NULL);
to                143 tools/perf/util/copyfile.c int copyfile(const char *from, const char *to)
to                145 tools/perf/util/copyfile.c 	return copyfile_mode(from, to, 0755);
to                 11 tools/perf/util/copyfile.h int copyfile(const char *from, const char *to);
to                 12 tools/perf/util/copyfile.h int copyfile_mode(const char *from, const char *to, mode_t mode);
to                 13 tools/perf/util/copyfile.h int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
to                979 tools/perf/util/cs-etm.c 	be->to	 = cs_etm__first_executed_instr(tidq->packet);
to               1207 tools/perf/util/cs-etm.c 				.to = sample.addr,
to                126 tools/perf/util/hist.c 		if (h->branch_info->to.sym) {
to                127 tools/perf/util/hist.c 			symlen = (int)h->branch_info->to.sym->namelen + 4;
to                132 tools/perf/util/hist.c 			symlen = dso__name_len(h->branch_info->to.map->dso);
to                444 tools/perf/util/hist.c 		map__get(he->branch_info->to.map);
to                493 tools/perf/util/hist.c 		map__put(he->branch_info->to.map);
to                889 tools/perf/util/hist.c 	al->map = bi[i].to.map;
to                890 tools/perf/util/hist.c 	al->sym = bi[i].to.sym;
to                891 tools/perf/util/hist.c 	al->addr = bi[i].to.addr;
to                908 tools/perf/util/hist.c 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
to               1248 tools/perf/util/hist.c 		map__zput(he->branch_info->to.map);
to               2598 tools/perf/util/hist.c 				prev = &bi[i].to;
to                 82 tools/perf/util/intel-bts.c 	u64 to;
to                112 tools/perf/util/intel-bts.c 				      le64_to_cpu(branch->to),
to                288 tools/perf/util/intel-bts.c 	sample.addr = le64_to_cpu(branch->to);
to                368 tools/perf/util/intel-bts.c 		if (branch->to)
to                374 tools/perf/util/intel-bts.c 	} else if (!branch->to) {
to                393 tools/perf/util/intel-bts.c 		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
to                427 tools/perf/util/intel-bts.c 		if (!branch->from && !branch->to)
to                433 tools/perf/util/intel-bts.c 					    le64_to_cpu(branch->to),
to               1175 tools/perf/util/intel-pt.c 	be->to          = state->to_ip;
to               1299 tools/perf/util/intel-pt.c 				.to = sample.addr,
to               1675 tools/perf/util/intel-pt.c 	u64 *to;
to               1680 tools/perf/util/intel-pt.c 	to = &br_stack->entries[0].from;
to               1688 tools/perf/util/intel-pt.c 				*to++ = from[0];
to               1689 tools/perf/util/intel-pt.c 				*to++ = from[1];
to               1690 tools/perf/util/intel-pt.c 				*to++ = intel_pt_lbr_flags(from[2]);
to               2090 tools/perf/util/machine.c 		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
to               2217 tools/perf/util/machine.c 					ip = lbr_stack->entries[0].to;
to               2233 tools/perf/util/machine.c 					ip = lbr_stack->entries[0].to;
to               2359 tools/perf/util/machine.c 					       NULL, be[i].to,
to                489 tools/perf/util/mmap.c int perf_mmap__push(struct mmap *md, void *to,
to                490 tools/perf/util/mmap.c 		    int push(struct mmap *map, void *to, void *buf, size_t size))
to                509 tools/perf/util/mmap.c 		if (push(md, to, buf, size) < 0) {
to                519 tools/perf/util/mmap.c 	if (push(md, to, buf, size) < 0) {
to                 66 tools/perf/util/mmap.h int perf_mmap__push(struct mmap *md, void *to,
to                 67 tools/perf/util/mmap.h 		    int push(struct mmap *map, void *to, void *buf, size_t size));
to               1285 tools/perf/util/pmu.c void perf_pmu__set_format(unsigned long *bits, long from, long to)
to               1289 tools/perf/util/pmu.c 	if (!to)
to               1290 tools/perf/util/pmu.c 		to = from;
to               1293 tools/perf/util/pmu.c 	for (b = from; b <= to; b++)
to                 82 tools/perf/util/pmu.h void perf_pmu__set_format(unsigned long *bits, long from, long to);
to                489 tools/perf/util/scripting-engines/trace-event-python.c 		    PyLong_FromUnsignedLongLong(br->entries[i].to));
to                508 tools/perf/util/scripting-engines/trace-event-python.c 				    br->entries[i].to, &al);
to                590 tools/perf/util/scripting-engines/trace-event-python.c 				       br->entries[i].to, &al);
to               1041 tools/perf/util/session.c 		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
to               1077 tools/perf/util/session.c 				i, e->from, e->to,
to               1086 tools/perf/util/session.c 				i, i > 0 ? e->from : e->to);
to                423 tools/perf/util/sort.c 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
to                426 tools/perf/util/sort.c 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
to                804 tools/perf/util/sort.c 	return _sort__dso_cmp(left->branch_info->to.map,
to                805 tools/perf/util/sort.c 			      right->branch_info->to.map);
to                812 tools/perf/util/sort.c 		return _hist_entry__dso_snprintf(he->branch_info->to.map,
to                826 tools/perf/util/sort.c 	return dso && (!he->branch_info || !he->branch_info->to.map ||
to                827 tools/perf/util/sort.c 		       he->branch_info->to.map->dso != dso);
to                856 tools/perf/util/sort.c 	to_l = &left->branch_info->to;
to                857 tools/perf/util/sort.c 	to_r = &right->branch_info->to;
to                882 tools/perf/util/sort.c 		struct addr_map_symbol *to = &he->branch_info->to;
to                884 tools/perf/util/sort.c 		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
to                911 tools/perf/util/sort.c 	return sym && !(he->branch_info && he->branch_info->to.sym &&
to                912 tools/perf/util/sort.c 		        strstr(he->branch_info->to.sym->name, sym));
to               1257 tools/perf/util/symbol-elf.c static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
to               1267 tools/perf/util/symbol-elf.c 	if (lseek(to, to_offs, SEEK_SET) != to_offs)
to               1284 tools/perf/util/symbol-elf.c 		r = write(to, buf, n);
to               1371 tools/perf/util/symbol-elf.c static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
to               1373 tools/perf/util/symbol-elf.c 	GElf_Ehdr *ehdr = &to->ehdr;
to               1398 tools/perf/util/symbol-elf.c 	if (!gelf_update_ehdr(to->elf, ehdr))
to               1401 tools/perf/util/symbol-elf.c 	if (!gelf_newphdr(to->elf, count))
to               1768 tools/perf/util/symbol-elf.c static int kcore_copy__compare_fds(int from, int to)
to               1792 tools/perf/util/symbol-elf.c 		if (readn(to, buf_to, len) != (int)len)
to               1809 tools/perf/util/symbol-elf.c 	int from, to, err = -1;
to               1815 tools/perf/util/symbol-elf.c 	to = open(to_filename, O_RDONLY);
to               1816 tools/perf/util/symbol-elf.c 	if (to < 0)
to               1819 tools/perf/util/symbol-elf.c 	err = kcore_copy__compare_fds(from, to);
to               1821 tools/perf/util/symbol-elf.c 	close(to);
to               1015 tools/perf/util/symbol.c int compare_proc_modules(const char *from, const char *to)
to               1026 tools/perf/util/symbol.c 	if (read_proc_modules(to, &to_modules))
to                220 tools/perf/util/symbol.h int compare_proc_modules(const char *from, const char *to);
to                276 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
to                278 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
to                282 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
to                284 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
to                286 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
to                171 tools/testing/selftests/bpf/progs/test_tcp_estats.c static __always_inline void unaligned_u32_set(unsigned char *to, __u8 *from)
to                173 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	to[0] = _(from[0]);
to                174 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	to[1] = _(from[1]);
to                175 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	to[2] = _(from[2]);
to                176 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	to[3] = _(from[3]);
to                672 tools/testing/selftests/bpf/test_align.c static int do_test(unsigned int from, unsigned int to)
to                678 tools/testing/selftests/bpf/test_align.c 	for (i = from; i < to; i++) {
to                700 tools/testing/selftests/bpf/test_align.c 	unsigned int from = 0, to = ARRAY_SIZE(tests);
to                706 tools/testing/selftests/bpf/test_align.c 		if (l < to && u < to) {
to                708 tools/testing/selftests/bpf/test_align.c 			to   = u + 1;
to                713 tools/testing/selftests/bpf/test_align.c 		if (t < to) {
to                715 tools/testing/selftests/bpf/test_align.c 			to   = t + 1;
to                718 tools/testing/selftests/bpf/test_align.c 	return do_test(from, to);
to                660 tools/testing/selftests/bpf/test_maps.c 	struct timeval to;
to                975 tools/testing/selftests/bpf/test_maps.c 		to.tv_sec = 1;
to                976 tools/testing/selftests/bpf/test_maps.c 		to.tv_usec = 0;
to                977 tools/testing/selftests/bpf/test_maps.c 		s = select(sfd[3] + 1, &w, NULL, NULL, &to);
to               1099 tools/testing/selftests/bpf/test_verifier.c static int do_test(bool unpriv, unsigned int from, unsigned int to)
to               1103 tools/testing/selftests/bpf/test_verifier.c 	for (i = from; i < to; i++) {
to               1137 tools/testing/selftests/bpf/test_verifier.c 	unsigned int from = 0, to = ARRAY_SIZE(tests);
to               1151 tools/testing/selftests/bpf/test_verifier.c 		if (l < to && u < to) {
to               1153 tools/testing/selftests/bpf/test_verifier.c 			to   = u + 1;
to               1158 tools/testing/selftests/bpf/test_verifier.c 		if (t < to) {
to               1160 tools/testing/selftests/bpf/test_verifier.c 			to   = t + 1;
to               1172 tools/testing/selftests/bpf/test_verifier.c 	return do_test(unpriv, from, to);
to                160 tools/testing/selftests/capabilities/test_execve.c 	int to = open(toname, O_CREAT | O_WRONLY | O_EXCL, 0700);
to                170 tools/testing/selftests/capabilities/test_execve.c 		if (write(to, buf, sz) != sz)
to                176 tools/testing/selftests/capabilities/test_execve.c 	close(to);
to                 43 tools/testing/selftests/futex/functional/futex_wait_timeout.c 	struct timespec to;
to                 74 tools/testing/selftests/futex/functional/futex_wait_timeout.c 	to.tv_sec = 0;
to                 75 tools/testing/selftests/futex/functional/futex_wait_timeout.c 	to.tv_nsec = timeout_ns;
to                 78 tools/testing/selftests/futex/functional/futex_wait_timeout.c 	res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
to                 41 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c 	struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
to                 69 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c 	res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
to                 54 tools/testing/selftests/powerpc/copyloops/exc_validate.c unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
to                 55 tools/testing/selftests/powerpc/copyloops/exc_validate.c unsigned long test_copy_tofrom_user_reference(void *to, const void *from, unsigned long size);
to                 15 tools/testing/selftests/powerpc/copyloops/validate.c unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
to                542 tools/testing/selftests/vm/userfaultfd.c 			area_dst = (char *)(unsigned long)msg.arg.remap.to;
to                 30 tools/virtio/linux/uaccess.h static void volatile_memcpy(volatile char *to, const volatile char *from, 
to                 34 tools/virtio/linux/uaccess.h 		*(to++) = *(from++);
to                 37 tools/virtio/linux/uaccess.h static inline int copy_from_user(void *to, const void __user volatile *from,
to                 41 tools/virtio/linux/uaccess.h 	volatile_memcpy(to, from, n);
to                 45 tools/virtio/linux/uaccess.h static inline int copy_to_user(void __user volatile *to, const void *from,
to                 48 tools/virtio/linux/uaccess.h 	__chk_user_ptr(to, n);
to                 49 tools/virtio/linux/uaccess.h 	volatile_memcpy(to, from, n);
to                750 virt/kvm/arm/mmu.c int create_hyp_mappings(void *from, void *to, pgprot_t prot)
to                755 virt/kvm/arm/mmu.c 	unsigned long end = kern_hyp_va((unsigned long)to);