rt                 15 arch/arm/include/asm/kvm_mmio.h 	unsigned long rt;
rt                299 arch/arm/net/bpf_jit_32.c static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
rt                301 arch/arm/net/bpf_jit_32.c 	op |= rt << 12 | rn << 16;
rt                309 arch/arm/net/bpf_jit_32.c static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
rt                311 arch/arm/net/bpf_jit_32.c 	op |= rt << 12 | rn << 16;
rt                319 arch/arm/net/bpf_jit_32.c #define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
rt                320 arch/arm/net/bpf_jit_32.c #define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
rt                321 arch/arm/net/bpf_jit_32.c #define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
rt                322 arch/arm/net/bpf_jit_32.c #define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
rt                324 arch/arm/net/bpf_jit_32.c #define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
rt                325 arch/arm/net/bpf_jit_32.c #define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
rt                326 arch/arm/net/bpf_jit_32.c #define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
rt                327 arch/arm/net/bpf_jit_32.c #define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
rt                747 arch/arm/net/bpf_jit_32.c 	s8 rt;
rt                749 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
rt                750 arch/arm/net/bpf_jit_32.c 	arm_bpf_put_reg32(dst, rt, ctx);
rt                828 arch/arm/net/bpf_jit_32.c 	s8 rt;
rt                831 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
rt                835 arch/arm/net/bpf_jit_32.c 	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
rt                836 arch/arm/net/bpf_jit_32.c 	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
rt                837 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
rt                840 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
rt                852 arch/arm/net/bpf_jit_32.c 	s8 rt;
rt                855 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
rt                859 arch/arm/net/bpf_jit_32.c 	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
rt                860 arch/arm/net/bpf_jit_32.c 	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
rt                861 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
rt                865 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
rt                877 arch/arm/net/bpf_jit_32.c 	s8 rt;
rt                880 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
rt                884 arch/arm/net/bpf_jit_32.c 	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
rt                885 arch/arm/net/bpf_jit_32.c 	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
rt                886 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
rt                889 arch/arm/net/bpf_jit_32.c 	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
rt                985 arch/arm/net/bpf_jit_32.c 	const s8 *rd, *rt;
rt                989 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg64(src, tmp2, ctx);
rt                992 arch/arm/net/bpf_jit_32.c 	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
rt                993 arch/arm/net/bpf_jit_32.c 	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
rt                996 arch/arm/net/bpf_jit_32.c 	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
rt               1103 arch/arm/net/bpf_jit_32.c static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
rt               1109 arch/arm/net/bpf_jit_32.c 			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
rt               1113 arch/arm/net/bpf_jit_32.c 			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
rt               1125 arch/arm/net/bpf_jit_32.c 			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
rt               1127 arch/arm/net/bpf_jit_32.c 			emit(ARM_CMP_R(rt, rn), ctx);
rt               1132 arch/arm/net/bpf_jit_32.c 		emit(ARM_CMP_R(rn, rt), ctx);
rt               1138 arch/arm/net/bpf_jit_32.c 		emit(ARM_CMP_R(rt, rn), ctx);
rt               1268 arch/arm/net/bpf_jit_32.c 	const s8 *rt;
rt               1271 arch/arm/net/bpf_jit_32.c 	rt = arm_bpf_get_reg64(src, tmp2, ctx);
rt               1273 arch/arm/net/bpf_jit_32.c 	reg_set = (1 << rt[1]) | (1 << rt[0]);
rt               1357 arch/arm/net/bpf_jit_32.c 	s8 rd_lo, rt, rm, rn;
rt               1452 arch/arm/net/bpf_jit_32.c 			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
rt               1455 arch/arm/net/bpf_jit_32.c 			rt = tmp2[0];
rt               1456 arch/arm/net/bpf_jit_32.c 			emit_a32_mov_i(rt, imm, ctx);
rt               1459 arch/arm/net/bpf_jit_32.c 			rt = src_lo;
rt               1462 arch/arm/net/bpf_jit_32.c 		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
rt                189 arch/arm/net/bpf_jit_32.h #define ARM_LDR_R(rt, rn, rm)	(ARM_INST_LDR_R | ARM_INST_LDST__U \
rt                190 arch/arm/net/bpf_jit_32.h 				 | (rt) << 12 | (rn) << 16 \
rt                192 arch/arm/net/bpf_jit_32.h #define ARM_LDR_R_SI(rt, rn, rm, type, imm) \
rt                194 arch/arm/net/bpf_jit_32.h 				 | (rt) << 12 | (rn) << 16 \
rt                196 arch/arm/net/bpf_jit_32.h #define ARM_LDRB_R(rt, rn, rm)	(ARM_INST_LDRB_R | ARM_INST_LDST__U \
rt                197 arch/arm/net/bpf_jit_32.h 				 | (rt) << 12 | (rn) << 16 \
rt                199 arch/arm/net/bpf_jit_32.h #define ARM_LDRH_R(rt, rn, rm)	(ARM_INST_LDRH_R | ARM_INST_LDST__U \
rt                200 arch/arm/net/bpf_jit_32.h 				 | (rt) << 12 | (rn) << 16 \
rt                 73 arch/arm/probes/kprobes/actions-arm.c 	int rt = (insn >> 12) & 0xf;
rt                 77 arch/arm/probes/kprobes/actions-arm.c 	register unsigned long rtv asm("r0") = regs->uregs[rt];
rt                 78 arch/arm/probes/kprobes/actions-arm.c 	register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
rt                 91 arch/arm/probes/kprobes/actions-arm.c 	regs->uregs[rt] = rtv;
rt                 92 arch/arm/probes/kprobes/actions-arm.c 	regs->uregs[rt+1] = rt2v;
rt                102 arch/arm/probes/kprobes/actions-arm.c 	int rt = (insn >> 12) & 0xf;
rt                118 arch/arm/probes/kprobes/actions-arm.c 	if (rt == 15)
rt                121 arch/arm/probes/kprobes/actions-arm.c 		regs->uregs[rt] = rtv;
rt                133 arch/arm/probes/kprobes/actions-arm.c 	int rt = (insn >> 12) & 0xf;
rt                137 arch/arm/probes/kprobes/actions-arm.c 	register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
rt                138 arch/arm/probes/kprobes/actions-arm.c 							  : regs->uregs[rt];
rt                110 arch/arm/probes/kprobes/actions-thumb.c 	int rt = (insn >> 12) & 0xf;
rt                122 arch/arm/probes/kprobes/actions-thumb.c 		if (rt == 15) {
rt                140 arch/arm/probes/kprobes/actions-thumb.c 	regs->uregs[rt] = rtv;
rt                188 arch/arm/probes/kprobes/actions-thumb.c 	int rt = (insn >> 12) & 0xf;
rt                192 arch/arm/probes/kprobes/actions-thumb.c 	register unsigned long rtv asm("r0") = regs->uregs[rt];
rt                204 arch/arm/probes/kprobes/actions-thumb.c 	if (rt == 15) /* Can't be true for a STR as they aren't allowed */
rt                207 arch/arm/probes/kprobes/actions-thumb.c 		regs->uregs[rt] = rtv;
rt                325 arch/arm/probes/kprobes/actions-thumb.c 	int rt = (insn >> 8) & 0x7;
rt                326 arch/arm/probes/kprobes/actions-thumb.c 	regs->uregs[rt] = base[index];
rt                335 arch/arm/probes/kprobes/actions-thumb.c 	int rt = (insn >> 8) & 0x7;
rt                337 arch/arm/probes/kprobes/actions-thumb.c 		regs->uregs[rt] = base[index];
rt                339 arch/arm/probes/kprobes/actions-thumb.c 		base[index] = regs->uregs[rt];
rt                349 arch/arm/probes/kprobes/actions-thumb.c 	int rt = (insn >> 8) & 0x7;
rt                350 arch/arm/probes/kprobes/actions-thumb.c 	regs->uregs[rt] = base + offset * 4;
rt                640 arch/arm64/include/asm/cpufeature.h extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
rt                 14 arch/arm64/include/asm/kvm_mmio.h 	unsigned long rt;
rt                750 arch/arm64/include/asm/sysreg.h 	.macro	mrs_s, rt, sreg
rt                754 arch/arm64/include/asm/sysreg.h 	.macro	msr_s, sreg, rt
rt                772 arch/arm64/include/asm/sysreg.h 	__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))	\
rt                778 arch/arm64/include/asm/sysreg.h 	__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))	\
rt               2183 arch/arm64/kernel/cpufeature.c int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
rt               2190 arch/arm64/kernel/cpufeature.c 		pt_regs_write_reg(regs, rt, val);
rt               2198 arch/arm64/kernel/cpufeature.c 	u32 sys_reg, rt;
rt               2205 arch/arm64/kernel/cpufeature.c 	rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
rt               2206 arch/arm64/kernel/cpufeature.c 	return do_emulate_mrs(regs, sys_reg, rt);
rt                432 arch/arm64/kernel/traps.c 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
rt                436 arch/arm64/kernel/traps.c 	address = untagged_addr(pt_regs_read_reg(regs, rt));
rt                470 arch/arm64/kernel/traps.c 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
rt                482 arch/arm64/kernel/traps.c 	pt_regs_write_reg(regs, rt, val);
rt                489 arch/arm64/kernel/traps.c 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
rt                491 arch/arm64/kernel/traps.c 	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
rt                497 arch/arm64/kernel/traps.c 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
rt                499 arch/arm64/kernel/traps.c 	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
rt                505 arch/arm64/kernel/traps.c 	u32 sysreg, rt;
rt                507 arch/arm64/kernel/traps.c 	rt = ESR_ELx_SYS64_ISS_RT(esr);
rt                510 arch/arm64/kernel/traps.c 	if (do_emulate_mrs(regs, sysreg, rt) != 0)
rt                661 arch/arm64/kernel/traps.c 	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
rt                665 arch/arm64/kernel/traps.c 	pt_regs_write_reg(regs, rt, lower_32_bits(val));
rt                399 arch/arm64/kvm/hyp/switch.c 	int rt = kvm_vcpu_sys_get_rt(vcpu);
rt                400 arch/arm64/kvm/hyp/switch.c 	u64 val = vcpu_get_reg(vcpu, rt);
rt                107 arch/ia64/include/asm/processor.h 	__u64 rt : 1;
rt               1009 arch/ia64/kernel/mca.c 	if (ia64_psr(regs)->rt == 0) {
rt                215 arch/ia64/kernel/smpboot.c get_delta (long *rt, long *master)
rt                233 arch/ia64/kernel/smpboot.c 	*rt = best_t1 - best_t0;
rt                279 arch/ia64/kernel/smpboot.c 	unsigned long flags, rt, master_time_stamp, bound;
rt                282 arch/ia64/kernel/smpboot.c 		long rt;	/* roundtrip time */
rt                310 arch/ia64/kernel/smpboot.c 			delta = get_delta(&rt, &master_time_stamp);
rt                313 arch/ia64/kernel/smpboot.c 				bound = rt;
rt                326 arch/ia64/kernel/smpboot.c 			t[i].rt = rt;
rt                338 arch/ia64/kernel/smpboot.c 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
rt                342 arch/ia64/kernel/smpboot.c 	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
rt                 36 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (cpu_to_be64(value)));		\
rt                 48 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "=d" (__value)				\
rt                 62 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (cpu_to_be64(value)));		\
rt                 73 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (cpu_to_be64(value)));		\
rt                 84 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                 95 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                110 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                122 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "=d" (__value)				\
rt                136 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                147 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                158 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                169 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                184 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                196 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "=d" (__value)				\
rt                210 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                221 arch/mips/cavium-octeon/crypto/octeon-crypto.h 	: [rt] "d" (value));				\
rt                 32 arch/mips/crypto/crc32-mips.c _ASM_MACRO_3R(OP, rt, rs, rt2,						  \
rt                233 arch/mips/include/asm/asmmacro.h 	.macro	MFTR	rt=0, rd=0, u=0, sel=0
rt                237 arch/mips/include/asm/asmmacro.h 	.macro	MTTR	rt=0, rd=0, u=0, sel=0
rt                286 arch/mips/include/asm/mipsmtregs.h #define mftc0(rt,sel)							\
rt                294 arch/mips/include/asm/mipsmtregs.h 	"	# mftc0 $1, $" #rt ", " #sel "			\n"	\
rt                295 arch/mips/include/asm/mipsmtregs.h 	"	.word	0x41000800 | (" #rt " << 16) | " #sel " \n"	\
rt                303 arch/mips/include/asm/mipsmtregs.h #define mftgpr(rt)							\
rt                311 arch/mips/include/asm/mipsmtregs.h 	"	# mftgpr $1," #rt "				\n"	\
rt                312 arch/mips/include/asm/mipsmtregs.h 	"	.word	0x41000820 | (" #rt " << 16)		\n"	\
rt                320 arch/mips/include/asm/mipsmtregs.h #define mftr(rt, u, sel)							\
rt                325 arch/mips/include/asm/mipsmtregs.h 	"	mftr	%0, " #rt ", " #u ", " #sel "		\n"	\
rt               1544 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(mfhc0, rt, rs, sel,
rt               1547 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(mthc0, rt, rd, sel,
rt               1979 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(mfgc0, rt, rs, sel,
rt               1982 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(dmfgc0, rt, rs, sel,
rt               1985 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(mtgc0, rt, rd, sel,
rt               1988 arch/mips/include/asm/mipsregs.h _ASM_MACRO_2R_1S(dmtgc0, rt, rd, sel,
rt                136 arch/mips/include/asm/octeon/cvmx-asm.h 	asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
rt                138 arch/mips/include/asm/octeon/cvmx-asm.h 	asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
rt                211 arch/mips/include/asm/uasm.h # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
rt                212 arch/mips/include/asm/uasm.h # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
rt                213 arch/mips/include/asm/uasm.h # define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
rt                214 arch/mips/include/asm/uasm.h # define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
rt                215 arch/mips/include/asm/uasm.h # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
rt                216 arch/mips/include/asm/uasm.h # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
rt                217 arch/mips/include/asm/uasm.h # define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
rt                218 arch/mips/include/asm/uasm.h # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
rt                219 arch/mips/include/asm/uasm.h # define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
rt                220 arch/mips/include/asm/uasm.h # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
rt                221 arch/mips/include/asm/uasm.h # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
rt                222 arch/mips/include/asm/uasm.h # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
rt                223 arch/mips/include/asm/uasm.h # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
rt                224 arch/mips/include/asm/uasm.h # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
rt                225 arch/mips/include/asm/uasm.h # define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
rt                227 arch/mips/include/asm/uasm.h # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
rt                228 arch/mips/include/asm/uasm.h # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
rt                229 arch/mips/include/asm/uasm.h # define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
rt                230 arch/mips/include/asm/uasm.h # define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
rt                231 arch/mips/include/asm/uasm.h # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
rt                232 arch/mips/include/asm/uasm.h # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
rt                233 arch/mips/include/asm/uasm.h # define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
rt                234 arch/mips/include/asm/uasm.h # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
rt                235 arch/mips/include/asm/uasm.h # define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
rt                236 arch/mips/include/asm/uasm.h # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
rt                237 arch/mips/include/asm/uasm.h # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
rt                238 arch/mips/include/asm/uasm.h # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
rt                239 arch/mips/include/asm/uasm.h # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
rt                240 arch/mips/include/asm/uasm.h # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
rt                241 arch/mips/include/asm/uasm.h # define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
rt                635 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                643 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                660 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                670 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                680 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                700 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                711 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                780 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt:5,
rt                826 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                835 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                857 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                867 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                908 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                968 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 3,
rt                977 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 3,
rt                985 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int rt : 5,
rt                 76 arch/mips/kernel/branch.c 				if (insn.mm_i_format.rt != 0)	/* Not mm_jr */
rt                 77 arch/mips/kernel/branch.c 					regs->regs[insn.mm_i_format.rt] =
rt                 87 arch/mips/kernel/branch.c 		switch (insn.mm_i_format.rt) {
rt                176 arch/mips/kernel/branch.c 		switch (insn.mm_i_format.rt) {
rt                211 arch/mips/kernel/branch.c 		    regs->regs[insn.mm_i_format.rt])
rt                222 arch/mips/kernel/branch.c 		    regs->regs[insn.mm_i_format.rt])
rt                450 arch/mips/kernel/branch.c 		switch (insn.i_format.rt) {
rt                458 arch/mips/kernel/branch.c 				if (insn.i_format.rt == bltzl_op)
rt                472 arch/mips/kernel/branch.c 				if (insn.i_format.rt == bgezl_op)
rt                482 arch/mips/kernel/branch.c 			    insn.i_format.rt == bltzall_op))
rt                504 arch/mips/kernel/branch.c 				if (insn.i_format.rt == bltzall_op)
rt                514 arch/mips/kernel/branch.c 			    insn.i_format.rt == bgezall_op))
rt                536 arch/mips/kernel/branch.c 				if (insn.i_format.rt == bgezall_op)
rt                584 arch/mips/kernel/branch.c 		    regs->regs[insn.i_format.rt]) {
rt                599 arch/mips/kernel/branch.c 		    regs->regs[insn.i_format.rt]) {
rt                609 arch/mips/kernel/branch.c 		if (!insn.i_format.rt && NO_R6EMU)
rt                626 arch/mips/kernel/branch.c 		if (cpu_has_mips_r6 && insn.i_format.rt) {
rt                628 arch/mips/kernel/branch.c 			    ((!insn.i_format.rs && insn.i_format.rt) ||
rt                629 arch/mips/kernel/branch.c 			     (insn.i_format.rs == insn.i_format.rt)))
rt                645 arch/mips/kernel/branch.c 		if (!insn.i_format.rt && NO_R6EMU)
rt                662 arch/mips/kernel/branch.c 		if (cpu_has_mips_r6 && insn.i_format.rt) {
rt                664 arch/mips/kernel/branch.c 			    ((!insn.i_format.rs && insn.i_format.rt) ||
rt                665 arch/mips/kernel/branch.c 			    (insn.i_format.rs == insn.i_format.rt)))
rt                693 arch/mips/kernel/branch.c 			reg = insn.i_format.rt;
rt                715 arch/mips/kernel/branch.c 			bit = (insn.i_format.rt >> 2);
rt                718 arch/mips/kernel/branch.c 			switch (insn.i_format.rt & 3) {
rt                724 arch/mips/kernel/branch.c 					if (insn.i_format.rt == 2)
rt                736 arch/mips/kernel/branch.c 					if (insn.i_format.rt == 3)
rt                750 arch/mips/kernel/branch.c 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
rt                759 arch/mips/kernel/branch.c 		    (1ull<<(insn.i_format.rt+32))) == 0)
rt                766 arch/mips/kernel/branch.c 		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
rt                774 arch/mips/kernel/branch.c 		    (1ull<<(insn.i_format.rt+32)))
rt                821 arch/mips/kernel/branch.c 		if (insn.i_format.rt && !insn.i_format.rs)
rt                890 arch/mips/kernel/branch.c 		if (insn.i_format.rt)
rt                405 arch/mips/kernel/mips-r2-to-r6-emul.c 	s32 rt, rs;
rt                407 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                409 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (s64)rt * (s64)rs;
rt                413 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res >> 32;
rt                414 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (s64)rt;
rt                432 arch/mips/kernel/mips-r2-to-r6-emul.c 	u32 rt, rs;
rt                434 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                436 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (u64)rt * (u64)rs;
rt                437 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res;
rt                438 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)(s32)rt;
rt                455 arch/mips/kernel/mips-r2-to-r6-emul.c 	s32 rt, rs;
rt                457 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                460 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)(rs / rt);
rt                461 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->hi = (s64)(rs % rt);
rt                477 arch/mips/kernel/mips-r2-to-r6-emul.c 	u32 rt, rs;
rt                479 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                482 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)(rs / rt);
rt                483 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->hi = (s64)(rs % rt);
rt                500 arch/mips/kernel/mips-r2-to-r6-emul.c 	s64 rt, rs;
rt                505 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                507 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = rt * rs;
rt                513 arch/mips/kernel/mips-r2-to-r6-emul.c 		: "r"(rt), "r"(rs));
rt                532 arch/mips/kernel/mips-r2-to-r6-emul.c 	u64 rt, rs;
rt                537 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                539 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = rt * rs;
rt                545 arch/mips/kernel/mips-r2-to-r6-emul.c 		: "r"(rt), "r"(rs));
rt                563 arch/mips/kernel/mips-r2-to-r6-emul.c 	s64 rt, rs;
rt                568 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                571 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = rs / rt;
rt                572 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->hi = rs % rt;
rt                588 arch/mips/kernel/mips-r2-to-r6-emul.c 	u64 rt, rs;
rt                593 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                596 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = rs / rt;
rt                597 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->hi = rs % rt;
rt                636 arch/mips/kernel/mips-r2-to-r6-emul.c 	s32 rt, rs;
rt                638 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                640 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (s64)rt * (s64)rs;
rt                641 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->hi;
rt                643 arch/mips/kernel/mips-r2-to-r6-emul.c 	res += ((((s64)rt) << 32) | (u32)rs);
rt                645 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res;
rt                646 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)rt;
rt                665 arch/mips/kernel/mips-r2-to-r6-emul.c 	u32 rt, rs;
rt                667 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                669 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (u64)rt * (u64)rs;
rt                670 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->hi;
rt                672 arch/mips/kernel/mips-r2-to-r6-emul.c 	res += ((((s64)rt) << 32) | (u32)rs);
rt                674 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res;
rt                675 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)(s32)rt;
rt                694 arch/mips/kernel/mips-r2-to-r6-emul.c 	s32 rt, rs;
rt                696 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                698 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (s64)rt * (s64)rs;
rt                699 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->hi;
rt                701 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = ((((s64)rt) << 32) | (u32)rs) - res;
rt                703 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res;
rt                704 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)rt;
rt                723 arch/mips/kernel/mips-r2-to-r6-emul.c 	u32 rt, rs;
rt                725 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                727 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (u64)rt * (u64)rs;
rt                728 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->hi;
rt                730 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = ((((s64)rt) << 32) | (u32)rs) - res;
rt                732 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = res;
rt                733 arch/mips/kernel/mips-r2-to-r6-emul.c 	regs->lo = (s64)(s32)rt;
rt                752 arch/mips/kernel/mips-r2-to-r6-emul.c 	s32 rt, rs;
rt                756 arch/mips/kernel/mips-r2-to-r6-emul.c 	rt = regs->regs[MIPSInst_RT(ir)];
rt                758 arch/mips/kernel/mips-r2-to-r6-emul.c 	res = (s64)rt * (s64)rs;
rt                911 arch/mips/kernel/mips-r2-to-r6-emul.c 	unsigned long cpc, epc, nepc, r31, res, rs, rt;
rt                940 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = MIPSInst_RT(inst);
rt                942 arch/mips/kernel/mips-r2-to-r6-emul.c 		switch (rt) {
rt               1011 arch/mips/kernel/mips-r2-to-r6-emul.c 			switch (rt) {
rt               1066 arch/mips/kernel/mips-r2-to-r6-emul.c 			switch (rt) {
rt               1206 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1267 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1272 arch/mips/kernel/mips-r2-to-r6-emul.c 			regs->regs[MIPSInst_RT(inst)] = rt;
rt               1279 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1342 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1346 arch/mips/kernel/mips-r2-to-r6-emul.c 			regs->regs[MIPSInst_RT(inst)] = rt;
rt               1353 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1413 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1423 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1483 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1498 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1602 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1606 arch/mips/kernel/mips-r2-to-r6-emul.c 			regs->regs[MIPSInst_RT(inst)] = rt;
rt               1617 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1721 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1725 arch/mips/kernel/mips-r2-to-r6-emul.c 			regs->regs[MIPSInst_RT(inst)] = rt;
rt               1736 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1840 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt               1854 arch/mips/kernel/mips-r2-to-r6-emul.c 		rt = regs->regs[MIPSInst_RT(inst)];
rt               1958 arch/mips/kernel/mips-r2-to-r6-emul.c 			: "+&r"(rt), "=&r"(rs),
rt                 34 arch/mips/kernel/probes-common.h 		switch (insn.i_format.rt) {
rt                218 arch/mips/kernel/process.c 			if (ip->mm16_r5_format.rt != 31)
rt                246 arch/mips/kernel/process.c 		if (ip->i_format.rt != 31)
rt                274 arch/mips/kernel/process.c 		ip->i_format.rs == 29 && ip->i_format.rt == 31) {
rt                296 arch/mips/kernel/process.c 		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
rt                344 arch/mips/kernel/process.c 		    ip->mm16_r5_format.rt == 29) {
rt                353 arch/mips/kernel/process.c 	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
rt                359 arch/mips/kernel/process.c 	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
rt                286 arch/mips/kernel/rtlx.c 	struct rtlx_channel *rt;
rt                294 arch/mips/kernel/rtlx.c 	rt = &rtlx->channel[index];
rt                298 arch/mips/kernel/rtlx.c 	rt_read = rt->rt_read;
rt                301 arch/mips/kernel/rtlx.c 	count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
rt                302 arch/mips/kernel/rtlx.c 						     rt->buffer_size));
rt                305 arch/mips/kernel/rtlx.c 	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
rt                307 arch/mips/kernel/rtlx.c 	failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
rt                313 arch/mips/kernel/rtlx.c 		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
rt                319 arch/mips/kernel/rtlx.c 	rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
rt                628 arch/mips/kernel/traps.c static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
rt                636 arch/mips/kernel/traps.c 		regs->regs[rt] = smp_processor_id();
rt                639 arch/mips/kernel/traps.c 		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
rt                643 arch/mips/kernel/traps.c 		regs->regs[rt] = read_c0_count();
rt                649 arch/mips/kernel/traps.c 			regs->regs[rt] = 1;
rt                652 arch/mips/kernel/traps.c 			regs->regs[rt] = 2;
rt                656 arch/mips/kernel/traps.c 		regs->regs[rt] = ti->tp_value;
rt                667 arch/mips/kernel/traps.c 		int rt = (opcode & RT) >> 16;
rt                669 arch/mips/kernel/traps.c 		simulate_rdhwr(regs, rd, rt);
rt                681 arch/mips/kernel/traps.c 		int rt = (opcode & MM_RT) >> 21;
rt                682 arch/mips/kernel/traps.c 		simulate_rdhwr(regs, rd, rt);
rt                982 arch/mips/kernel/unaligned.c 				regs->regs[insn.spec3_format.rt] = value;
rt                995 arch/mips/kernel/unaligned.c 				regs->regs[insn.spec3_format.rt] = value;
rt               1008 arch/mips/kernel/unaligned.c 				regs->regs[insn.spec3_format.rt] = value;
rt               1016 arch/mips/kernel/unaligned.c 				value = regs->regs[insn.spec3_format.rt];
rt               1029 arch/mips/kernel/unaligned.c 				value = regs->regs[insn.spec3_format.rt];
rt               1060 arch/mips/kernel/unaligned.c 		regs->regs[insn.i_format.rt] = value;
rt               1079 arch/mips/kernel/unaligned.c 		regs->regs[insn.i_format.rt] = value;
rt               1098 arch/mips/kernel/unaligned.c 		regs->regs[insn.i_format.rt] = value;
rt               1117 arch/mips/kernel/unaligned.c 		regs->regs[insn.i_format.rt] = value;
rt               1140 arch/mips/kernel/unaligned.c 		regs->regs[insn.i_format.rt] = value;
rt               1152 arch/mips/kernel/unaligned.c 		value = regs->regs[insn.i_format.rt];
rt               1172 arch/mips/kernel/unaligned.c 		value = regs->regs[insn.i_format.rt];
rt               1200 arch/mips/kernel/unaligned.c 		value = regs->regs[insn.i_format.rt];
rt               1756 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1760 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1764 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1768 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1772 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1776 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1780 arch/mips/kernel/unaligned.c 		reg = insn.mm_i_format.rt;
rt               1830 arch/mips/kernel/unaligned.c 		reg = reg16to32[insn.mm16_rb_format.rt];
rt               1834 arch/mips/kernel/unaligned.c 		reg = reg16to32[insn.mm16_rb_format.rt];
rt               1838 arch/mips/kernel/unaligned.c 		reg = reg16to32st[insn.mm16_rb_format.rt];
rt               1842 arch/mips/kernel/unaligned.c 		reg = reg16to32st[insn.mm16_rb_format.rt];
rt               1846 arch/mips/kernel/unaligned.c 		reg = insn.mm16_r5_format.rt;
rt               1850 arch/mips/kernel/unaligned.c 		reg = insn.mm16_r5_format.rt;
rt               1854 arch/mips/kernel/unaligned.c 		reg = reg16to32[insn.mm16_r3_format.rt];
rt                 83 arch/mips/kernel/uprobes.c 		switch (inst.u_format.rt) {
rt                 88 arch/mips/kvm/dyntrans.c 	synci_inst.i_format.rt = synci_op;
rt                108 arch/mips/kvm/dyntrans.c 		mfc0_inst.r_format.rd = inst.c0r_format.rt;
rt                112 arch/mips/kvm/dyntrans.c 		mfc0_inst.i_format.rt = inst.c0r_format.rt;
rt                134 arch/mips/kvm/dyntrans.c 	mtc0_inst.i_format.rt = inst.c0r_format.rt;
rt                 82 arch/mips/kvm/emulate.c 		switch (insn.i_format.rt) {
rt                156 arch/mips/kvm/emulate.c 		    arch->gprs[insn.i_format.rt])
rt                166 arch/mips/kvm/emulate.c 		    arch->gprs[insn.i_format.rt])
rt                177 arch/mips/kvm/emulate.c 		if (insn.i_format.rt != 0)
rt                190 arch/mips/kvm/emulate.c 		if (insn.i_format.rt != 0)
rt                209 arch/mips/kvm/emulate.c 		if (insn.i_format.rt != 0)
rt                215 arch/mips/kvm/emulate.c 		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
rt               1270 arch/mips/kvm/emulate.c 	u32 rt, rd, sel;
rt               1310 arch/mips/kvm/emulate.c 		rt = inst.c0r_format.rt;
rt               1321 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] =
rt               1324 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] = 0x0;
rt               1329 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
rt               1338 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
rt               1342 arch/mips/kvm/emulate.c 			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
rt               1346 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
rt               1355 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
rt               1358 arch/mips/kvm/emulate.c 			    && (vcpu->arch.gprs[rt] >=
rt               1361 arch/mips/kvm/emulate.c 					vcpu->arch.gprs[rt]);
rt               1371 arch/mips/kvm/emulate.c 							  vcpu->arch.gprs[rt]);
rt               1374 arch/mips/kvm/emulate.c 							vcpu->arch.gprs[rt]);
rt               1378 arch/mips/kvm/emulate.c 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
rt               1384 arch/mips/kvm/emulate.c 						       vcpu->arch.gprs[rt],
rt               1390 arch/mips/kvm/emulate.c 				val = vcpu->arch.gprs[rt];
rt               1462 arch/mips/kvm/emulate.c 				val = vcpu->arch.gprs[rt];
rt               1499 arch/mips/kvm/emulate.c 				new_cause = vcpu->arch.gprs[rt];
rt               1519 arch/mips/kvm/emulate.c 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
rt               1521 arch/mips/kvm/emulate.c 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
rt               1530 arch/mips/kvm/emulate.c 				vcpu->arch.pc, rt, rd, sel);
rt               1533 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
rt               1541 arch/mips/kvm/emulate.c 			if (rt != 0)
rt               1542 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] =
rt               1571 arch/mips/kvm/emulate.c 					  vcpu->arch.gprs[rt]);
rt               1572 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
rt               1604 arch/mips/kvm/emulate.c 	u32 rt;
rt               1617 arch/mips/kvm/emulate.c 	rt = inst.i_format.rt;
rt               1628 arch/mips/kvm/emulate.c 		*(u64 *)data = vcpu->arch.gprs[rt];
rt               1632 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u64 *)data);
rt               1638 arch/mips/kvm/emulate.c 		*(u32 *)data = vcpu->arch.gprs[rt];
rt               1642 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u32 *)data);
rt               1647 arch/mips/kvm/emulate.c 		*(u16 *)data = vcpu->arch.gprs[rt];
rt               1651 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u16 *)data);
rt               1656 arch/mips/kvm/emulate.c 		*(u8 *)data = vcpu->arch.gprs[rt];
rt               1660 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u8 *)data);
rt               1686 arch/mips/kvm/emulate.c 	u32 op, rt;
rt               1688 arch/mips/kvm/emulate.c 	rt = inst.i_format.rt;
rt               1703 arch/mips/kvm/emulate.c 	vcpu->arch.io_gpr = rt;
rt               1819 arch/mips/kvm/emulate.c 	op_inst = inst.i_format.rt;
rt               2519 arch/mips/kvm/emulate.c 		int rt = inst.r_format.rt;
rt               2530 arch/mips/kvm/emulate.c 			arch->gprs[rt] = vcpu->vcpu_id;
rt               2533 arch/mips/kvm/emulate.c 			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
rt               2537 arch/mips/kvm/emulate.c 			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
rt               2543 arch/mips/kvm/emulate.c 				arch->gprs[rt] = 1;
rt               2546 arch/mips/kvm/emulate.c 				arch->gprs[rt] = 2;
rt               2550 arch/mips/kvm/emulate.c 			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
rt               2559 arch/mips/kvm/emulate.c 			      vcpu->arch.gprs[rt]);
rt                907 arch/mips/kvm/vz.c 	u32 rt, rd, sel;
rt                929 arch/mips/kvm/vz.c 		rt = inst.c0r_format.rt;
rt                986 arch/mips/kvm/vz.c 				vcpu->arch.gprs[rt] = val;
rt                999 arch/mips/kvm/vz.c 			val = vcpu->arch.gprs[rt];
rt               1007 arch/mips/kvm/vz.c 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
rt               1011 arch/mips/kvm/vz.c 						       vcpu->arch.gprs[rt],
rt               1084 arch/mips/kvm/vz.c 	op_inst = inst.i_format.rt;
rt               1139 arch/mips/kvm/vz.c 	int rd, rt, sel;
rt               1174 arch/mips/kvm/vz.c 			rt = inst.r_format.rt;
rt               1179 arch/mips/kvm/vz.c 				arch->gprs[rt] =
rt               1189 arch/mips/kvm/vz.c 				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
rt               1231 arch/mips/kvm/vz.c 		int rt = inst.c0r_format.rt;
rt               1234 arch/mips/kvm/vz.c 		unsigned int val = arch->gprs[rt];
rt                 91 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
rt                 92 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
rt                 96 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
rt                 97 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
rt                101 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
rt                102 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
rt                106 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
rt                107 arch/mips/math-emu/cp1emu.c 		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
rt                111 arch/mips/math-emu/cp1emu.c 		if ((insn.mm_i_format.rt == mm_bc1f_op) ||
rt                112 arch/mips/math-emu/cp1emu.c 		    (insn.mm_i_format.rt == mm_bc1t_op)) {
rt                116 arch/mips/math-emu/cp1emu.c 				(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
rt                170 arch/mips/math-emu/cp1emu.c 				mips32_insn.r_format.rt =
rt                255 arch/mips/math-emu/cp1emu.c 				mips32_insn.r_format.rt =
rt                257 arch/mips/math-emu/cp1emu.c 				mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
rt                279 arch/mips/math-emu/cp1emu.c 					insn.mm_fp3_format.rt;
rt                303 arch/mips/math-emu/cp1emu.c 					insn.mm_fp3_format.rt;
rt                343 arch/mips/math-emu/cp1emu.c 					insn.mm_fp1_format.rt;
rt                362 arch/mips/math-emu/cp1emu.c 					insn.mm_fp1_format.rt;
rt                385 arch/mips/math-emu/cp1emu.c 				mips32_insn.fp1_format.rt =
rt                386 arch/mips/math-emu/cp1emu.c 					insn.mm_fp1_format.rt;
rt                400 arch/mips/math-emu/cp1emu.c 			mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
rt                452 arch/mips/math-emu/cp1emu.c 		switch (insn.i_format.rt) {
rt                456 arch/mips/math-emu/cp1emu.c 			    insn.i_format.rt == bltzall_op))
rt                480 arch/mips/math-emu/cp1emu.c 			    insn.i_format.rt == bgezall_op))
rt                525 arch/mips/math-emu/cp1emu.c 		    regs->regs[insn.i_format.rt])
rt                540 arch/mips/math-emu/cp1emu.c 		    regs->regs[insn.i_format.rt])
rt                550 arch/mips/math-emu/cp1emu.c 		if (!insn.i_format.rt && NO_R6EMU)
rt                567 arch/mips/math-emu/cp1emu.c 		if (cpu_has_mips_r6 && insn.i_format.rt) {
rt                569 arch/mips/math-emu/cp1emu.c 			    ((!insn.i_format.rs && insn.i_format.rt) ||
rt                570 arch/mips/math-emu/cp1emu.c 			     (insn.i_format.rs == insn.i_format.rt)))
rt                588 arch/mips/math-emu/cp1emu.c 		if (!insn.i_format.rt && NO_R6EMU)
rt                605 arch/mips/math-emu/cp1emu.c 		if (cpu_has_mips_r6 && insn.i_format.rt) {
rt                607 arch/mips/math-emu/cp1emu.c 			    ((!insn.i_format.rs && insn.i_format.rt) ||
rt                608 arch/mips/math-emu/cp1emu.c 			     (insn.i_format.rs == insn.i_format.rt)))
rt                630 arch/mips/math-emu/cp1emu.c 		if (insn.i_format.rt && !insn.i_format.rs)
rt                638 arch/mips/math-emu/cp1emu.c 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
rt                644 arch/mips/math-emu/cp1emu.c 		if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
rt                650 arch/mips/math-emu/cp1emu.c 		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
rt                656 arch/mips/math-emu/cp1emu.c 		if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
rt                706 arch/mips/math-emu/cp1emu.c 			fpr = &current->thread.fpu.fpr[insn.i_format.rt];
rt                739 arch/mips/math-emu/cp1emu.c 			bit = (insn.i_format.rt >> 2);
rt                742 arch/mips/math-emu/cp1emu.c 			switch (insn.i_format.rt & 3) {
rt                 36 arch/mips/oprofile/backtrace.c 		&& ip->i_format.rs == 29 && ip->i_format.rt == 31;
rt                 42 arch/mips/oprofile/backtrace.c 	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
rt                 59 arch/mips/oprofile/backtrace.c 	if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
rt                 37 arch/mips/ralink/timer.c static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val)
rt                 39 arch/mips/ralink/timer.c 	__raw_writel(val, rt->membase + reg);
rt                 42 arch/mips/ralink/timer.c static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg)
rt                 44 arch/mips/ralink/timer.c 	return __raw_readl(rt->membase + reg);
rt                 49 arch/mips/ralink/timer.c 	struct rt_timer *rt =  (struct rt_timer *) _rt;
rt                 51 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
rt                 52 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT);
rt                 58 arch/mips/ralink/timer.c static int rt_timer_request(struct rt_timer *rt)
rt                 60 arch/mips/ralink/timer.c 	int err = request_irq(rt->irq, rt_timer_irq, 0,
rt                 61 arch/mips/ralink/timer.c 						dev_name(rt->dev), rt);
rt                 63 arch/mips/ralink/timer.c 		dev_err(rt->dev, "failed to request irq\n");
rt                 66 arch/mips/ralink/timer.c 		rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
rt                 71 arch/mips/ralink/timer.c static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
rt                 73 arch/mips/ralink/timer.c 	if (rt->timer_freq < divisor)
rt                 74 arch/mips/ralink/timer.c 		rt->timer_div = rt->timer_freq;
rt                 76 arch/mips/ralink/timer.c 		rt->timer_div = divisor;
rt                 78 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
rt                 83 arch/mips/ralink/timer.c static int rt_timer_enable(struct rt_timer *rt)
rt                 87 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
rt                 89 arch/mips/ralink/timer.c 	t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
rt                 91 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
rt                 99 arch/mips/ralink/timer.c 	struct rt_timer *rt;
rt                102 arch/mips/ralink/timer.c 	rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL);
rt                103 arch/mips/ralink/timer.c 	if (!rt) {
rt                108 arch/mips/ralink/timer.c 	rt->irq = platform_get_irq(pdev, 0);
rt                109 arch/mips/ralink/timer.c 	if (rt->irq < 0)
rt                110 arch/mips/ralink/timer.c 		return rt->irq;
rt                112 arch/mips/ralink/timer.c 	rt->membase = devm_ioremap_resource(&pdev->dev, res);
rt                113 arch/mips/ralink/timer.c 	if (IS_ERR(rt->membase))
rt                114 arch/mips/ralink/timer.c 		return PTR_ERR(rt->membase);
rt                122 arch/mips/ralink/timer.c 	rt->timer_freq = clk_get_rate(clk) / TMR0CTL_PRESCALE_DIV;
rt                123 arch/mips/ralink/timer.c 	if (!rt->timer_freq)
rt                126 arch/mips/ralink/timer.c 	rt->dev = &pdev->dev;
rt                127 arch/mips/ralink/timer.c 	platform_set_drvdata(pdev, rt);
rt                129 arch/mips/ralink/timer.c 	rt_timer_request(rt);
rt                130 arch/mips/ralink/timer.c 	rt_timer_config(rt, 2);
rt                131 arch/mips/ralink/timer.c 	rt_timer_enable(rt);
rt                133 arch/mips/ralink/timer.c 	dev_info(&pdev->dev, "maximum frequency is %luHz\n", rt->timer_freq);
rt                 67 arch/powerpc/include/asm/kvm_ppc.h                               unsigned int rt, unsigned int bytes,
rt                 70 arch/powerpc/include/asm/kvm_ppc.h                                unsigned int rt, unsigned int bytes,
rt                 73 arch/powerpc/include/asm/kvm_ppc.h 				unsigned int rt, unsigned int bytes,
rt                 76 arch/powerpc/include/asm/kvm_ppc.h 		unsigned int rt, unsigned int bytes, int is_default_endian);
rt                 77 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
rt                 80 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
rt                 82 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
rt                 86 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
rt                 89 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
rt                 91 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
rt                 95 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
rt                 97 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
rt                100 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
rt                103 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
rt                105 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
rt                109 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
rt                111 arch/powerpc/kernel/kvm.c 	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
rt                155 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
rt                180 arch/powerpc/kernel/kvm.c 	switch (get_rt(rt)) {
rt                190 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
rt                208 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
rt                235 arch/powerpc/kernel/kvm.c 	switch (get_rt(rt)) {
rt                249 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
rt                250 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
rt                269 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
rt                300 arch/powerpc/kernel/kvm.c 		switch (get_rt(rt)) {
rt                310 arch/powerpc/kernel/kvm.c 			p[kvm_emulate_wrtee_reg_offs] |= rt;
rt                367 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
rt                393 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
rt                242 arch/powerpc/kvm/book3s_emulate.c 	int rt = get_rt(inst);
rt                299 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
rt                324 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, sr);
rt                336 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, sr);
rt                419 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
rt                434 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
rt                445 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
rt                150 arch/powerpc/kvm/book3s_paired_singles.c static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
rt                152 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
rt                 47 arch/powerpc/kvm/booke_emulate.c 	int rt = get_rt(inst);
rt                 80 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
rt                116 arch/powerpc/kvm/e500_emulate.c 				  int rt)
rt                120 arch/powerpc/kvm/e500_emulate.c 		kvmppc_set_gpr(vcpu, rt,
rt                134 arch/powerpc/kvm/e500_emulate.c 	int rt = get_rt(inst);
rt                169 arch/powerpc/kvm/e500_emulate.c 			int type = rt & 0x3;
rt                181 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
rt                128 arch/powerpc/kvm/emulate.c static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
rt                186 arch/powerpc/kvm/emulate.c 		kvmppc_set_gpr(vcpu, rt, spr_val);
rt                197 arch/powerpc/kvm/emulate.c 	int rs, rt, sprn;
rt                211 arch/powerpc/kvm/emulate.c 	rt = get_rt(inst);
rt                243 arch/powerpc/kvm/emulate.c 			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
rt               1223 arch/powerpc/kvm/powerpc.c 				unsigned int rt, unsigned int bytes,
rt               1245 arch/powerpc/kvm/powerpc.c 	vcpu->arch.io_gpr = rt;
rt               1268 arch/powerpc/kvm/powerpc.c 		       unsigned int rt, unsigned int bytes,
rt               1271 arch/powerpc/kvm/powerpc.c 	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
rt               1277 arch/powerpc/kvm/powerpc.c 			unsigned int rt, unsigned int bytes,
rt               1280 arch/powerpc/kvm/powerpc.c 	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
rt               1285 arch/powerpc/kvm/powerpc.c 			unsigned int rt, unsigned int bytes,
rt               1295 arch/powerpc/kvm/powerpc.c 		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
rt               1494 arch/powerpc/kvm/powerpc.c 		unsigned int rt, unsigned int bytes, int is_default_endian)
rt               1502 arch/powerpc/kvm/powerpc.c 		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
rt               2322 arch/powerpc/xmon/ppc-opc.c #define OPVUPRT(x,vup,rt) (OPVUP (x, vup) | ((((unsigned long)(rt)) & 0x1f) << 21))
rt               2733 arch/powerpc/xmon/ppc-opc.c #define XRT(op, xop, rt) (X ((op), (xop)) \
rt               2734 arch/powerpc/xmon/ppc-opc.c         | ((((unsigned long)(rt)) & 0x1f) << 21))
rt               2737 arch/powerpc/xmon/ppc-opc.c #define XRTRA(op, xop, rt, ra) (X ((op), (xop)) \
rt               2738 arch/powerpc/xmon/ppc-opc.c         | ((((unsigned long)(rt)) & 0x1f) << 21) \
rt                173 arch/sparc/kernel/smp_64.c static inline long get_delta (long *rt, long *master)
rt                193 arch/sparc/kernel/smp_64.c 	*rt = best_t1 - best_t0;
rt                206 arch/sparc/kernel/smp_64.c 	unsigned long flags, rt, master_time_stamp;
rt                209 arch/sparc/kernel/smp_64.c 		long rt;	/* roundtrip time */
rt                224 arch/sparc/kernel/smp_64.c 			delta = get_delta(&rt, &master_time_stamp);
rt                238 arch/sparc/kernel/smp_64.c 			t[i].rt = rt;
rt                250 arch/sparc/kernel/smp_64.c 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
rt                255 arch/sparc/kernel/smp_64.c 	       smp_processor_id(), delta, rt);
rt                 67 arch/x86/pci/irq.c 	struct irq_routing_table *rt;
rt                 71 arch/x86/pci/irq.c 	rt = (struct irq_routing_table *) addr;
rt                 72 arch/x86/pci/irq.c 	if (rt->signature != PIRQ_SIGNATURE ||
rt                 73 arch/x86/pci/irq.c 	    rt->version != PIRQ_VERSION ||
rt                 74 arch/x86/pci/irq.c 	    rt->size % 16 ||
rt                 75 arch/x86/pci/irq.c 	    rt->size < sizeof(struct irq_routing_table))
rt                 78 arch/x86/pci/irq.c 	for (i = 0; i < rt->size; i++)
rt                 82 arch/x86/pci/irq.c 			rt);
rt                 83 arch/x86/pci/irq.c 		return rt;
rt                 97 arch/x86/pci/irq.c 	struct irq_routing_table *rt;
rt                100 arch/x86/pci/irq.c 		rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr));
rt                101 arch/x86/pci/irq.c 		if (rt)
rt                102 arch/x86/pci/irq.c 			return rt;
rt                106 arch/x86/pci/irq.c 		rt = pirq_check_routing_table(addr);
rt                107 arch/x86/pci/irq.c 		if (rt)
rt                108 arch/x86/pci/irq.c 			return rt;
rt                121 arch/x86/pci/irq.c 	struct irq_routing_table *rt = pirq_table;
rt                127 arch/x86/pci/irq.c 	for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
rt                128 arch/x86/pci/irq.c 		e = &rt->slots[i];
rt                822 arch/x86/pci/irq.c 	struct irq_routing_table *rt = pirq_table;
rt                826 arch/x86/pci/irq.c 	if (!rt->signature) {
rt                840 arch/x86/pci/irq.c 	    rt->rtr_vendor, rt->rtr_device);
rt                842 arch/x86/pci/irq.c 	pirq_router_dev = pci_get_domain_bus_and_slot(0, rt->rtr_bus,
rt                843 arch/x86/pci/irq.c 						      rt->rtr_devfn);
rt                846 arch/x86/pci/irq.c 			"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
rt                852 arch/x86/pci/irq.c 		if (rt->rtr_vendor == h->vendor &&
rt                853 arch/x86/pci/irq.c 			h->probe(r, pirq_router_dev, rt->rtr_device))
rt                869 arch/x86/pci/irq.c 	struct irq_routing_table *rt = pirq_table;
rt                870 arch/x86/pci/irq.c 	int entries = (rt->size - sizeof(struct irq_routing_table)) /
rt                874 arch/x86/pci/irq.c 	for (info = rt->slots; entries--; info++)
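
pirq_check_routing_table() in arch/x86/pci/irq.c accepts a candidate $PIR table only if the signature and version match, the size is a non-zero multiple of 16 at least as large as the header, and all rt->size bytes sum to zero. A standalone sketch of that validation; the header struct here is a trimmed stand-in for the kernel's struct irq_routing_table (which has more members), and the signature/version constants follow the convention the listed checks rely on:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
	#define PIRQ_VERSION	0x0100

	/* Illustrative subset of the $PIR header fields the checks touch. */
	struct pir_header {
		uint32_t signature;
		uint16_t version;
		uint16_t size;
	};

	/* Same validation idea: fixed signature and version, size a multiple
	 * of 16, and every byte of the table summing to 0 (mod 256). */
	static int pir_table_ok(const uint8_t *addr, size_t len)
	{
		struct pir_header h;
		uint8_t sum = 0;
		size_t i;

		if (len < sizeof(h))
			return 0;
		memcpy(&h, addr, sizeof(h));
		if (h.signature != PIRQ_SIGNATURE || h.version != PIRQ_VERSION ||
		    h.size % 16 || h.size < sizeof(h) || h.size > len)
			return 0;
		for (i = 0; i < h.size; i++)
			sum += addr[i];
		return sum == 0;
	}

	int main(void)
	{
		uint8_t buf[32] = { 0 };
		struct pir_header h = { PIRQ_SIGNATURE, PIRQ_VERSION, sizeof(buf) };
		uint8_t sum = 0;
		size_t i;

		memcpy(buf, &h, sizeof(h));
		for (i = 0; i < sizeof(buf) - 1; i++)
			sum += buf[i];
		buf[sizeof(buf) - 1] = (uint8_t)-sum;	/* fix up the checksum */

		printf("table valid: %d\n", pir_table_ok(buf, sizeof(buf)));
		return 0;
	}
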
rt                356 arch/x86/pci/pcbios.c 	struct irq_routing_table *rt = NULL;
rt                391 arch/x86/pci/pcbios.c 		rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
rt                392 arch/x86/pci/pcbios.c 		if (rt) {
rt                393 arch/x86/pci/pcbios.c 			memset(rt, 0, sizeof(struct irq_routing_table));
rt                394 arch/x86/pci/pcbios.c 			rt->size = opt.size + sizeof(struct irq_routing_table);
rt                395 arch/x86/pci/pcbios.c 			rt->exclusive_irqs = map;
rt                396 arch/x86/pci/pcbios.c 			memcpy(rt->slots, (void *) page, opt.size);
rt                401 arch/x86/pci/pcbios.c 	return rt;
rt                643 arch/x86/platform/efi/efi_64.c 	u32 *rt, *___f;							 \
rt                645 arch/x86/platform/efi/efi_64.c 	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));	 \
rt                646 arch/x86/platform/efi/efi_64.c 	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
rt                415 arch/xtensa/include/asm/pgtable.h #define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
rt                416 arch/xtensa/include/asm/pgtable.h #define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
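
The xtensa macros emit an extui (extract unsigned immediate) instruction that pulls a bit-field out of a virtual address to form a page-table index. A plain-C rendering of the same extraction; the shift values and table size used for the demo (PGDIR_SHIFT 22, PAGE_SHIFT 12, 1024 PTEs per table) are assumptions for illustration, not quoted from the header:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT		12
	#define PGDIR_SHIFT		22
	#define PTRS_PER_PTE_SHIFT	(PGDIR_SHIFT - PAGE_SHIFT)	/* 10 -> 1024 PTEs */

	/* C equivalent of "extui rt, rs, shift, width": take `width` bits
	 * of `rs` starting at bit `shift`. */
	static uint32_t extui(uint32_t rs, unsigned int shift, unsigned int width)
	{
		return (rs >> shift) & ((1u << width) - 1);
	}

	int main(void)
	{
		uint32_t vaddr = 0xd02ff123u;	/* arbitrary example address */
		uint32_t pgd_index = extui(vaddr, PGDIR_SHIFT, 32 - PGDIR_SHIFT);
		uint32_t pte_index = extui(vaddr, PAGE_SHIFT, PTRS_PER_PTE_SHIFT);

		printf("vaddr=0x%08x pgd=%u pte=%u\n", vaddr, pgd_index, pte_index);
		return 0;
	}
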
rt                426 crypto/crypto_engine.c struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
rt                439 crypto/crypto_engine.c 	engine->rt = rt;
rt                458 crypto/crypto_engine.c 	if (engine->rt) {
rt                 69 drivers/acpi/acpi_tad.c static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
rt                 82 drivers/acpi/acpi_tad.c 	if (rt->year < 1900 || rt->year > 9999 ||
rt                 83 drivers/acpi/acpi_tad.c 	    rt->month < 1 || rt->month > 12 ||
rt                 84 drivers/acpi/acpi_tad.c 	    rt->hour > 23 || rt->minute > 59 || rt->second > 59 ||
rt                 85 drivers/acpi/acpi_tad.c 	    rt->tz < -1440 || (rt->tz > 1440 && rt->tz != 2047) ||
rt                 86 drivers/acpi/acpi_tad.c 	    rt->daylight > 3)
rt                 89 drivers/acpi/acpi_tad.c 	args[0].buffer.pointer = (u8 *)rt;
rt                 90 drivers/acpi/acpi_tad.c 	args[0].buffer.length = sizeof(*rt);
rt                104 drivers/acpi/acpi_tad.c static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
rt                126 drivers/acpi/acpi_tad.c 	if (out_obj->buffer.length != sizeof(*rt))
rt                133 drivers/acpi/acpi_tad.c 	memcpy(rt, data, sizeof(*rt));
rt                159 drivers/acpi/acpi_tad.c 	struct acpi_tad_rt rt;
rt                171 drivers/acpi/acpi_tad.c 	rt.year = val;
rt                177 drivers/acpi/acpi_tad.c 	rt.month = val;
rt                183 drivers/acpi/acpi_tad.c 	rt.day = val;
rt                189 drivers/acpi/acpi_tad.c 	rt.hour = val;
rt                195 drivers/acpi/acpi_tad.c 	rt.minute = val;
rt                201 drivers/acpi/acpi_tad.c 	rt.second = val;
rt                207 drivers/acpi/acpi_tad.c 	rt.tz = val;
rt                212 drivers/acpi/acpi_tad.c 	rt.daylight = val;
rt                214 drivers/acpi/acpi_tad.c 	rt.valid = 0;
rt                215 drivers/acpi/acpi_tad.c 	rt.msec = 0;
rt                216 drivers/acpi/acpi_tad.c 	memset(rt.padding, 0, 3);
rt                218 drivers/acpi/acpi_tad.c 	ret = acpi_tad_set_real_time(dev, &rt);
rt                228 drivers/acpi/acpi_tad.c 	struct acpi_tad_rt rt;
rt                231 drivers/acpi/acpi_tad.c 	ret = acpi_tad_get_real_time(dev, &rt);
rt                236 drivers/acpi/acpi_tad.c 		       rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt                237 drivers/acpi/acpi_tad.c 		       rt.tz, rt.daylight);
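
acpi_tad_set_real_time() rejects values outside the ranges the ACPI time-and-alarm device accepts: year 1900..9999, month 1..12, bounded hour/minute/second, timezone -1440..1440 minutes (or 2047 for "unspecified"), daylight flags 0..3. A standalone sketch of just that range check; the struct below only mirrors the fields the listed code validates, not the full _SRT buffer layout:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative subset of struct acpi_tad_rt: only the validated fields. */
	struct tad_rt {
		uint16_t year;		/* 1900..9999 */
		uint8_t  month;		/* 1..12 */
		uint8_t  day;
		uint8_t  hour;		/* 0..23 */
		uint8_t  minute;	/* 0..59 */
		uint8_t  second;	/* 0..59 */
		int16_t  tz;		/* minutes from UTC, -1440..1440, or 2047 = unspecified */
		uint8_t  daylight;	/* 0..3 */
	};

	static int tad_rt_valid(const struct tad_rt *rt)
	{
		if (rt->year < 1900 || rt->year > 9999 ||
		    rt->month < 1 || rt->month > 12 ||
		    rt->hour > 23 || rt->minute > 59 || rt->second > 59 ||
		    rt->tz < -1440 || (rt->tz > 1440 && rt->tz != 2047) ||
		    rt->daylight > 3)
			return 0;
		return 1;
	}

	int main(void)
	{
		struct tad_rt ok  = { 2024, 6, 1, 12, 0, 0, 2047, 0 };
		struct tad_rt bad = { 2024, 13, 1, 12, 0, 0, 0, 0 };	/* month 13 */

		printf("ok=%d bad=%d\n", tad_rt_valid(&ok), tad_rt_valid(&bad));
		return 0;
	}
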
rt                257 drivers/ata/pata_legacy.c 	u8 rt;
rt                280 drivers/ata/pata_legacy.c 	rt = inb(0x1F3);
rt                281 drivers/ata/pata_legacy.c 	rt &= 0x07 << (3 * adev->devno);
rt                283 drivers/ata/pata_legacy.c 		rt |= (1 + 3 * pio) << (3 * adev->devno);
rt                 93 drivers/block/drbd/drbd_proc.c 	unsigned long db, dt, dbdt, rt, rs_total, rs_left;
rt                149 drivers/block/drbd/drbd_proc.c 	rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
rt                152 drivers/block/drbd/drbd_proc.c 		rt / 3600, (rt % 3600) / 60, rt % 60);
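
drbd_proc.c estimates the time left in a resync from the last sampling window: with dt seconds elapsed and db sectors moved, the remaining rs_left sectors take roughly rs_left * dt / db seconds, and the listed line computes this as (dt * (rs_left / (db/100 + 1))) / 100 so the intermediate products stay small and a zero db cannot divide by zero. A minimal arithmetic sketch with made-up numbers:

	#include <stdio.h>

	/* Same shape as the drbd estimate: scale db down by 100 first
	 * (the +1 avoids dividing by zero), then undo the factor of 100. */
	static unsigned long resync_eta(unsigned long dt, unsigned long db,
					unsigned long rs_left)
	{
		return (dt * (rs_left / (db / 100 + 1))) / 100;	/* seconds */
	}

	int main(void)
	{
		/* e.g. 30 s window, 60000 sectors moved, 9000000 sectors left */
		unsigned long rt = resync_eta(30, 60000, 9000000);

		printf("finish: %lu:%02lu:%02lu\n",
		       rt / 3600, (rt % 3600) / 60, rt % 60);
		return 0;
	}
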
rt               1755 drivers/block/pktcdvd.c 	if (ti->rt == 0 && ti->blank == 0)
rt               1758 drivers/block/pktcdvd.c 	if (ti->rt == 0 && ti->blank == 1)
rt               1761 drivers/block/pktcdvd.c 	if (ti->rt == 1 && ti->blank == 0)
rt               1764 drivers/block/pktcdvd.c 	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
rt                170 drivers/bus/omap_l3_smx.c 		status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
rt                179 drivers/bus/omap_l3_smx.c 		status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
rt                186 drivers/bus/omap_l3_smx.c 	base = l3->rt + omap3_l3_bases[int_type][err_source];
rt                232 drivers/bus/omap_l3_smx.c 	l3->rt = ioremap(res->start, resource_size(res));
rt                233 drivers/bus/omap_l3_smx.c 	if (!l3->rt) {
rt                260 drivers/bus/omap_l3_smx.c 	iounmap(l3->rt);
rt                272 drivers/bus/omap_l3_smx.c 	iounmap(l3->rt);
rt                185 drivers/bus/omap_l3_smx.h 	void __iomem *rt;
rt                 92 drivers/devfreq/event/exynos-ppmu.c 	PPMU_EVENT(d0-rt),
rt                 95 drivers/devfreq/event/exynos-ppmu.c 	PPMU_EVENT(d1-rt),
rt                 25 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
rt                 27 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
rt                 31 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 			__field(bool, rt)
rt                 39 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 			__entry->rt = rt;
rt                 46 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 			__entry->rt, __entry->fl,
rt                510 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c 	u32 r0, r4, rt, rblock_size;
rt                514 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c 	rt = nvkm_rd32(device, 0x100250);
rt                516 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c 		   r0, r4, rt, nvkm_rd32(device, 0x001540));
rt                534 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c 	if (rt & 1)
rt                516 drivers/gpu/drm/radeon/r600_dpm.c void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
rt                518 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
rt                190 drivers/gpu/drm/radeon/r600_dpm.h void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt);
rt                400 drivers/gpu/drm/radeon/rv6xx_dpm.c static void rv6xx_vid_response_set_brt(struct radeon_device *rdev, u32 rt)
rt                402 drivers/gpu/drm/radeon/rv6xx_dpm.c 	WREG32_P(VID_RT, BRT(rt), ~BRT_MASK);
rt                915 drivers/gpu/drm/radeon/rv6xx_dpm.c 	u32 rt;
rt                924 drivers/gpu/drm/radeon/rv6xx_dpm.c 	rt = rv6xx_compute_count_for_delay(rdev,
rt                928 drivers/gpu/drm/radeon/rv6xx_dpm.c 	rv6xx_vid_response_set_brt(rdev, (rt + 0x1F) >> 5);
rt               1046 drivers/hid/hid-wiimote-modules.c 	__s8 rx, ry, lx, ly, lt, rt;
rt               1104 drivers/hid/hid-wiimote-modules.c 	rt = ext[3] & 0x1f;
rt               1110 drivers/hid/hid-wiimote-modules.c 	rt <<= 1;
rt               1117 drivers/hid/hid-wiimote-modules.c 	input_report_abs(wdata->extension.input, ABS_HAT3X, rt);
rt                201 drivers/i2c/busses/i2c-synquacer.c 	u32 rt = i2c->pclkrate;
rt                213 drivers/i2c/busses/i2c-synquacer.c 			ccr_cs = SYNQUACER_I2C_CCR_CS_FAST_MAX_18M(rt);
rt                214 drivers/i2c/busses/i2c-synquacer.c 			csr_cs = SYNQUACER_I2C_CSR_CS_FAST_MAX_18M(rt);
rt                216 drivers/i2c/busses/i2c-synquacer.c 			ccr_cs = SYNQUACER_I2C_CCR_CS_FAST_MIN_18M(rt);
rt                217 drivers/i2c/busses/i2c-synquacer.c 			csr_cs = SYNQUACER_I2C_CSR_CS_FAST_MIN_18M(rt);
rt                228 drivers/i2c/busses/i2c-synquacer.c 			ccr_cs = SYNQUACER_I2C_CCR_CS_STD_MAX_18M(rt);
rt                229 drivers/i2c/busses/i2c-synquacer.c 			csr_cs = SYNQUACER_I2C_CSR_CS_STD_MAX_18M(rt);
rt                231 drivers/i2c/busses/i2c-synquacer.c 			ccr_cs = SYNQUACER_I2C_CCR_CS_STD_MIN_18M(rt);
rt                232 drivers/i2c/busses/i2c-synquacer.c 			csr_cs = SYNQUACER_I2C_CSR_CS_STD_MIN_18M(rt);
rt                350 drivers/infiniband/core/addr.c 	struct rtable *rt;
rt                354 drivers/infiniband/core/addr.c 		rt = container_of(dst, struct rtable, dst);
rt                355 drivers/infiniband/core/addr.c 		return rt->rt_uses_gateway;
rt                392 drivers/infiniband/core/addr.c 	struct rtable *rt;
rt                400 drivers/infiniband/core/addr.c 	rt = ip_route_output_key(addr->net, &fl4);
rt                401 drivers/infiniband/core/addr.c 	ret = PTR_ERR_OR_ZERO(rt);
rt                407 drivers/infiniband/core/addr.c 	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
rt                409 drivers/infiniband/core/addr.c 	*prt = rt;
rt                558 drivers/infiniband/core/addr.c 	struct rtable *rt = NULL;
rt                585 drivers/infiniband/core/addr.c 		ret = addr4_resolve(src_in, dst_in, addr, &rt);
rt                586 drivers/infiniband/core/addr.c 		dst = &rt->dst;
rt                605 drivers/infiniband/core/addr.c 		ip_rt_put(rt);
rt               1445 drivers/infiniband/core/cma.c 	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
rt               1450 drivers/infiniband/core/cma.c 	if (!rt)
rt               1453 drivers/infiniband/core/cma.c 	ret = rt->rt6i_idev->dev == net_dev;
rt               1454 drivers/infiniband/core/cma.c 	ip6_rt_put(rt);
rt               2010 drivers/infiniband/core/cma.c 	struct rdma_route *rt;
rt               2031 drivers/infiniband/core/cma.c 	rt = &id->route;
rt               2032 drivers/infiniband/core/cma.c 	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
rt               2033 drivers/infiniband/core/cma.c 	rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
rt               2035 drivers/infiniband/core/cma.c 	if (!rt->path_rec)
rt               2038 drivers/infiniband/core/cma.c 	rt->path_rec[0] = *path;
rt               2039 drivers/infiniband/core/cma.c 	if (rt->num_paths == 2)
rt               2040 drivers/infiniband/core/cma.c 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
rt               2043 drivers/infiniband/core/cma.c 		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
rt               2047 drivers/infiniband/core/cma.c 			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
rt               2048 drivers/infiniband/core/cma.c 			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
rt               2049 drivers/infiniband/core/cma.c 			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
rt               2051 drivers/infiniband/core/cma.c 			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
rt               2056 drivers/infiniband/core/cma.c 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
rt                225 drivers/infiniband/core/counters.c 	struct rdma_restrack_root *rt;
rt                229 drivers/infiniband/core/counters.c 	rt = &dev->res[RDMA_RESTRACK_COUNTER];
rt                230 drivers/infiniband/core/counters.c 	xa_lock(&rt->xa);
rt                231 drivers/infiniband/core/counters.c 	xa_for_each(&rt->xa, id, res) {
rt                248 drivers/infiniband/core/counters.c 	xa_unlock(&rt->xa);
rt                361 drivers/infiniband/core/counters.c 	struct rdma_restrack_root *rt;
rt                366 drivers/infiniband/core/counters.c 	rt = &dev->res[RDMA_RESTRACK_COUNTER];
rt                367 drivers/infiniband/core/counters.c 	xa_lock(&rt->xa);
rt                368 drivers/infiniband/core/counters.c 	xa_for_each(&rt->xa, id, res) {
rt                372 drivers/infiniband/core/counters.c 		xa_unlock(&rt->xa);
rt                382 drivers/infiniband/core/counters.c 		xa_lock(&rt->xa);
rt                386 drivers/infiniband/core/counters.c 	xa_unlock(&rt->xa);
rt                690 drivers/infiniband/core/nldev.c 	struct rdma_restrack_root *rt;
rt                698 drivers/infiniband/core/nldev.c 	rt = &counter->device->res[RDMA_RESTRACK_QP];
rt                699 drivers/infiniband/core/nldev.c 	xa_lock(&rt->xa);
rt                700 drivers/infiniband/core/nldev.c 	xa_for_each(&rt->xa, id, res) {
rt                716 drivers/infiniband/core/nldev.c 	xa_unlock(&rt->xa);
rt                721 drivers/infiniband/core/nldev.c 	xa_unlock(&rt->xa);
rt               1275 drivers/infiniband/core/nldev.c 	struct rdma_restrack_root *rt;
rt               1333 drivers/infiniband/core/nldev.c 	rt = &device->res[res_type];
rt               1334 drivers/infiniband/core/nldev.c 	xa_lock(&rt->xa);
rt               1340 drivers/infiniband/core/nldev.c 	xa_for_each(&rt->xa, id, res) {
rt               1347 drivers/infiniband/core/nldev.c 		xa_unlock(&rt->xa);
rt               1370 drivers/infiniband/core/nldev.c again:		xa_lock(&rt->xa);
rt               1373 drivers/infiniband/core/nldev.c 	xa_unlock(&rt->xa);
rt                 25 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt;
rt                 28 drivers/infiniband/core/restrack.c 	dev->res = kcalloc(RDMA_RESTRACK_MAX, sizeof(*rt), GFP_KERNEL);
rt                 32 drivers/infiniband/core/restrack.c 	rt = dev->res;
rt                 35 drivers/infiniband/core/restrack.c 		xa_init_flags(&rt[i].xa, XA_FLAGS_ALLOC);
rt                 61 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt = dev->res;
rt                103 drivers/infiniband/core/restrack.c 	kfree(rt);
rt                113 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt = &dev->res[type];
rt                115 drivers/infiniband/core/restrack.c 	XA_STATE(xas, &rt->xa, 0);
rt                118 drivers/infiniband/core/restrack.c 	xa_lock(&rt->xa);
rt                124 drivers/infiniband/core/restrack.c 	xa_unlock(&rt->xa);
rt                211 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt;
rt                217 drivers/infiniband/core/restrack.c 	rt = &dev->res[res->type];
rt                225 drivers/infiniband/core/restrack.c 		ret = xa_insert(&rt->xa, qp->qp_num, res, GFP_KERNEL);
rt                232 drivers/infiniband/core/restrack.c 		ret = xa_insert(&rt->xa, counter->id, res, GFP_KERNEL);
rt                235 drivers/infiniband/core/restrack.c 		ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
rt                236 drivers/infiniband/core/restrack.c 				      &rt->next_id, GFP_KERNEL);
rt                293 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt = &dev->res[type];
rt                296 drivers/infiniband/core/restrack.c 	xa_lock(&rt->xa);
rt                297 drivers/infiniband/core/restrack.c 	res = xa_load(&rt->xa, id);
rt                300 drivers/infiniband/core/restrack.c 	xa_unlock(&rt->xa);
rt                323 drivers/infiniband/core/restrack.c 	struct rdma_restrack_root *rt;
rt                333 drivers/infiniband/core/restrack.c 	rt = &dev->res[res->type];
rt                335 drivers/infiniband/core/restrack.c 	old = xa_erase(&rt->xa, res->id);
rt                338 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct rtable *rt;
rt                341 drivers/infiniband/hw/cxgb3/iwch_cm.c 	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
rt                344 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (IS_ERR(rt))
rt                346 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return rt;
rt               1341 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct rtable *rt;
rt               1362 drivers/infiniband/hw/cxgb3/iwch_cm.c 	rt = find_route(tdev,
rt               1367 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!rt) {
rt               1371 drivers/infiniband/hw/cxgb3/iwch_cm.c 	dst = &rt->dst;
rt               1879 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct rtable *rt;
rt               1931 drivers/infiniband/hw/cxgb3/iwch_cm.c 	rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
rt               1934 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!rt) {
rt               1939 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->dst = &rt->dst;
rt               1987 drivers/infiniband/hw/i40iw/i40iw_cm.c 	struct rtable *rt;
rt               1993 drivers/infiniband/hw/i40iw/i40iw_cm.c 	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
rt               1994 drivers/infiniband/hw/i40iw/i40iw_cm.c 	if (IS_ERR(rt)) {
rt               1999 drivers/infiniband/hw/i40iw/i40iw_cm.c 	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rt               2028 drivers/infiniband/hw/i40iw/i40iw_cm.c 	ip_rt_put(rt);
rt                438 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	struct rtable *rt = NULL;
rt                441 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
rt                442 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	if (IS_ERR(rt)) {
rt                447 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
rt                461 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	ip_rt_put(rt);
rt                 88 drivers/infiniband/sw/rxe/rxe_net.c 	struct rtable *rt;
rt                 97 drivers/infiniband/sw/rxe/rxe_net.c 	rt = ip_route_output_key(&init_net, &fl);
rt                 98 drivers/infiniband/sw/rxe/rxe_net.c 	if (IS_ERR(rt)) {
rt                103 drivers/infiniband/sw/rxe/rxe_net.c 	return &rt->dst;
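
Nearly every rtable lookup in this listing follows the same error-handling shape: the lookup returns either a valid pointer or a small negative errno encoded into the pointer, and callers test it with IS_ERR()/PTR_ERR() (or PTR_ERR_OR_ZERO()) before touching rt->dst and eventually dropping the reference with ip_rt_put(). Below is a userspace re-creation of just that pointer-encoding idiom so the recurring pattern is easier to read; fake_route_lookup() is an invented stand-in, and the real route calls are kernel-only and not reproduced:

	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>
	#include <stdint.h>

	#define MAX_ERRNO	4095

	/* Encode a small negative errno into a pointer value, and back. */
	static void *ERR_PTR(long error)      { return (void *)error; }
	static long  PTR_ERR(const void *ptr) { return (long)ptr; }
	static int   IS_ERR(const void *ptr)
	{
		return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
	}
	static long PTR_ERR_OR_ZERO(const void *ptr)
	{
		return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
	}

	/* Stand-in for a lookup such as ip_route_output_key(): either a
	 * valid object or an encoded -ENETUNREACH. */
	static int *fake_route_lookup(int reachable)
	{
		if (!reachable)
			return ERR_PTR(-ENETUNREACH);
		return malloc(sizeof(int));
	}

	int main(void)
	{
		int *rt = fake_route_lookup(0);

		if (IS_ERR(rt))
			printf("lookup failed: %ld\n", PTR_ERR(rt));

		rt = fake_route_lookup(1);
		printf("err-or-zero: %ld\n", PTR_ERR_OR_ZERO(rt));
		if (!IS_ERR(rt))
			free(rt);	/* the kernel callers drop their ref with ip_rt_put() */
		return 0;
	}
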
rt                 54 drivers/input/touchscreen/88pm860x-ts.c 	int z1, z2, rt = 0;
rt                 69 drivers/input/touchscreen/88pm860x-ts.c 			rt = z2 / z1 - 1;
rt                 70 drivers/input/touchscreen/88pm860x-ts.c 			rt = (rt * touch->res_x * x) >> ACCURATE_BIT;
rt                 72 drivers/input/touchscreen/88pm860x-ts.c 				z1, z2, rt);
rt                 76 drivers/input/touchscreen/88pm860x-ts.c 		input_report_abs(touch->idev, ABS_PRESSURE, rt);
rt                 70 drivers/input/touchscreen/tsc2007_core.c 	u32 rt = 0;
rt                 78 drivers/input/touchscreen/tsc2007_core.c 		rt = tc->z2 - tc->z1;
rt                 79 drivers/input/touchscreen/tsc2007_core.c 		rt *= tc->x;
rt                 80 drivers/input/touchscreen/tsc2007_core.c 		rt *= tsc->x_plate_ohms;
rt                 81 drivers/input/touchscreen/tsc2007_core.c 		rt /= tc->z1;
rt                 82 drivers/input/touchscreen/tsc2007_core.c 		rt = (rt + 2047) >> 12;
rt                 85 drivers/input/touchscreen/tsc2007_core.c 	return rt;
rt                115 drivers/input/touchscreen/tsc2007_core.c 	u32 rt;
rt                125 drivers/input/touchscreen/tsc2007_core.c 		rt = tsc2007_calculate_resistance(ts, &tc);
rt                127 drivers/input/touchscreen/tsc2007_core.c 		if (!rt && !ts->get_pendown_state) {
rt                136 drivers/input/touchscreen/tsc2007_core.c 		if (rt <= ts->max_rt) {
rt                139 drivers/input/touchscreen/tsc2007_core.c 				tc.x, tc.y, rt);
rt                141 drivers/input/touchscreen/tsc2007_core.c 			rt = ts->max_rt - rt;
rt                146 drivers/input/touchscreen/tsc2007_core.c 			input_report_abs(input, ABS_PRESSURE, rt);
rt                156 drivers/input/touchscreen/tsc2007_core.c 			dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
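
tsc2007_calculate_resistance() turns the raw X/Z1/Z2 ADC readings into a touch resistance, rt = x_plate_ohms * x * (z2 - z1) / z1, scaled back to ohms from the 12-bit ADC range with (rt + 2047) >> 12; the 88pm860x lines above use the equivalent z2/z1 - 1 form. A standalone sketch of the tsc2007 arithmetic with made-up sample values; the zero/ordering guards are simplifications of the driver's own sanity checks:

	#include <stdio.h>
	#include <stdint.h>

	struct ts_sample {
		uint32_t x, z1, z2;
	};

	/* Resistance grows with (z2 - z1)/z1 and with the X position,
	 * scaled by the plate resistance and rounded back down by the
	 * 12-bit ADC range. */
	static uint32_t touch_resistance(const struct ts_sample *tc,
					 uint32_t x_plate_ohms)
	{
		uint32_t rt;

		if (tc->x == 0 || tc->z1 == 0 || tc->z2 <= tc->z1)
			return 0;	/* treat as "no valid pressure" */

		rt = tc->z2 - tc->z1;
		rt *= tc->x;
		rt *= x_plate_ohms;
		rt /= tc->z1;
		rt = (rt + 2047) >> 12;
		return rt;
	}

	int main(void)
	{
		struct ts_sample tc = { .x = 1800, .z1 = 400, .z2 = 1900 };

		printf("rt = %u ohms\n", touch_resistance(&tc, 250));
		return 0;
	}

The driver then reports ts->max_rt - rt as the pressure value, so a lighter touch (higher contact resistance) maps to a lower reported pressure.
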
rt                448 drivers/md/dm-raid.c static bool rt_is_raid0(struct raid_type *rt)
rt                450 drivers/md/dm-raid.c 	return !rt->level;
rt                454 drivers/md/dm-raid.c static bool rt_is_raid1(struct raid_type *rt)
rt                456 drivers/md/dm-raid.c 	return rt->level == 1;
rt                460 drivers/md/dm-raid.c static bool rt_is_raid10(struct raid_type *rt)
rt                462 drivers/md/dm-raid.c 	return rt->level == 10;
rt                466 drivers/md/dm-raid.c static bool rt_is_raid45(struct raid_type *rt)
rt                468 drivers/md/dm-raid.c 	return __within_range(rt->level, 4, 5);
rt                472 drivers/md/dm-raid.c static bool rt_is_raid6(struct raid_type *rt)
rt                474 drivers/md/dm-raid.c 	return rt->level == 6;
rt                478 drivers/md/dm-raid.c static bool rt_is_raid456(struct raid_type *rt)
rt                480 drivers/md/dm-raid.c 	return __within_range(rt->level, 4, 6);
rt               1128 drivers/md/dm-raid.c 	struct raid_type *rt = rs->raid_type;
rt               1142 drivers/md/dm-raid.c 	if (rt_is_raid1(rt)) {
rt               1226 drivers/md/dm-raid.c 			if (!rt_is_raid10(rt)) {
rt               1247 drivers/md/dm-raid.c 			if (!rt_is_raid456(rt)) {
rt               1322 drivers/md/dm-raid.c 			if (!rt_is_raid1(rt)) {
rt               1336 drivers/md/dm-raid.c 			if (!rt_is_raid1(rt)) {
rt               1386 drivers/md/dm-raid.c 			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
rt               1398 drivers/md/dm-raid.c 			if (!rt_is_raid456(rt)) {
rt               1492 drivers/md/dm-raid.c 	if (rt_is_raid10(rt)) {
rt               1504 drivers/md/dm-raid.c 		rt = get_raid_type_by_ll(10, rs->md.new_layout);
rt               1505 drivers/md/dm-raid.c 		if (!rt) {
rt               1510 drivers/md/dm-raid.c 		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
rt               1511 drivers/md/dm-raid.c 		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
rt               3009 drivers/md/dm-raid.c 	struct raid_type *rt;
rt               3028 drivers/md/dm-raid.c 	rt = get_raid_type(arg);
rt               3029 drivers/md/dm-raid.c 	if (!rt) {
rt               3050 drivers/md/dm-raid.c 	rs = raid_set_alloc(ti, rt, num_raid_devs);
rt               3516 drivers/md/dm-raid.c 	struct raid_type *rt;
rt               3521 drivers/md/dm-raid.c 		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
rt               3522 drivers/md/dm-raid.c 		if (!rt)
rt               3525 drivers/md/dm-raid.c 		DMEMIT("%s %d ", rt->name, mddev->raid_disks);
rt               7815 drivers/md/md.c 	sector_t rt, curr_mark_cnt, resync_mark_cnt;
rt               7927 drivers/md/md.c 	rt = max_sectors - resync;    /* number of remaining sectors */
rt               7928 drivers/md/md.c 	rt = div64_u64(rt, db/32+1);
rt               7929 drivers/md/md.c 	rt *= dt;
rt               7930 drivers/md/md.c 	rt >>= 5;
rt               7932 drivers/md/md.c 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
rt               7933 drivers/md/md.c 		   ((unsigned long)rt % 60)/6);
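
md.c's resync progress line estimates the finish time as remaining sectors divided by the recent rate (db sectors in dt seconds). To keep remaining * dt inside 64 bits it first divides the remaining count by db/32 + 1, multiplies by dt, then shifts right by 5 to cancel the factor of 32. A small arithmetic sketch with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	/* remaining * dt / db, computed the way the md code does: divide by
	 * db/32 + 1 first (the +1 also avoids a divide by zero), multiply by
	 * dt, then >> 5 to cancel the extra factor of 32. */
	static uint64_t resync_finish_seconds(uint64_t remaining_sectors,
					      uint64_t db, uint64_t dt)
	{
		uint64_t rt = remaining_sectors;

		rt /= db / 32 + 1;
		rt *= dt;
		rt >>= 5;
		return rt;
	}

	int main(void)
	{
		/* 2 TiB of sectors left, 1 GiB of sectors moved in 30 seconds */
		uint64_t rt = resync_finish_seconds(4294967296ULL, 2097152ULL, 30);

		printf("finish=%llu.%llumin\n",
		       (unsigned long long)(rt / 60),
		       (unsigned long long)((rt % 60) / 6));
		return 0;
	}
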
rt                119 drivers/media/pci/tw686x/tw686x-audio.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                123 drivers/media/pci/tw686x/tw686x-audio.c 	rt->hw = tw686x_capture_hw;
rt                125 drivers/media/pci/tw686x/tw686x-audio.c 	err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
rt                145 drivers/media/pci/tw686x/tw686x-audio.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                156 drivers/media/pci/tw686x/tw686x-audio.c 	if (((dev->audio_rate != rt->rate) ||
rt                163 drivers/media/pci/tw686x/tw686x-audio.c 	if (dev->audio_rate != rt->rate) {
rt                166 drivers/media/pci/tw686x/tw686x-audio.c 		dev->audio_rate = rt->rate;
rt                167 drivers/media/pci/tw686x/tw686x-audio.c 		reg = ((125000000 / rt->rate) << 16) +
rt                168 drivers/media/pci/tw686x/tw686x-audio.c 		       ((125000000 % rt->rate) << 16) / rt->rate;
rt                184 drivers/media/pci/tw686x/tw686x-audio.c 	if (rt->periods < TW686X_AUDIO_PERIODS_MIN ||
rt                185 drivers/media/pci/tw686x/tw686x-audio.c 	    rt->periods > TW686X_AUDIO_PERIODS_MAX)
rt                191 drivers/media/pci/tw686x/tw686x-audio.c 	for (i = 0; i < rt->periods; i++) {
rt                192 drivers/media/pci/tw686x/tw686x-audio.c 		ac->buf[i].dma = rt->dma_addr + period_size * i;
rt                193 drivers/media/pci/tw686x/tw686x-audio.c 		ac->buf[i].virt = rt->dma_area + period_size * i;
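
tw686x-audio.c programs its sample-rate divider as a 16.16 fixed-point value of 125 MHz / rate, splitting the division into an integer quotient and a remainder so the intermediate values stay inside 32 bits. A tiny sketch of that fixed-point construction (48 kHz is just an example rate; for very low rates the remainder << 16 term would need 64-bit math):

	#include <stdio.h>
	#include <stdint.h>

	/* Build (clk / rate) as 16.16 fixed point without 64-bit math:
	 * integer part from the quotient, fractional part from the remainder. */
	static uint32_t fixed_16_16_div(uint32_t clk, uint32_t rate)
	{
		return ((clk / rate) << 16) + ((clk % rate) << 16) / rate;
	}

	int main(void)
	{
		uint32_t reg = fixed_16_16_div(125000000, 48000);

		printf("reg=0x%08x (%u + %u/65536)\n",
		       reg, reg >> 16, reg & 0xffff);
		return 0;
	}
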
rt                 50 drivers/media/platform/rockchip/rga/rga-hw.c 	struct rga_addr_offset *lt, *lb, *rt, *rb;
rt                 56 drivers/media/platform/rockchip/rga/rga-hw.c 	rt = &offsets.right_top;
rt                 74 drivers/media/platform/rockchip/rga/rga-hw.c 	rt->y_off = lt->y_off + (w - 1) * pixel_width;
rt                 75 drivers/media/platform/rockchip/rga/rga-hw.c 	rt->u_off = lt->u_off + w / x_div - 1;
rt                 76 drivers/media/platform/rockchip/rga/rga-hw.c 	rt->v_off = lt->v_off + w / x_div - 1;
rt                 70 drivers/media/radio/radio-aimslab.c 	struct rtrack *rt = kzalloc(sizeof(struct rtrack), GFP_KERNEL);
rt                 72 drivers/media/radio/radio-aimslab.c 	if (rt)
rt                 73 drivers/media/radio/radio-aimslab.c 		rt->curvol = 0xff;
rt                 74 drivers/media/radio/radio-aimslab.c 	return rt ? &rt->isa : NULL;
rt                 89 drivers/media/radio/radio-aimslab.c 	struct rtrack *rt = container_of(isa, struct rtrack, isa);
rt                 92 drivers/media/radio/radio-aimslab.c 	if (!v4l2_ctrl_g_ctrl(rt->isa.mute))
rt                102 drivers/media/radio/radio-aimslab.c 	outb_p(bits, rt->isa.io);
rt                120 drivers/media/radio/radio-aimslab.c 	struct rtrack *rt = container_of(isa, struct rtrack, isa);
rt                121 drivers/media/radio/radio-aimslab.c 	int curvol = rt->curvol;
rt                140 drivers/media/radio/radio-aimslab.c 	rt->curvol = vol;
rt                848 drivers/media/radio/si4713/si4713.c static int si4713_set_rds_radio_text(struct si4713_device *sdev, const char *rt)
rt                863 drivers/media/radio/si4713/si4713.c 	if (!strlen(rt))
rt                872 drivers/media/radio/si4713/si4713.c 				if (!rt[t_index + i] ||
rt                873 drivers/media/radio/si4713/si4713.c 				    rt[t_index + i] == RDS_CARRIAGE_RETURN) {
rt                874 drivers/media/radio/si4713/si4713.c 					rt = cr;
rt                883 drivers/media/radio/si4713/si4713.c 				compose_u16(rt[t_index], rt[t_index + 1]),
rt                884 drivers/media/radio/si4713/si4713.c 				compose_u16(rt[t_index + 2], rt[t_index + 3]),
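
si4713_set_rds_radio_text() walks the radio-text string four characters at a time, packing each group into two 16-bit words with compose_u16() and substituting a carriage-return terminator once the string ends. A standalone sketch of that chunking; treating compose_u16() as a high-byte/low-byte pair is my reading of how it is used, not a quote, and the group size constant is assumed:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define RDS_RADIOTEXT_BLK_CHARS	4
	#define RDS_CARRIAGE_RETURN	0x0d

	static uint16_t compose_u16(uint8_t hi, uint8_t lo)
	{
		return (uint16_t)((hi << 8) | lo);
	}

	/* Emit the radio text in groups of four characters, two u16 words
	 * per group; once the string ends, pad the group with CR. */
	static void send_radio_text(const char *rt)
	{
		size_t len = strlen(rt);
		size_t t_index;

		for (t_index = 0; t_index < len; t_index += RDS_RADIOTEXT_BLK_CHARS) {
			uint8_t c[RDS_RADIOTEXT_BLK_CHARS];
			int i;

			for (i = 0; i < RDS_RADIOTEXT_BLK_CHARS; i++)
				c[i] = (t_index + i < len) ? (uint8_t)rt[t_index + i]
							   : RDS_CARRIAGE_RETURN;

			printf("group %zu: 0x%04x 0x%04x\n", t_index / 4,
			       compose_u16(c[0], c[1]), compose_u16(c[2], c[3]));
		}
	}

	int main(void)
	{
		send_radio_text("HELLO RADIO");
		return 0;
	}
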
rt                 55 drivers/net/appletalk/ipddp.c static int ipddp_delete(struct ipddp_route *rt);
rt                 56 drivers/net/appletalk/ipddp.c static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
rt                122 drivers/net/appletalk/ipddp.c         struct ipddp_route *rt;
rt                133 drivers/net/appletalk/ipddp.c         for(rt = ipddp_route_list; rt != NULL; rt = rt->next)
rt                135 drivers/net/appletalk/ipddp.c                 if(rt->ip == paddr)
rt                138 drivers/net/appletalk/ipddp.c         if(rt == NULL) {
rt                143 drivers/net/appletalk/ipddp.c         our_addr = atalk_find_dev_addr(rt->dev);
rt                162 drivers/net/appletalk/ipddp.c         if(rt->dev->type == ARPHRD_LOCALTLK)
rt                169 drivers/net/appletalk/ipddp.c                 ddp->deh_dnet  = rt->at.s_net;   /* FIXME more hops?? */
rt                172 drivers/net/appletalk/ipddp.c         ddp->deh_dnode = rt->at.s_node;
rt                184 drivers/net/appletalk/ipddp.c 	aarp_send_ddp(rt->dev, skb, &rt->at, NULL);
rt                197 drivers/net/appletalk/ipddp.c         struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
rt                199 drivers/net/appletalk/ipddp.c         if (rt == NULL)
rt                202 drivers/net/appletalk/ipddp.c         rt->ip = new_rt->ip;
rt                203 drivers/net/appletalk/ipddp.c         rt->at = new_rt->at;
rt                204 drivers/net/appletalk/ipddp.c         rt->next = NULL;
rt                205 drivers/net/appletalk/ipddp.c         if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) {
rt                206 drivers/net/appletalk/ipddp.c 		kfree(rt);
rt                211 drivers/net/appletalk/ipddp.c 	if (__ipddp_find_route(rt)) {
rt                213 drivers/net/appletalk/ipddp.c 		kfree(rt);
rt                217 drivers/net/appletalk/ipddp.c         rt->next = ipddp_route_list;
rt                218 drivers/net/appletalk/ipddp.c         ipddp_route_list = rt;
rt                229 drivers/net/appletalk/ipddp.c static int ipddp_delete(struct ipddp_route *rt)
rt                237 drivers/net/appletalk/ipddp.c                 if(tmp->ip == rt->ip &&
rt                238 drivers/net/appletalk/ipddp.c 		   tmp->at.s_net == rt->at.s_net &&
rt                239 drivers/net/appletalk/ipddp.c 		   tmp->at.s_node == rt->at.s_node)
rt                256 drivers/net/appletalk/ipddp.c static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
rt                262 drivers/net/appletalk/ipddp.c                 if(f->ip == rt->ip &&
rt                263 drivers/net/appletalk/ipddp.c 		   f->at.s_net == rt->at.s_net &&
rt                264 drivers/net/appletalk/ipddp.c 		   f->at.s_node == rt->at.s_node)
rt                273 drivers/net/appletalk/ipddp.c         struct ipddp_route __user *rt = ifr->ifr_data;
rt                279 drivers/net/appletalk/ipddp.c 	if(copy_from_user(&rcp, rt, sizeof(rcp)))
rt                299 drivers/net/appletalk/ipddp.c 				if (copy_to_user(rt, &rcp2,
rt               2464 drivers/net/bonding/bond_main.c 	struct rtable *rt;
rt               2475 drivers/net/bonding/bond_main.c 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
rt               2477 drivers/net/bonding/bond_main.c 		if (IS_ERR(rt)) {
rt               2491 drivers/net/bonding/bond_main.c 		if (rt->dst.dev == bond->dev)
rt               2495 drivers/net/bonding/bond_main.c 		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
rt               2503 drivers/net/bonding/bond_main.c 			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
rt               2505 drivers/net/bonding/bond_main.c 		ip_rt_put(rt);
rt               2509 drivers/net/bonding/bond_main.c 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
rt               2510 drivers/net/bonding/bond_main.c 		ip_rt_put(rt);
rt                232 drivers/net/ethernet/amd/declance.c #define lib_off(rt, type)						\
rt                233 drivers/net/ethernet/amd/declance.c 	shift_off(offsetof(struct lance_init_block, rt), type)
rt                235 drivers/net/ethernet/amd/declance.c #define lib_ptr(ib, rt, type) 						\
rt                236 drivers/net/ethernet/amd/declance.c 	((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
rt                238 drivers/net/ethernet/amd/declance.c #define rds_off(rt, type)						\
rt                239 drivers/net/ethernet/amd/declance.c 	shift_off(offsetof(struct lance_rx_desc, rt), type)
rt                241 drivers/net/ethernet/amd/declance.c #define rds_ptr(rd, rt, type) 						\
rt                242 drivers/net/ethernet/amd/declance.c 	((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
rt                244 drivers/net/ethernet/amd/declance.c #define tds_off(rt, type)						\
rt                245 drivers/net/ethernet/amd/declance.c 	shift_off(offsetof(struct lance_tx_desc, rt), type)
rt                247 drivers/net/ethernet/amd/declance.c #define tds_ptr(td, rt, type) 						\
rt                248 drivers/net/ethernet/amd/declance.c 	((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
rt                235 drivers/net/ethernet/amd/sunlance.c #define libdesc_offset(rt, elem) \
rt                236 drivers/net/ethernet/amd/sunlance.c ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
rt                238 drivers/net/ethernet/amd/sunlance.c #define libbuff_offset(rt, elem) \
rt                239 drivers/net/ethernet/amd/sunlance.c ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
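
The declance and sunlance macros compute byte offsets of ring descriptors inside the init block with the classic "take the member's address off a null struct pointer" trick (plus a bus-layout shift in declance's shift_off()). A toy sketch showing the same offset arithmetic with the portable offsetof(); the struct layout below is invented for illustration, not the LANCE init block:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Toy stand-in for lance_init_block: two descriptor rings. */
	struct ring_desc {
		uint16_t addr_low;
		uint16_t flags;
	};

	struct init_block {
		uint16_t mode;
		struct ring_desc brx_ring[4];
		struct ring_desc btx_ring[4];
	};

	/* Same idea as libdesc_offset(rt, elem): the byte offset of ring
	 * `rt`, element `elem`, inside the init block. The kernel macros
	 * spell it with &(((struct x *)0)->rt[elem]); offsetof() is the
	 * portable equivalent. */
	#define desc_offset(rt, elem) \
		(offsetof(struct init_block, rt) + (elem) * sizeof(struct ring_desc))

	int main(void)
	{
		printf("brx[2] at %zu, btx[0] at %zu\n",
		       desc_offset(brx_ring, 2), desc_offset(btx_ring, 0));
		return 0;
	}
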
rt                960 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct rtable *rt;
rt                967 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
rt                968 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	if (IS_ERR(rt)) {
rt                976 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	dst_dev = rt->dst.dev;
rt               1002 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
rt               1011 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
rt               1015 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	ip_rt_put(rt);
rt               1019 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	ip_rt_put(rt);
rt               3678 drivers/net/ethernet/broadcom/cnic.c 	struct rtable *rt;
rt               3680 drivers/net/ethernet/broadcom/cnic.c 	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
rt               3681 drivers/net/ethernet/broadcom/cnic.c 	if (!IS_ERR(rt)) {
rt               3682 drivers/net/ethernet/broadcom/cnic.c 		*dst = &rt->dst;
rt               3685 drivers/net/ethernet/broadcom/cnic.c 	return PTR_ERR(rt);
rt                 96 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	struct rtable *rt;
rt                100 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
rt                103 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	if (IS_ERR(rt))
rt                105 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	n = dst_neigh_lookup(&rt->dst, &peer_ip);
rt                111 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 		dst_release(&rt->dst);
rt                115 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c 	return &rt->dst;
rt                288 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static const char *resource_str(enum mlx4_resource rt)
rt                290 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	switch (rt) {
rt                 80 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	struct rtable *rt;
rt                 95 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
rt                 96 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ret = PTR_ERR_OR_ZERO(rt);
rt                100 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
rt                101 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		ip_rt_put(rt);
rt                108 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
rt                110 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		ip_rt_put(rt);
rt                115 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		*out_ttl = ip4_dst_hoplimit(&rt->dst);
rt                116 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
rt                117 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ip_rt_put(rt);
rt               1076 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 		enum mlx5_reformat_ctx_type rt;
rt               1079 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 			rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
rt               1081 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 			rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
rt               1083 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 		ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
rt                393 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 				   enum mlx5_reformat_ctx_type rt,
rt                419 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
rt                915 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 				   enum mlx5_reformat_ctx_type rt,
rt                420 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct fib6_info *rt;
rt               2874 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
rt               2946 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
rt               4014 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
rt               4016 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
rt               4018 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				    &rt->fib6_nh->fib_nh_gw6))
rt               4078 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				 list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
rt               4084 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
rt               4104 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
rt               4106 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
rt               4924 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
rt               4930 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
rt               4936 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
rt               4940 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (rt->fib6_flags & RTF_CACHE)
rt               4946 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
rt               4958 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	mlxsw_sp_rt6->rt = rt;
rt               4959 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib6_info_hold(rt);
rt               4965 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_rt6_release(struct fib6_info *rt)
rt               4967 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib6_info_release(rt);
rt               4970 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_rt6_release(struct fib6_info *rt)
rt               4977 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
rt               4981 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
rt               4984 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	return !(rt->fib6_flags & RTF_ADDRCONF) &&
rt               4985 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		rt->fib6_nh->fib_nh_gw_family;
rt               4992 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				list)->rt;
rt               5005 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
rt               5010 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
rt               5012 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
rt               5014 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_metric < nrt->fib6_metric)
rt               5016 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_metric == nrt->fib6_metric &&
rt               5017 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		    mlxsw_sp_fib6_rt_can_mp(rt))
rt               5019 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_metric > nrt->fib6_metric)
rt               5028 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			    const struct fib6_info *rt)
rt               5033 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (mlxsw_sp_rt6->rt == rt)
rt               5041 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					const struct fib6_info *rt,
rt               5044 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	return rt->fib6_nh->fib_nh_dev &&
rt               5045 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
rt               5051 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				       const struct fib6_info *rt)
rt               5055 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
rt               5096 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				  const struct fib6_info *rt)
rt               5098 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
rt               5101 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
rt               5102 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
rt               5111 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
rt               5123 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				    const struct fib6_info *rt)
rt               5125 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	return rt->fib6_nh->fib_nh_gw_family ||
rt               5126 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
rt               5149 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
rt               5152 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
rt               5155 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
rt               5323 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					 const struct fib6_info *rt)
rt               5331 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
rt               5333 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	else if (rt->fib6_type == RTN_BLACKHOLE)
rt               5335 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	else if (rt->fib6_flags & RTF_REJECT)
rt               5337 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
rt               5423 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
rt               5425 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
rt               5427 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
rt               5429 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (replace && rt->fib6_metric == nrt->fib6_metric) {
rt               5430 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
rt               5436 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_metric > nrt->fib6_metric)
rt               5463 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
rt               5465 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
rt               5518 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			   const struct fib6_info *rt)
rt               5525 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
rt               5530 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
rt               5531 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					    sizeof(rt->fib6_dst.addr),
rt               5532 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					    rt->fib6_dst.plen);
rt               5539 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
rt               5540 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		    rt->fib6_metric == iter_rt->fib6_metric &&
rt               5541 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
rt               5571 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct fib6_info *rt = rt_arr[0];
rt               5577 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (rt->fib6_src.plen)
rt               5580 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
rt               5583 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
rt               5584 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					 &rt->fib6_dst.addr,
rt               5585 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					 sizeof(rt->fib6_dst.addr),
rt               5586 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 					 rt->fib6_dst.plen,
rt               5594 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
rt               5632 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct fib6_info *rt = rt_arr[0];
rt               5637 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
rt               5645 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
rt               5938 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct fib6_info *rt = fen6_info->rt;
rt               5953 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	rt_arr[0] = rt;
rt               5954 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	fib6_info_hold(rt);
rt               5959 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
rt               6281 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 			if (fen6_info->rt->nh) {
rt                300 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct rtable *rt = NULL;
rt                310 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	rt = ip_route_output_key(tun->net, &fl4);
rt                311 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	if (IS_ERR(rt))
rt                314 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	if (rt->rt_type != RTN_UNICAST)
rt                317 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	dev = rt->dst.dev;
rt                319 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	if (rt->rt_gw_family == AF_INET)
rt                320 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		*daddrp = rt->rt_gw4;
rt                322 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	else if (rt->rt_gw_family == AF_INET6)
rt                326 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	ip_rt_put(rt);
rt                436 drivers/net/ethernet/netronome/nfp/flower/action.c 		struct rtable *rt;
rt                445 drivers/net/ethernet/netronome/nfp/flower/action.c 		rt = ip_route_output_key(net, &flow);
rt                446 drivers/net/ethernet/netronome/nfp/flower/action.c 		err = PTR_ERR_OR_ZERO(rt);
rt                448 drivers/net/ethernet/netronome/nfp/flower/action.c 			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
rt                449 drivers/net/ethernet/netronome/nfp/flower/action.c 			ip_rt_put(rt);
rt                334 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	struct rtable *rt;
rt                364 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	rt = ip_route_output_key(dev_net(n->dev), &flow);
rt                365 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	err = PTR_ERR_OR_ZERO(rt);
rt                369 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	ip_rt_put(rt);
rt                386 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	struct rtable *rt;
rt                401 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	rt = ip_route_output_key(dev_net(netdev), &flow);
rt                402 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	err = PTR_ERR_OR_ZERO(rt);
rt                410 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
rt                411 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	ip_rt_put(rt);
rt               2054 drivers/net/ethernet/sis/sis900.c 	int rt;
rt               2056 drivers/net/ethernet/sis/sis900.c 	rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
rt               2058 drivers/net/ethernet/sis/sis900.c 	return rt;
rt               8719 drivers/net/ethernet/sun/niu.c 			struct rdc_table *rt = &tp->tables[grp];
rt               8725 drivers/net/ethernet/sun/niu.c 				rt->rxdma_channel[slot] =
rt               8728 drivers/net/ethernet/sun/niu.c 				pr_cont("%d ", rt->rxdma_channel[slot]);
rt               1358 drivers/net/ethernet/sun/sunvnet_common.c 			struct rtable *rt = NULL;
rt               1366 drivers/net/ethernet/sun/sunvnet_common.c 			rt = ip_route_output_key(dev_net(dev), &fl4);
rt               1367 drivers/net/ethernet/sun/sunvnet_common.c 			if (!IS_ERR(rt)) {
rt               1368 drivers/net/ethernet/sun/sunvnet_common.c 				skb_dst_set(skb, &rt->dst);
rt                781 drivers/net/geneve.c 	struct rtable *rt = NULL;
rt                802 drivers/net/geneve.c 		rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
rt                803 drivers/net/geneve.c 		if (rt)
rt                804 drivers/net/geneve.c 			return rt;
rt                806 drivers/net/geneve.c 	rt = ip_route_output_key(geneve->net, fl4);
rt                807 drivers/net/geneve.c 	if (IS_ERR(rt)) {
rt                811 drivers/net/geneve.c 	if (rt->dst.dev == dev) { /* is this necessary? */
rt                813 drivers/net/geneve.c 		ip_rt_put(rt);
rt                817 drivers/net/geneve.c 		dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
rt                818 drivers/net/geneve.c 	return rt;
rt                881 drivers/net/geneve.c 	struct rtable *rt;
rt                888 drivers/net/geneve.c 	rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
rt                889 drivers/net/geneve.c 	if (IS_ERR(rt))
rt                890 drivers/net/geneve.c 		return PTR_ERR(rt);
rt                892 drivers/net/geneve.c 	skb_tunnel_check_pmtu(skb, &rt->dst,
rt                907 drivers/net/geneve.c 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
rt                925 drivers/net/geneve.c 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
rt                929 drivers/net/geneve.c 	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
rt               1038 drivers/net/geneve.c 		struct rtable *rt;
rt               1042 drivers/net/geneve.c 		rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
rt               1043 drivers/net/geneve.c 		if (IS_ERR(rt))
rt               1044 drivers/net/geneve.c 			return PTR_ERR(rt);
rt               1046 drivers/net/geneve.c 		ip_rt_put(rt);
rt               1508 drivers/net/geneve.c 		struct rtable *rt = ip_route_output_key(geneve->net, &fl4);
rt               1510 drivers/net/geneve.c 		if (!IS_ERR(rt) && rt->dst.dev) {
rt               1511 drivers/net/geneve.c 			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN;
rt               1512 drivers/net/geneve.c 			ip_rt_put(rt);
rt               1518 drivers/net/geneve.c 		struct rt6_info *rt;
rt               1523 drivers/net/geneve.c 		rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
rt               1526 drivers/net/geneve.c 		if (rt && rt->dst.dev)
rt               1527 drivers/net/geneve.c 			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
rt               1528 drivers/net/geneve.c 		ip6_rt_put(rt);
rt                448 drivers/net/gtp.c 	struct rtable		*rt;
rt                470 drivers/net/gtp.c 					struct pdp_ctx *pctx, struct rtable *rt,
rt                477 drivers/net/gtp.c 	pktinfo->rt	= rt;
rt                487 drivers/net/gtp.c 	struct rtable *rt;
rt                509 drivers/net/gtp.c 	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
rt                510 drivers/net/gtp.c 	if (IS_ERR(rt)) {
rt                517 drivers/net/gtp.c 	if (rt->dst.dev == dev) {
rt                529 drivers/net/gtp.c 		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
rt                540 drivers/net/gtp.c 		mtu = dst_mtu(&rt->dst);
rt                543 drivers/net/gtp.c 	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
rt                554 drivers/net/gtp.c 	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
rt                559 drivers/net/gtp.c 	ip_rt_put(rt);
rt                595 drivers/net/gtp.c 		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
rt                598 drivers/net/gtp.c 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
rt                420 drivers/net/ipvlan/ipvlan_core.c 	struct rtable *rt;
rt                431 drivers/net/ipvlan/ipvlan_core.c 	rt = ip_route_output_flow(net, &fl4, NULL);
rt                432 drivers/net/ipvlan/ipvlan_core.c 	if (IS_ERR(rt))
rt                435 drivers/net/ipvlan/ipvlan_core.c 	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
rt                436 drivers/net/ipvlan/ipvlan_core.c 		ip_rt_put(rt);
rt                439 drivers/net/ipvlan/ipvlan_core.c 	skb_dst_set(skb, &rt->dst);
rt                146 drivers/net/ppp/pptp.c 	struct rtable *rt;
rt                154 drivers/net/ppp/pptp.c 	rt = ip_route_output_ports(net, &fl4, NULL,
rt                159 drivers/net/ppp/pptp.c 	if (IS_ERR(rt))
rt                162 drivers/net/ppp/pptp.c 	tdev = rt->dst.dev;
rt                169 drivers/net/ppp/pptp.c 			ip_rt_put(rt);
rt                227 drivers/net/ppp/pptp.c 	if (ip_dont_fragment(sk, &rt->dst))
rt                235 drivers/net/ppp/pptp.c 	iph->ttl      = ip4_dst_hoplimit(&rt->dst);
rt                239 drivers/net/ppp/pptp.c 	skb_dst_set(skb, &rt->dst);
rt                409 drivers/net/ppp/pptp.c 	struct rtable *rt;
rt                443 drivers/net/ppp/pptp.c 	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
rt                448 drivers/net/ppp/pptp.c 	if (IS_ERR(rt)) {
rt                452 drivers/net/ppp/pptp.c 	sk_setup_caps(sk, &rt->dst);
rt                454 drivers/net/ppp/pptp.c 	po->chan.mtu = dst_mtu(&rt->dst);
rt                250 drivers/net/vrf.c 	struct rtable *rt;
rt                267 drivers/net/vrf.c 	rt = ip_route_output_flow(net, &fl4, NULL);
rt                268 drivers/net/vrf.c 	if (IS_ERR(rt))
rt                277 drivers/net/vrf.c 	if (rt->dst.dev == vrf_dev)
rt                278 drivers/net/vrf.c 		return vrf_local_xmit(skb, vrf_dev, &rt->dst);
rt                280 drivers/net/vrf.c 	skb_dst_set(skb, &rt->dst);
rt                557 drivers/net/vrf.c 	struct rtable *rt = (struct rtable *)dst;
rt                584 drivers/net/vrf.c 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
rt               1097 drivers/net/vrf.c 	struct rt6_info *rt;
rt               1111 drivers/net/vrf.c 	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
rt               1112 drivers/net/vrf.c 	if (rt)
rt               1113 drivers/net/vrf.c 		dst = &rt->dst;
rt               2226 drivers/net/vxlan.c 	struct rtable *rt = NULL;
rt               2235 drivers/net/vxlan.c 		rt = dst_cache_get_ip4(dst_cache, saddr);
rt               2236 drivers/net/vxlan.c 		if (rt)
rt               2237 drivers/net/vxlan.c 			return rt;
rt               2250 drivers/net/vxlan.c 	rt = ip_route_output_key(vxlan->net, &fl4);
rt               2251 drivers/net/vxlan.c 	if (!IS_ERR(rt)) {
rt               2252 drivers/net/vxlan.c 		if (rt->dst.dev == dev) {
rt               2254 drivers/net/vxlan.c 			ip_rt_put(rt);
rt               2260 drivers/net/vxlan.c 			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
rt               2265 drivers/net/vxlan.c 	return rt;
rt               2508 drivers/net/vxlan.c 		struct rtable *rt;
rt               2514 drivers/net/vxlan.c 		rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
rt               2519 drivers/net/vxlan.c 		if (IS_ERR(rt)) {
rt               2520 drivers/net/vxlan.c 			err = PTR_ERR(rt);
rt               2528 drivers/net/vxlan.c 						    &rt->dst, rt->rt_flags);
rt               2546 drivers/net/vxlan.c 		ndst = &rt->dst;
rt               2550 drivers/net/vxlan.c 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
rt               2556 drivers/net/vxlan.c 		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
rt               2931 drivers/net/vxlan.c 		struct rtable *rt;
rt               2933 drivers/net/vxlan.c 		rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
rt               2937 drivers/net/vxlan.c 		if (IS_ERR(rt))
rt               2938 drivers/net/vxlan.c 			return PTR_ERR(rt);
rt               2939 drivers/net/vxlan.c 		ip_rt_put(rt);
rt                172 drivers/net/wimax/i2400m/debugfs.c 	enum i2400m_reset_type rt = val;
rt                173 drivers/net/wimax/i2400m/debugfs.c 	switch(rt) {
rt                177 drivers/net/wimax/i2400m/debugfs.c 		result = i2400m_reset(i2400m, rt);
rt                815 drivers/net/wimax/i2400m/driver.c int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
rt                828 drivers/net/wimax/i2400m/driver.c 	return i2400m->bus_reset(i2400m, rt);
rt                261 drivers/net/wimax/i2400m/usb.c int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
rt                280 drivers/net/wimax/i2400m/usb.c 	d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
rt                281 drivers/net/wimax/i2400m/usb.c 	if (rt == I2400M_RT_WARM)
rt                286 drivers/net/wimax/i2400m/usb.c 	else if (rt == I2400M_RT_COLD)
rt                291 drivers/net/wimax/i2400m/usb.c 	else if (rt == I2400M_RT_BUS) {
rt                311 drivers/net/wimax/i2400m/usb.c 	    && rt != I2400M_RT_BUS) {
rt                320 drivers/net/wimax/i2400m/usb.c 			rt == I2400M_RT_WARM ? "warm" : "cold", result);
rt                324 drivers/net/wimax/i2400m/usb.c 	d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
rt                321 drivers/net/wireless/ath/dfs_pattern_detector.c 	const struct radar_types *rt;
rt                329 drivers/net/wireless/ath/dfs_pattern_detector.c 	rt = get_dfs_domain_radar_types(region);
rt                330 drivers/net/wireless/ath/dfs_pattern_detector.c 	if (rt == NULL)
rt                337 drivers/net/wireless/ath/dfs_pattern_detector.c 	dpd->radar_spec = rt->radar_types;
rt                338 drivers/net/wireless/ath/dfs_pattern_detector.c 	dpd->num_radar_types = rt->num_radar_types;
rt                151 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt                217 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt                585 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               4034 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               5196 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               5667 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               7137 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               9003 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt2x00dev->chip.rt) {
rt               10194 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	u32 rt;
rt               10202 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt = rt2x00_get_field32(reg, MAC_CSR0_CHIPSET);
rt               10205 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	switch (rt) {
rt               10225 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			   rt, rev);
rt               10229 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	if (rt == RT5390 && rt2x00_is_soc(rt2x00dev))
rt               10230 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		rt = RT6352;
rt               10232 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_rt(rt2x00dev, rt, rev);
rt                143 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	u16 rt;
rt               1091 drivers/net/wireless/ralink/rt2x00/rt2x00.h 				   const u16 rt, const u16 rf, const u16 rev)
rt               1093 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	rt2x00dev->chip.rt = rt;
rt               1098 drivers/net/wireless/ralink/rt2x00/rt2x00.h 		    rt2x00dev->chip.rt, rt2x00dev->chip.rf,
rt               1103 drivers/net/wireless/ralink/rt2x00/rt2x00.h 				 const u16 rt, const u16 rev)
rt               1105 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	rt2x00dev->chip.rt = rt;
rt               1109 drivers/net/wireless/ralink/rt2x00/rt2x00.h 		    rt2x00dev->chip.rt, rt2x00dev->chip.rev);
rt               1120 drivers/net/wireless/ralink/rt2x00/rt2x00.h static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
rt               1122 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	return (rt2x00dev->chip.rt == rt);
rt               1136 drivers/net/wireless/ralink/rt2x00/rt2x00.h 				 const u16 rt, const u16 rev)
rt               1138 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
rt               1142 drivers/net/wireless/ralink/rt2x00/rt2x00.h 				    const u16 rt, const u16 rev)
rt               1144 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
rt               1148 drivers/net/wireless/ralink/rt2x00/rt2x00.h 				     const u16 rt, const u16 rev)
rt               1150 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
rt                167 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
rt                609 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
rt                126 drivers/net/wireless/ralink/rt2x00/rt2x00pci.c 	rt2x00dev->chip.rt = chip;
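
In the rt2x00 entries above, chip.rt holds the RT chipset id read out of MAC_CSR0 during probe, and the rt2x00.h helpers compare it (optionally together with the revision) to gate chip-specific paths; note how an RT5390 detected on a SoC is rewritten to RT6352 before rt2x00_set_rt(). An illustrative check built on those helpers (the function and its use are invented):

	/* Invented example: after probe a single chipset check is enough,
	 * because the SoC special case was already folded into chip.rt. */
	static bool example_is_soc_variant(struct rt2x00_dev *rt2x00dev)
	{
		return rt2x00_rt(rt2x00dev, RT6352);
	}
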
rt                214 drivers/pinctrl/pinctrl-st.c 	struct regmap_field *rt[ST_GPIO_PINS_PER_BANK];
rt                234 drivers/pinctrl/pinctrl-st.c 	} rt;
rt                244 drivers/pinctrl/pinctrl-st.c 	const int alt, oe, pu, od, rt;
rt                346 drivers/pinctrl/pinctrl-st.c 	.alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
rt                359 drivers/pinctrl/pinctrl-st.c 	.rt = 100,
rt                520 drivers/pinctrl/pinctrl-st.c 	struct st_retime_packed *rt_p = &pc->rt.rt_p;
rt                560 drivers/pinctrl/pinctrl-st.c 	struct st_retime_dedicated *rt_d = &pc->rt.rt_d;
rt                571 drivers/pinctrl/pinctrl-st.c 	regmap_field_write(rt_d->rt[pin], retime_config);
rt                602 drivers/pinctrl/pinctrl-st.c 	struct st_retime_packed *rt_p = &pc->rt.rt_p;
rt                637 drivers/pinctrl/pinctrl-st.c 	struct st_retime_dedicated *rt_d = &pc->rt.rt_d;
rt                639 drivers/pinctrl/pinctrl-st.c 	regmap_field_read(rt_d->rt[pin], &value);
rt               1053 drivers/pinctrl/pinctrl-st.c 	int reg = (data->rt + bank * RT_P_CFGS_PER_BANK) * 4;
rt               1054 drivers/pinctrl/pinctrl-st.c 	struct st_retime_packed *rt_p = &pc->rt.rt_p;
rt               1089 drivers/pinctrl/pinctrl-st.c 	int reg_offset = (data->rt + bank * RT_D_CFGS_PER_BANK) * 4;
rt               1090 drivers/pinctrl/pinctrl-st.c 	struct st_retime_dedicated *rt_d = &pc->rt.rt_d;
rt               1097 drivers/pinctrl/pinctrl-st.c 			rt_d->rt[j] = devm_regmap_field_alloc(dev, rm, reg);
rt               1098 drivers/pinctrl/pinctrl-st.c 			if (IS_ERR(rt_d->rt[j]))
rt                743 drivers/platform/chrome/cros_ec_spi.c 	spi->rt = true;
rt                 88 drivers/platform/chrome/cros_usbpd_logger.c 	struct rtc_time rt;
rt                 95 drivers/platform/chrome/cros_usbpd_logger.c 	rt = rtc_ktime_to_tm(tstamp);
rt                163 drivers/platform/chrome/cros_usbpd_logger.c 		rt.tm_year + 1900, rt.tm_mon + 1, rt.tm_mday,
rt                164 drivers/platform/chrome/cros_usbpd_logger.c 		rt.tm_hour, rt.tm_min, rt.tm_sec, rem,
rt                274 drivers/power/supply/bq25890_charger.c 	struct bq25890_range  rt;
rt                278 drivers/power/supply/bq25890_charger.c 	[TBL_ICHG] =	{ .rt = {0,	  5056000, 64000} },	 /* uA */
rt                279 drivers/power/supply/bq25890_charger.c 	[TBL_ITERM] =	{ .rt = {64000,   1024000, 64000} },	 /* uA */
rt                280 drivers/power/supply/bq25890_charger.c 	[TBL_VREG] =	{ .rt = {3840000, 4608000, 16000} },	 /* uV */
rt                281 drivers/power/supply/bq25890_charger.c 	[TBL_BOOSTV] =	{ .rt = {4550000, 5510000, 64000} },	 /* uV */
rt                282 drivers/power/supply/bq25890_charger.c 	[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} },	 /* uV */
rt                319 drivers/power/supply/bq25890_charger.c 		const struct bq25890_range *rtbl = &bq25890_tables[id].rt;
rt                342 drivers/power/supply/bq25890_charger.c 	rtbl = &bq25890_tables[id].rt;
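
The bq25890 tables above encode each adjustable setting as a {min, max, step} range in micro-units (uA or uV), so a register field index maps linearly onto a physical value. A sketch of that conversion using a stand-in struct rather than the driver's own types:

	#include <linux/types.h>

	/* Stand-in for the driver's range type; only the arithmetic matters. */
	struct example_range { u32 min, max, step; };

	static u32 example_range_to_val(const struct example_range *rt, u8 idx)
	{
		u32 val = rt->min + idx * rt->step;	/* e.g. ICHG: 0 + idx * 64000 uA */

		return val > rt->max ? rt->max : val;	/* clamp to the table maximum */
	}
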
rt                187 drivers/s390/char/tape.h 	struct tape_class_device *	rt;
rt                 77 drivers/s390/char/tape_char.c 	device->rt = register_tape_dev(
rt                 91 drivers/s390/char/tape_char.c 	unregister_tape_dev(&device->cdev->dev, device->rt);
rt                 92 drivers/s390/char/tape_char.c 	device->rt = NULL;
rt                938 drivers/s390/net/qeth_core.h 	struct rt6_info *rt;
rt                940 drivers/s390/net/qeth_core.h 	rt = (struct rt6_info *) dst;
rt                942 drivers/s390/net/qeth_core.h 		dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
rt               1985 drivers/s390/net/qeth_l3_main.c 		struct rtable *rt = (struct rtable *) dst;
rt               1987 drivers/s390/net/qeth_l3_main.c 		*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt               1988 drivers/s390/net/qeth_l3_main.c 				rt_nexthop(rt, ip_hdr(skb)->daddr) :
rt               1991 drivers/s390/net/qeth_l3_main.c 		struct rt6_info *rt = (struct rt6_info *) dst;
rt               1993 drivers/s390/net/qeth_l3_main.c 		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
rt               1994 drivers/s390/net/qeth_l3_main.c 			l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
rt                591 drivers/scsi/cxgbi/libcxgbi.c 	struct rtable *rt;
rt                593 drivers/scsi/cxgbi/libcxgbi.c 	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
rt                595 drivers/scsi/cxgbi/libcxgbi.c 	if (IS_ERR(rt))
rt                598 drivers/scsi/cxgbi/libcxgbi.c 	return rt;
rt                608 drivers/scsi/cxgbi/libcxgbi.c 	struct rtable *rt = NULL;
rt                616 drivers/scsi/cxgbi/libcxgbi.c 	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
rt                618 drivers/scsi/cxgbi/libcxgbi.c 	if (!rt) {
rt                625 drivers/scsi/cxgbi/libcxgbi.c 	dst = &rt->dst;
rt                633 drivers/scsi/cxgbi/libcxgbi.c 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
rt                696 drivers/scsi/cxgbi/libcxgbi.c 	ip_rt_put(rt);
rt                724 drivers/scsi/cxgbi/libcxgbi.c 	struct rt6_info *rt = NULL;
rt                732 drivers/scsi/cxgbi/libcxgbi.c 	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);
rt                734 drivers/scsi/cxgbi/libcxgbi.c 	if (!rt) {
rt                742 drivers/scsi/cxgbi/libcxgbi.c 	dst = &rt->dst;
rt                793 drivers/scsi/cxgbi/libcxgbi.c 	rt6_get_prefsrc(rt, &pref_saddr);
rt                795 drivers/scsi/cxgbi/libcxgbi.c 		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
rt                820 drivers/scsi/cxgbi/libcxgbi.c 	ip6_rt_put(rt);
rt                913 drivers/slimbus/qcom-ngd-ctrl.c static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
rt                915 drivers/slimbus/qcom-ngd-ctrl.c 	struct slim_device *sdev = rt->dev;
rt                932 drivers/slimbus/qcom-ngd-ctrl.c 	for (i = 0; i < rt->num_ports; i++) {
rt                933 drivers/slimbus/qcom-ngd-ctrl.c 		struct slim_port *port = &rt->ports[i];
rt                936 drivers/slimbus/qcom-ngd-ctrl.c 			int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem;
rt                940 drivers/slimbus/qcom-ngd-ctrl.c 			wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
rt                949 drivers/slimbus/qcom-ngd-ctrl.c 			wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
rt                951 drivers/slimbus/qcom-ngd-ctrl.c 			if (rt->prot == SLIM_PROTO_ISO)
rt                420 drivers/slimbus/slimbus.h 	int		(*enable_stream)(struct slim_stream_runtime *rt);
rt                421 drivers/slimbus/slimbus.h 	int		(*disable_stream)(struct slim_stream_runtime *rt);
rt                102 drivers/slimbus/stream.c 	struct slim_stream_runtime *rt;
rt                104 drivers/slimbus/stream.c 	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
rt                105 drivers/slimbus/stream.c 	if (!rt)
rt                108 drivers/slimbus/stream.c 	rt->name = kasprintf(GFP_KERNEL, "slim-%s", name);
rt                109 drivers/slimbus/stream.c 	if (!rt->name) {
rt                110 drivers/slimbus/stream.c 		kfree(rt);
rt                114 drivers/slimbus/stream.c 	rt->dev = dev;
rt                116 drivers/slimbus/stream.c 	list_add_tail(&rt->node, &dev->stream_list);
rt                119 drivers/slimbus/stream.c 	return rt;
rt                202 drivers/slimbus/stream.c int slim_stream_prepare(struct slim_stream_runtime *rt,
rt                205 drivers/slimbus/stream.c 	struct slim_controller *ctrl = rt->dev->ctrl;
rt                209 drivers/slimbus/stream.c 	if (rt->ports) {
rt                210 drivers/slimbus/stream.c 		dev_err(&rt->dev->dev, "Stream already Prepared\n");
rt                215 drivers/slimbus/stream.c 	rt->ports = kcalloc(num_ports, sizeof(*port), GFP_KERNEL);
rt                216 drivers/slimbus/stream.c 	if (!rt->ports)
rt                219 drivers/slimbus/stream.c 	rt->num_ports = num_ports;
rt                220 drivers/slimbus/stream.c 	rt->rate = cfg->rate;
rt                221 drivers/slimbus/stream.c 	rt->bps = cfg->bps;
rt                222 drivers/slimbus/stream.c 	rt->direction = cfg->direction;
rt                230 drivers/slimbus/stream.c 			rt->prot = SLIM_PROTO_PUSH;
rt                232 drivers/slimbus/stream.c 			rt->prot = SLIM_PROTO_PULL;
rt                234 drivers/slimbus/stream.c 		rt->prot = SLIM_PROTO_ISO;
rt                237 drivers/slimbus/stream.c 	rt->ratem = cfg->rate/ctrl->a_framer->superfreq;
rt                241 drivers/slimbus/stream.c 		port = &rt->ports[i];
rt                255 drivers/slimbus/stream.c 		slim_connect_port_channel(rt, port);
rt               2111 drivers/spi/spi-pl022.c 	pd->rt = of_property_read_bool(np, "pl022,rt");
rt               2173 drivers/spi/spi-pl022.c 	master->rt = platform_info->rt;
rt               1466 drivers/spi/spi.c 	if (ctlr->rt)
rt               3096 drivers/spi/spi.c 	if (spi->rt && !spi->controller->rt) {
rt               3097 drivers/spi/spi.c 		spi->controller->rt = true;
rt                445 drivers/staging/isdn/gigaset/ev-layer.c 	const struct resp_type_t *rt;
rt                467 drivers/staging/isdn/gigaset/ev-layer.c 	for (rt = resp_type; rt->response; ++rt) {
rt                468 drivers/staging/isdn/gigaset/ev-layer.c 		eoc = skip_prefix(cs->respdata, rt->response);
rt                472 drivers/staging/isdn/gigaset/ev-layer.c 	if (!rt->response) {
rt                495 drivers/staging/isdn/gigaset/ev-layer.c 	switch (rt->type) {
rt                501 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, NULL, 0);
rt                511 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, 0, rt->resp_code, NULL, cid);
rt                517 drivers/staging/isdn/gigaset/ev-layer.c 			for (rt = resp_type; rt->response; ++rt) {
rt                518 drivers/staging/isdn/gigaset/ev-layer.c 				psep = skip_prefix(eoc, rt->response);
rt                524 drivers/staging/isdn/gigaset/ev-layer.c 			if (!psep || rt->type != RT_STRING) {
rt                544 drivers/staging/isdn/gigaset/ev-layer.c 			add_cid_event(cs, cid, rt->resp_code, ptr, 0);
rt                552 drivers/staging/isdn/gigaset/ev-layer.c 			add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
rt                565 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
rt                577 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, ptr, 0);
rt                596 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
rt                609 drivers/staging/isdn/gigaset/ev-layer.c 		if (rt->resp_code == RSP_ZDLE)
rt                612 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
rt                619 drivers/staging/isdn/gigaset/ev-layer.c 		add_cid_event(cs, cid, rt->resp_code, NULL, -1);
rt               1425 drivers/staging/rtl8192u/r8192U_core.c 	int status, rt = -1;
rt               1570 drivers/staging/rtl8192u/r8192U_core.c 				rt = -ENOMEM;
rt               1598 drivers/staging/rtl8192u/r8192U_core.c 	return rt;
rt                245 drivers/usb/host/fhci-tds.c 	u8 rt;
rt                253 drivers/usb/host/fhci-tds.c 	rt = (BUS_MODE_BO_BE | BUS_MODE_GBL);
rt                256 drivers/usb/host/fhci-tds.c 		rt |= BUS_MODE_DTB;
rt                258 drivers/usb/host/fhci-tds.c 	out_8(&ep->ep_pram_ptr->rx_func_code, rt);
rt                259 drivers/usb/host/fhci-tds.c 	out_8(&ep->ep_pram_ptr->tx_func_code, rt);
rt                136 drivers/usb/usbip/usbip_common.c static void usbip_dump_request_type(__u8 rt)
rt                138 drivers/usb/usbip/usbip_common.c 	switch (rt & USB_RECIP_MASK) {
rt                 77 drivers/video/fbdev/sticore.h #define REGION_OFFSET_TO_PHYS( rt, hpa ) \
rt                 78 drivers/video/fbdev/sticore.h 	(((rt).region_desc.offset << 12) + (hpa))
rt                136 fs/hpfs/alloc.c 		goto rt;
rt                151 fs/hpfs/alloc.c 		goto rt;
rt                174 fs/hpfs/alloc.c 			goto rt;
rt                179 fs/hpfs/alloc.c 	rt:
rt               2871 fs/xfs/libxfs/xfs_bmap.c 	int		rt,		/* is this a realtime inode? */
rt               2986 fs/xfs/libxfs/xfs_bmap.c 	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
rt               3049 fs/xfs/libxfs/xfs_bmap.c 	int		rt;		/* true if inode is realtime */
rt               3052 fs/xfs/libxfs/xfs_bmap.c 	(rt ? \
rt               3060 fs/xfs/libxfs/xfs_bmap.c 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
rt               3126 fs/xfs/libxfs/xfs_bmap.c 			if (!rt && !nullfb &&
rt               3168 fs/xfs/libxfs/xfs_bmap.c 			if (!rt && !nullfb &&
rt                870 fs/xfs/xfs_bmap_util.c 	int			rt;
rt                888 fs/xfs/xfs_bmap_util.c 	rt = XFS_IS_REALTIME_INODE(ip);
rt                931 fs/xfs/xfs_bmap_util.c 		if (unlikely(rt)) {
rt                 50 fs/xfs/xfs_bmap_util.h 			       int rt, int eof, int delay, int convert,
rt                182 fs/xfs/xfs_iomap.c 	int		rt;
rt                190 fs/xfs/xfs_iomap.c 	rt = XFS_IS_REALTIME_INODE(ip);
rt                221 fs/xfs/xfs_iomap.c 	if (unlikely(rt)) {
rt                 55 include/crypto/engine.h 	bool			rt;
rt                108 include/crypto/engine.h struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
rt                248 include/linux/amba/pl022.h 	bool rt;
rt                677 include/linux/sched.h 	struct sched_rt_entity		rt;
rt                147 include/linux/spi/spi.h 	bool			rt;
rt                560 include/linux/spi/spi.h 	bool				rt;
rt                 23 include/linux/sunrpc/timer.h extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
rt                 24 include/linux/sunrpc/timer.h extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
rt                 25 include/linux/sunrpc/timer.h extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
rt                 27 include/linux/sunrpc/timer.h static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo)
rt                 32 include/linux/sunrpc/timer.h 	t = &rt->ntimeouts[timer-1];
rt                 43 include/linux/sunrpc/timer.h static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer)
rt                 47 include/linux/sunrpc/timer.h 	return rt->ntimeouts[timer-1];
rt                122 include/math-emu/double.h #define FP_FROM_INT_D(X,r,rs,rt)	_FP_FROM_INT(D,2,X,r,rs,rt)
rt                197 include/math-emu/double.h #define FP_FROM_INT_D(X,r,rs,rt)	_FP_FROM_INT(D,1,X,r,rs,rt)
rt                128 include/math-emu/quad.h #define FP_FROM_INT_Q(X,r,rs,rt)	_FP_FROM_INT(Q,4,X,r,rs,rt)
rt                201 include/math-emu/quad.h #define FP_FROM_INT_Q(X,r,rs,rt)	_FP_FROM_INT(Q,2,X,r,rs,rt)
rt                111 include/math-emu/single.h #define FP_FROM_INT_S(X,r,rs,rt)	_FP_FROM_INT(S,1,X,r,rs,rt)
rt                 81 include/net/dn_route.h static inline bool dn_is_input_route(struct dn_route *rt)
rt                 83 include/net/dn_route.h 	return rt->fld.flowidn_iif != 0;
rt                 86 include/net/dn_route.h static inline bool dn_is_output_route(struct dn_route *rt)
rt                 88 include/net/dn_route.h 	return rt->fld.flowidn_iif == 0;
rt                 63 include/net/if_inet6.h 	struct fib6_info	*rt;
rt                215 include/net/ip.h 		   struct rtable **rt,
rt                406 include/net/ip.h 	const struct rtable *rt = (const struct rtable *)dst;
rt                408 include/net/ip.h 	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
rt                698 include/net/ip.h 		      __be32 daddr, struct rtable *rt, int is_frag);
rt                204 include/net/ip6_fib.h 	for (rt = rcu_dereference((fn)->leaf); rt;			\
rt                205 include/net/ip6_fib.h 	     rt = rcu_dereference(rt->fib6_next))
rt                208 include/net/ip6_fib.h 	for (rt = (w)->leaf; rt;					\
rt                209 include/net/ip6_fib.h 	     rt = rcu_dereference_protected(rt->fib6_next, 1))
rt                259 include/net/ip6_fib.h static inline u32 rt6_get_cookie(const struct rt6_info *rt)
rt                264 include/net/ip6_fib.h 	if (rt->sernum)
rt                265 include/net/ip6_fib.h 		return rt->sernum;
rt                269 include/net/ip6_fib.h 	from = rcu_dereference(rt->from);
rt                278 include/net/ip6_fib.h static inline void ip6_rt_put(struct rt6_info *rt)
rt                284 include/net/ip6_fib.h 	dst_release(&rt->dst);
rt                384 include/net/ip6_fib.h 	struct fib6_info *rt;
rt                427 include/net/ip6_fib.h int fib6_add(struct fib6_node *root, struct fib6_info *rt,
rt                429 include/net/ip6_fib.h int fib6_del(struct fib6_info *rt, struct nl_info *info);
rt                432 include/net/ip6_fib.h void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
rt                438 include/net/ip6_fib.h 	from = rcu_dereference(rt->from);
rt                457 include/net/ip6_fib.h 			      struct fib6_info *rt,
rt                461 include/net/ip6_fib.h 					struct fib6_info *rt,
rt                464 include/net/ip6_fib.h void fib6_rt_update(struct net *net, struct fib6_info *rt,
rt                466 include/net/ip6_fib.h void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
rt                497 include/net/ip6_fib.h void fib6_update_sernum(struct net *net, struct fib6_info *rt);
rt                498 include/net/ip6_fib.h void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
rt                104 include/net/ip6_route.h static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags)
rt                107 include/net/ip6_route.h 	    !list_empty(&rt->rt6i_uncached))
rt                108 include/net/ip6_route.h 		ip6_rt_put(rt);
rt                209 include/net/ip6_route.h void rt6_uncached_list_add(struct rt6_info *rt);
rt                210 include/net/ip6_route.h void rt6_uncached_list_del(struct rt6_info *rt);
rt                245 include/net/ip6_route.h 	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
rt                247 include/net/ip6_route.h 	return rt->rt6i_flags & RTF_LOCAL;
rt                253 include/net/ip6_route.h 	struct rt6_info *rt = (struct rt6_info *)dst;
rt                255 include/net/ip6_route.h 	return rt->rt6i_flags & RTF_ANYCAST ||
rt                256 include/net/ip6_route.h 		(rt->rt6i_dst.plen < 127 &&
rt                257 include/net/ip6_route.h 		 !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
rt                258 include/net/ip6_route.h 		 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
rt                285 include/net/ip6_route.h static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
rt                288 include/net/ip6_route.h 	if (rt->rt6i_flags & RTF_GATEWAY)
rt                289 include/net/ip6_route.h 		return &rt->rt6i_gateway;
rt                290 include/net/ip6_route.h 	else if (unlikely(rt->rt6i_flags & RTF_CACHE))
rt                291 include/net/ip6_route.h 		return &rt->rt6i_dst.addr;
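
rt6_nexthop() above picks the address a packet must actually be sent to: the configured gateway when RTF_GATEWAY is set, the cached destination for RTF_CACHE clones, and otherwise the packet's own destination. Its usual consumer is the IPv6 neighbour lookup on the route's device, roughly as in ip6_finish_output2(); a sketch, with everything except the two helpers being hypothetical:

	/* Sketch: resolve the neighbour for the next hop chosen by rt6_nexthop().
	 * Caller is assumed to hold rcu_read_lock(). */
	static struct neighbour *example_nexthop_neigh(struct rt6_info *rt,
						       struct in6_addr *daddr)
	{
		const struct in6_addr *nexthop = rt6_nexthop(rt, daddr);

		return __ipv6_neigh_lookup_noref(rt->dst.dev, nexthop);
	}
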
rt                414 include/net/ip_tunnels.h void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
rt                993 include/net/ipv6.h 		    struct rt6_info *rt, unsigned int flags);
rt               1009 include/net/ipv6.h 			     struct rt6_info *rt, unsigned int flags,
rt                 50 include/net/ipv6_stubs.h 	void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
rt                 51 include/net/ipv6_stubs.h 	int (*ip6_del_rt)(struct net *net, struct fib6_info *rt);
rt                 52 include/net/ipv6_stubs.h 	void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
rt                161 include/net/ipx.h static __inline__ void ipxrtr_hold(struct ipx_route *rt)
rt                163 include/net/ipx.h 	        refcount_inc(&rt->refcnt);
rt                166 include/net/ipx.h static __inline__ void ipxrtr_put(struct ipx_route *rt)
rt                168 include/net/ipx.h 	        if (refcount_dec_and_test(&rt->refcnt))
rt                169 include/net/ipx.h 			                kfree(rt);
rt                 75 include/net/route.h static inline bool rt_is_input_route(const struct rtable *rt)
rt                 77 include/net/route.h 	return rt->rt_is_input != 0;
rt                 80 include/net/route.h static inline bool rt_is_output_route(const struct rtable *rt)
rt                 82 include/net/route.h 	return rt->rt_is_input == 0;
rt                 85 include/net/route.h static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
rt                 87 include/net/route.h 	if (rt->rt_gw_family == AF_INET)
rt                 88 include/net/route.h 		return rt->rt_gw4;
rt                220 include/net/route.h int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
rt                221 include/net/route.h void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
rt                225 include/net/route.h struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
rt                232 include/net/route.h void rt_add_uncached_list(struct rtable *rt);
rt                233 include/net/route.h void rt_del_uncached_list(struct rtable *rt);
rt                239 include/net/route.h static inline void ip_rt_put(struct rtable *rt)
rt                245 include/net/route.h 	dst_release(&rt->dst);
rt                303 include/net/route.h 	struct rtable *rt;
rt                309 include/net/route.h 		rt = __ip_route_output_key(net, fl4);
rt                310 include/net/route.h 		if (IS_ERR(rt))
rt                311 include/net/route.h 			return rt;
rt                312 include/net/route.h 		ip_rt_put(rt);
rt                319 include/net/route.h static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
rt                327 include/net/route.h 		ip_rt_put(rt);
rt                334 include/net/route.h 	return rt;
rt                339 include/net/route.h 	struct rtable *rt = skb_rtable(skb);
rt                341 include/net/route.h 	if (rt && rt->rt_iif)
rt                342 include/net/route.h 		return rt->rt_iif;
rt                369 include/net/route.h static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
rt                373 include/net/route.h 	struct net_device *dev = rt->dst.dev;
rt                376 include/net/route.h 	if (likely(rt->rt_gw_family == AF_INET)) {
rt                377 include/net/route.h 		neigh = ip_neigh_gw4(dev, rt->rt_gw4);
rt                378 include/net/route.h 	} else if (rt->rt_gw_family == AF_INET6) {
rt                379 include/net/route.h 		neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
rt                141 include/net/udp_tunnel.h void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
rt                268 include/net/x25.h static __inline__ void x25_route_hold(struct x25_route *rt)
rt                270 include/net/x25.h 	refcount_inc(&rt->refcnt);
rt                273 include/net/x25.h static __inline__ void x25_route_put(struct x25_route *rt)
rt                275 include/net/x25.h 	if (refcount_dec_and_test(&rt->refcnt))
rt                276 include/net/x25.h 		kfree(rt);
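
The ipx and x25 helpers just above are the standard refcount_t idiom: _hold() is refcount_inc(), and _put() frees the route only when refcount_dec_and_test() drops the last reference. A generic illustration with a hypothetical structure:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Hypothetical route object; only the refcounting idiom matters here. */
	struct example_route {
		refcount_t refcnt;
	};

	static void example_route_hold(struct example_route *rt)
	{
		refcount_inc(&rt->refcnt);
	}

	static void example_route_put(struct example_route *rt)
	{
		if (refcount_dec_and_test(&rt->refcnt))
			kfree(rt);	/* last reference gone: free the object */
	}
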
rt                929 include/net/xfrm.h 		struct rtable		rt;
rt                866 include/uapi/linux/cdrom.h 	__u8 rt				: 1;
rt                883 include/uapi/linux/cdrom.h 	__u8 rt				: 1;
rt                 84 init/init_task.c 	.rt		= {
rt                 85 init/init_task.c 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
rt                674 kernel/sched/core.c 	if (rq->rt.rr_nr_running) {
rt                675 kernel/sched/core.c 		if (rq->rt.rr_nr_running == 1)
rt                685 kernel/sched/core.c 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
rt               2704 kernel/sched/core.c 	INIT_LIST_HEAD(&p->rt.run_list);
rt               2705 kernel/sched/core.c 	p->rt.timeout		= 0;
rt               2706 kernel/sched/core.c 	p->rt.time_slice	= sched_rr_timeslice;
rt               2707 kernel/sched/core.c 	p->rt.on_rq		= 0;
rt               2708 kernel/sched/core.c 	p->rt.on_list		= 0;
rt               4466 kernel/sched/core.c 			p->rt.timeout = 0;
rt               6628 kernel/sched/core.c 		init_rt_rq(&rq->rt);
rt               6657 kernel/sched/core.c 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
rt               6659 kernel/sched/core.c 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
rt                214 kernel/sched/cpufreq_schedutil.c 	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
rt               1268 kernel/sched/deadline.c 		struct rt_rq *rt_rq = &rq->rt;
rt               8645 kernel/sched/fair.c 		enum fbq_type rt;
rt               8648 kernel/sched/fair.c 		rt = fbq_classify_rq(rq);
rt               8669 kernel/sched/fair.c 		if (rt > env->fbq_type)
rt                118 kernel/sched/rt.c 	return container_of(rt_se, struct task_struct, rt);
rt                174 kernel/sched/rt.c 		rt_se->rt_rq = &rq->rt;
rt                229 kernel/sched/rt.c 	return container_of(rt_se, struct task_struct, rt);
rt                234 kernel/sched/rt.c 	return container_of(rt_rq, struct rq, rt);
rt                248 kernel/sched/rt.c 	return &rq->rt;
rt                266 kernel/sched/rt.c 	return rq->rt.highest_prio.curr > prev->prio;
rt                324 kernel/sched/rt.c 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt                341 kernel/sched/rt.c 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt                352 kernel/sched/rt.c 	return !plist_head_empty(&rq->rt.pushable_tasks);
rt                376 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
rt                378 kernel/sched/rt.c 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
rt                381 kernel/sched/rt.c 	if (p->prio < rq->rt.highest_prio.next)
rt                382 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
rt                387 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
rt                391 kernel/sched/rt.c 		p = plist_first_entry(&rq->rt.pushable_tasks,
rt                393 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
rt                395 kernel/sched/rt.c 		rq->rt.highest_prio.next = MAX_RT_PRIO;
rt                578 kernel/sched/rt.c 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
rt                617 kernel/sched/rt.c 	return &cpu_rq(cpu)->rt;
rt                958 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &curr->rt;
rt               1000 kernel/sched/rt.c 	BUG_ON(&rq->rt != rt_rq);
rt               1017 kernel/sched/rt.c 	BUG_ON(&rq->rt != rt_rq);
rt               1045 kernel/sched/rt.c 	if (&rq->rt != rt_rq)
rt               1061 kernel/sched/rt.c 	if (&rq->rt != rt_rq)
rt               1307 kernel/sched/rt.c 	enqueue_top_rt_rq(&rq->rt);
rt               1322 kernel/sched/rt.c 	enqueue_top_rt_rq(&rq->rt);
rt               1331 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt               1344 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt               1372 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt               1436 kernel/sched/rt.c 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
rt               1474 kernel/sched/rt.c 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
rt               1559 kernel/sched/rt.c 	struct rt_rq *rt_rq  = &rq->rt;
rt               1595 kernel/sched/rt.c 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
rt               1619 kernel/sched/rt.c 	struct plist_head *head = &rq->rt.pushable_tasks;
rt               1725 kernel/sched/rt.c 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
rt               1756 kernel/sched/rt.c 		if (lowest_rq->rt.highest_prio.curr > task->prio)
rt               1774 kernel/sched/rt.c 	p = plist_first_entry(&rq->rt.pushable_tasks,
rt               1798 kernel/sched/rt.c 	if (!rq->rt.overloaded)
rt               2091 kernel/sched/rt.c 		if (src_rq->rt.highest_prio.next >=
rt               2092 kernel/sched/rt.c 		    this_rq->rt.highest_prio.curr)
rt               2112 kernel/sched/rt.c 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
rt               2165 kernel/sched/rt.c 	if (rq->rt.overloaded)
rt               2170 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
rt               2176 kernel/sched/rt.c 	if (rq->rt.overloaded)
rt               2197 kernel/sched/rt.c 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
rt               2230 kernel/sched/rt.c 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rt               2261 kernel/sched/rt.c 		if (p->prio > rq->rt.highest_prio.curr)
rt               2291 kernel/sched/rt.c 		if (p->rt.watchdog_stamp != jiffies) {
rt               2292 kernel/sched/rt.c 			p->rt.timeout++;
rt               2293 kernel/sched/rt.c 			p->rt.watchdog_stamp = jiffies;
rt               2297 kernel/sched/rt.c 		if (p->rt.timeout > next) {
rt               2317 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
rt               2331 kernel/sched/rt.c 	if (--p->rt.time_slice)
rt               2334 kernel/sched/rt.c 	p->rt.time_slice = sched_rr_timeslice;
rt               2619 kernel/sched/rt.c 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
rt                888 kernel/sched/sched.h 	struct rt_rq		rt;
rt               1515 kernel/sched/sched.h 	p->rt.rt_rq  = tg->rt_rq[cpu];
rt               1516 kernel/sched/sched.h 	p->rt.parent = tg->rt_se[cpu];
rt               1822 kernel/sched/sched.h 	return rq->rt.rt_queued > 0;
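
Throughout the scheduler entries above, task_struct embeds its struct sched_rt_entity as the plain member rt (and struct rq embeds its struct rt_rq the same way), which is why rt.c can walk back from an entity pointer to the owning task with container_of(). A generic sketch of that embedding idiom, with invented structure names:

	#include <linux/kernel.h>	/* container_of() */

	struct example_rt_entity {
		int prio;
	};

	struct example_task {
		struct example_rt_entity rt;	/* embedded by value, not a pointer */
	};

	/* Recover the enclosing task from a pointer to its embedded entity. */
	static struct example_task *example_task_of(struct example_rt_entity *rt_se)
	{
		return container_of(rt_se, struct example_task, rt);
	}
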
rt                807 kernel/time/posix-cpu-timers.c static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
rt                814 kernel/time/posix-cpu-timers.c 			rt ? "RT" : "CPU", hard ? "hard" : "soft",
rt                848 kernel/time/posix-cpu-timers.c 		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
rt                122 net/appletalk/atalk_proc.c 	struct atalk_route *rt;
rt                130 net/appletalk/atalk_proc.c 		rt = &atrtr_default;
rt                132 net/appletalk/atalk_proc.c 			       ntohs(rt->gateway.s_net), rt->gateway.s_node,
rt                133 net/appletalk/atalk_proc.c 			       rt->flags, rt->dev->name);
rt                136 net/appletalk/atalk_proc.c 	rt = v;
rt                138 net/appletalk/atalk_proc.c 		   ntohs(rt->target.s_net), rt->target.s_node,
rt                139 net/appletalk/atalk_proc.c 		   ntohs(rt->gateway.s_net), rt->gateway.s_node,
rt                140 net/appletalk/atalk_proc.c 		   rt->flags, rt->dev->name);
rt                499 net/appletalk/ddp.c 	struct atalk_route *rt;
rt                515 net/appletalk/ddp.c 	for (rt = atalk_routes; rt; rt = rt->next) {
rt                516 net/appletalk/ddp.c 		if (r->rt_flags != rt->flags)
rt                519 net/appletalk/ddp.c 		if (ta->sat_addr.s_net == rt->target.s_net) {
rt                520 net/appletalk/ddp.c 			if (!(rt->flags & RTF_HOST))
rt                522 net/appletalk/ddp.c 			if (ta->sat_addr.s_node == rt->target.s_node)
rt                552 net/appletalk/ddp.c 	if (!rt) {
rt                553 net/appletalk/ddp.c 		rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
rt                556 net/appletalk/ddp.c 		if (!rt)
rt                559 net/appletalk/ddp.c 		rt->next = atalk_routes;
rt                560 net/appletalk/ddp.c 		atalk_routes = rt;
rt                564 net/appletalk/ddp.c 	rt->target  = ta->sat_addr;
rt                566 net/appletalk/ddp.c 	rt->dev     = devhint;
rt                567 net/appletalk/ddp.c 	rt->flags   = r->rt_flags;
rt                568 net/appletalk/ddp.c 	rt->gateway = ga->sat_addr;
rt                873 net/appletalk/ddp.c 	struct rtentry rt;
rt                875 net/appletalk/ddp.c 	if (copy_from_user(&rt, arg, sizeof(rt)))
rt                880 net/appletalk/ddp.c 		if (rt.rt_dst.sa_family != AF_APPLETALK)
rt                883 net/appletalk/ddp.c 				      &rt.rt_dst)->sat_addr);
rt                887 net/appletalk/ddp.c 		if (rt.rt_dev) {
rt                889 net/appletalk/ddp.c 			if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
rt                896 net/appletalk/ddp.c 		return atrtr_create(&rt, dev);
rt               1315 net/appletalk/ddp.c 	struct atalk_route *rt;
rt               1339 net/appletalk/ddp.c 	rt = atrtr_find(&ta);
rt               1342 net/appletalk/ddp.c 	if (!rt || !(len_hops & (15 << 10)))
rt               1352 net/appletalk/ddp.c 	if (rt->flags & RTF_GATEWAY) {
rt               1353 net/appletalk/ddp.c 		ta.s_net  = rt->gateway.s_net;
rt               1354 net/appletalk/ddp.c 		ta.s_node = rt->gateway.s_node;
rt               1359 net/appletalk/ddp.c 			    (rt->dev->hard_header_len +
rt               1391 net/appletalk/ddp.c 	if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
rt               1572 net/appletalk/ddp.c 	struct atalk_route *rt;
rt               1617 net/appletalk/ddp.c 		rt = atrtr_find(&usat->sat_addr);
rt               1624 net/appletalk/ddp.c 		rt = atrtr_find(&at_hint);
rt               1627 net/appletalk/ddp.c 	if (!rt)
rt               1630 net/appletalk/ddp.c 	dev = rt->dev;
rt               1676 net/appletalk/ddp.c 	    !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) {
rt               1699 net/appletalk/ddp.c 			rt = atrtr_find(&at_lo);
rt               1700 net/appletalk/ddp.c 			if (!rt) {
rt               1705 net/appletalk/ddp.c 			dev = rt->dev;
rt               1711 net/appletalk/ddp.c 		if (rt->flags & RTF_GATEWAY) {
rt               1712 net/appletalk/ddp.c 		    gsat.sat_addr = rt->gateway;
rt                336 net/atm/clip.c 	struct rtable *rt;
rt                348 net/atm/clip.c 	rt = (struct rtable *) dst;
rt                349 net/atm/clip.c 	if (rt->rt_gw_family == AF_INET)
rt                350 net/atm/clip.c 		daddr = &rt->rt_gw4;
rt                450 net/atm/clip.c 	struct rtable *rt;
rt                466 net/atm/clip.c 	rt = ip_route_output(&init_net, ip, 0, 1, 0);
rt                467 net/atm/clip.c 	if (IS_ERR(rt))
rt                468 net/atm/clip.c 		return PTR_ERR(rt);
rt                469 net/atm/clip.c 	neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
rt                470 net/atm/clip.c 	ip_rt_put(rt);
rt                163 net/bluetooth/6lowpan.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
rt                169 net/bluetooth/6lowpan.c 	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
rt                171 net/bluetooth/6lowpan.c 	if (!rt) {
rt                183 net/bluetooth/6lowpan.c 		nexthop = rt6_nexthop(rt, daddr);
rt                357 net/bridge/br_netfilter_hooks.c 	struct rtable *rt;
rt                381 net/bridge/br_netfilter_hooks.c 			rt = ip_route_output(net, iph->daddr, 0,
rt                383 net/bridge/br_netfilter_hooks.c 			if (!IS_ERR(rt)) {
rt                386 net/bridge/br_netfilter_hooks.c 				if (rt->dst.dev == dev) {
rt                387 net/bridge/br_netfilter_hooks.c 					skb_dst_set(skb, &rt->dst);
rt                390 net/bridge/br_netfilter_hooks.c 				ip_rt_put(rt);
rt                411 net/bridge/br_netfilter_hooks.c 		rt = bridge_parent_rtable(nf_bridge->physindev);
rt                412 net/bridge/br_netfilter_hooks.c 		if (!rt) {
rt                416 net/bridge/br_netfilter_hooks.c 		skb_dst_set_noref(skb, &rt->dst);
rt                163 net/bridge/br_netfilter_ipv6.c 	struct rtable *rt;
rt                195 net/bridge/br_netfilter_ipv6.c 		rt = bridge_parent_rtable(nf_bridge->physindev);
rt                196 net/bridge/br_netfilter_ipv6.c 		if (!rt) {
rt                200 net/bridge/br_netfilter_ipv6.c 		skb_dst_set_noref(skb, &rt->dst);
rt                 74 net/bridge/br_nf_core.c 	struct rtable *rt = &br->fake_rtable;
rt                 76 net/bridge/br_nf_core.c 	atomic_set(&rt->dst.__refcnt, 1);
rt                 77 net/bridge/br_nf_core.c 	rt->dst.dev = br->dev;
rt                 78 net/bridge/br_nf_core.c 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
rt                 79 net/bridge/br_nf_core.c 	rt->dst.flags	= DST_NOXFRM | DST_FAKE_RTABLE;
rt                 80 net/bridge/br_nf_core.c 	rt->dst.ops = &fake_dst_ops;
rt                204 net/core/lwt_bpf.c 		struct rtable *rt;
rt                215 net/core/lwt_bpf.c 		rt = ip_route_output_key(net, &fl4);
rt                216 net/core/lwt_bpf.c 		if (IS_ERR(rt)) {
rt                217 net/core/lwt_bpf.c 			err = PTR_ERR(rt);
rt                220 net/core/lwt_bpf.c 		dst = &rt->dst;
rt                 46 net/dccp/ipv4.c 	struct rtable *rt;
rt                 71 net/dccp/ipv4.c 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
rt                 75 net/dccp/ipv4.c 	if (IS_ERR(rt))
rt                 76 net/dccp/ipv4.c 		return PTR_ERR(rt);
rt                 78 net/dccp/ipv4.c 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
rt                 79 net/dccp/ipv4.c 		ip_rt_put(rt);
rt                106 net/dccp/ipv4.c 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
rt                108 net/dccp/ipv4.c 	if (IS_ERR(rt)) {
rt                109 net/dccp/ipv4.c 		err = PTR_ERR(rt);
rt                110 net/dccp/ipv4.c 		rt = NULL;
rt                114 net/dccp/ipv4.c 	sk_setup_caps(sk, &rt->dst);
rt                123 net/dccp/ipv4.c 	rt = NULL;
rt                133 net/dccp/ipv4.c 	ip_rt_put(rt);
rt                455 net/dccp/ipv4.c 	struct rtable *rt;
rt                468 net/dccp/ipv4.c 	rt = ip_route_output_flow(net, &fl4, sk);
rt                469 net/dccp/ipv4.c 	if (IS_ERR(rt)) {
rt                474 net/dccp/ipv4.c 	return &rt->dst;
rt                176 net/decnet/dn_neigh.c 	struct dn_route *rt = (struct dn_route *)dst;
rt                182 net/decnet/dn_neigh.c 	dn_dn2eth(mac_addr, rt->rt_local_src);
rt                201 net/decnet/dn_neigh.c 	struct dn_route *rt = (struct dn_route *)dst;
rt                202 net/decnet/dn_neigh.c 	struct neighbour *neigh = rt->n;
rt                341 net/decnet/dn_neigh.c 	struct dn_route *rt = (struct dn_route *) dst;
rt                342 net/decnet/dn_neigh.c 	struct neighbour *neigh = rt->n;
rt                149 net/decnet/dn_route.c 	struct dn_route *rt = (struct dn_route *) dst;
rt                151 net/decnet/dn_route.c 	if (rt->n)
rt                152 net/decnet/dn_route.c 		neigh_release(rt->n);
rt                159 net/decnet/dn_route.c 		struct dn_route *rt = (struct dn_route *) dst;
rt                160 net/decnet/dn_route.c 		struct neighbour *n = rt->n;
rt                182 net/decnet/dn_route.c 	struct dn_route *rt;
rt                191 net/decnet/dn_route.c 		while ((rt = rcu_dereference_protected(*rtp,
rt                193 net/decnet/dn_route.c 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
rt                194 net/decnet/dn_route.c 			    (now - rt->dst.lastuse) < expire) {
rt                195 net/decnet/dn_route.c 				rtp = &rt->dn_next;
rt                198 net/decnet/dn_route.c 			*rtp = rt->dn_next;
rt                199 net/decnet/dn_route.c 			rt->dn_next = NULL;
rt                200 net/decnet/dn_route.c 			dst_dev_put(&rt->dst);
rt                201 net/decnet/dn_route.c 			dst_release(&rt->dst);
rt                214 net/decnet/dn_route.c 	struct dn_route *rt;
rt                225 net/decnet/dn_route.c 		while ((rt = rcu_dereference_protected(*rtp,
rt                227 net/decnet/dn_route.c 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
rt                228 net/decnet/dn_route.c 			    (now - rt->dst.lastuse) < expire) {
rt                229 net/decnet/dn_route.c 				rtp = &rt->dn_next;
rt                232 net/decnet/dn_route.c 			*rtp = rt->dn_next;
rt                233 net/decnet/dn_route.c 			rt->dn_next = NULL;
rt                234 net/decnet/dn_route.c 			dst_dev_put(&rt->dst);
rt                235 net/decnet/dn_route.c 			dst_release(&rt->dst);
rt                258 net/decnet/dn_route.c 	struct dn_route *rt = (struct dn_route *) dst;
rt                259 net/decnet/dn_route.c 	struct neighbour *n = rt->n;
rt                317 net/decnet/dn_route.c static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
rt                328 net/decnet/dn_route.c 		if (compare_keys(&rth->fld, &rt->fld)) {
rt                338 net/decnet/dn_route.c 			dst_release_immediate(&rt->dst);
rt                345 net/decnet/dn_route.c 	rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
rt                346 net/decnet/dn_route.c 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
rt                348 net/decnet/dn_route.c 	dst_hold_and_use(&rt->dst, now);
rt                350 net/decnet/dn_route.c 	*rp = rt;
rt                357 net/decnet/dn_route.c 	struct dn_route *rt, *next;
rt                362 net/decnet/dn_route.c 		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
rt                365 net/decnet/dn_route.c 		for(; rt; rt = next) {
rt                366 net/decnet/dn_route.c 			next = rcu_dereference_raw(rt->dn_next);
rt                367 net/decnet/dn_route.c 			RCU_INIT_POINTER(rt->dn_next, NULL);
rt                368 net/decnet/dn_route.c 			dst_dev_put(&rt->dst);
rt                369 net/decnet/dn_route.c 			dst_release(&rt->dst);
rt                736 net/decnet/dn_route.c 	struct dn_route *rt = (struct dn_route *)dst;
rt                742 net/decnet/dn_route.c 	if (rt->n == NULL)
rt                747 net/decnet/dn_route.c 	cb->src = rt->rt_saddr;
rt                748 net/decnet/dn_route.c 	cb->dst = rt->rt_daddr;
rt                776 net/decnet/dn_route.c 	struct dn_route *rt;
rt                784 net/decnet/dn_route.c 	rt = (struct dn_route *)skb_dst(skb);
rt                786 net/decnet/dn_route.c 	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
rt                795 net/decnet/dn_route.c 	skb->dev = rt->dst.dev;
rt                803 net/decnet/dn_route.c 	if (rt->rt_flags & RTCF_DOREDIRECT)
rt                862 net/decnet/dn_route.c static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
rt                865 net/decnet/dn_route.c 	struct net_device *dev = rt->dst.dev;
rt                872 net/decnet/dn_route.c 			rt->rt_gateway = DN_FIB_RES_GW(*res);
rt                873 net/decnet/dn_route.c 		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
rt                875 net/decnet/dn_route.c 	rt->rt_type = res->type;
rt                877 net/decnet/dn_route.c 	if (dev != NULL && rt->n == NULL) {
rt                878 net/decnet/dn_route.c 		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
rt                881 net/decnet/dn_route.c 		rt->n = n;
rt                884 net/decnet/dn_route.c 	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
rt                885 net/decnet/dn_route.c 		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
rt                886 net/decnet/dn_route.c 	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
rt                888 net/decnet/dn_route.c 		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
rt                890 net/decnet/dn_route.c 			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
rt                957 net/decnet/dn_route.c 	struct dn_route *rt = NULL;
rt               1176 net/decnet/dn_route.c 	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
rt               1177 net/decnet/dn_route.c 	if (rt == NULL)
rt               1180 net/decnet/dn_route.c 	rt->dn_next = NULL;
rt               1181 net/decnet/dn_route.c 	memset(&rt->fld, 0, sizeof(rt->fld));
rt               1182 net/decnet/dn_route.c 	rt->fld.saddr        = oldflp->saddr;
rt               1183 net/decnet/dn_route.c 	rt->fld.daddr        = oldflp->daddr;
rt               1184 net/decnet/dn_route.c 	rt->fld.flowidn_oif  = oldflp->flowidn_oif;
rt               1185 net/decnet/dn_route.c 	rt->fld.flowidn_iif  = 0;
rt               1186 net/decnet/dn_route.c 	rt->fld.flowidn_mark = oldflp->flowidn_mark;
rt               1188 net/decnet/dn_route.c 	rt->rt_saddr      = fld.saddr;
rt               1189 net/decnet/dn_route.c 	rt->rt_daddr      = fld.daddr;
rt               1190 net/decnet/dn_route.c 	rt->rt_gateway    = gateway ? gateway : fld.daddr;
rt               1191 net/decnet/dn_route.c 	rt->rt_local_src  = fld.saddr;
rt               1193 net/decnet/dn_route.c 	rt->rt_dst_map    = fld.daddr;
rt               1194 net/decnet/dn_route.c 	rt->rt_src_map    = fld.saddr;
rt               1196 net/decnet/dn_route.c 	rt->n = neigh;
rt               1199 net/decnet/dn_route.c 	rt->dst.lastuse = jiffies;
rt               1200 net/decnet/dn_route.c 	rt->dst.output  = dn_output;
rt               1201 net/decnet/dn_route.c 	rt->dst.input   = dn_rt_bug;
rt               1202 net/decnet/dn_route.c 	rt->rt_flags      = flags;
rt               1204 net/decnet/dn_route.c 		rt->dst.input = dn_nsp_rx;
rt               1206 net/decnet/dn_route.c 	err = dn_rt_set_next_hop(rt, &res);
rt               1210 net/decnet/dn_route.c 	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
rt               1212 net/decnet/dn_route.c 	dn_insert_route(rt, hash, (struct dn_route **)pprt);
rt               1234 net/decnet/dn_route.c 	dst_release_immediate(&rt->dst);
rt               1245 net/decnet/dn_route.c 	struct dn_route *rt = NULL;
rt               1249 net/decnet/dn_route.c 		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
rt               1250 net/decnet/dn_route.c 			rt = rcu_dereference_bh(rt->dn_next)) {
rt               1251 net/decnet/dn_route.c 			if ((flp->daddr == rt->fld.daddr) &&
rt               1252 net/decnet/dn_route.c 			    (flp->saddr == rt->fld.saddr) &&
rt               1253 net/decnet/dn_route.c 			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
rt               1254 net/decnet/dn_route.c 			    dn_is_output_route(rt) &&
rt               1255 net/decnet/dn_route.c 			    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
rt               1256 net/decnet/dn_route.c 				dst_hold_and_use(&rt->dst, jiffies);
rt               1258 net/decnet/dn_route.c 				*pprt = &rt->dst;
rt               1302 net/decnet/dn_route.c 	struct dn_route *rt = NULL;
rt               1442 net/decnet/dn_route.c 	rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
rt               1443 net/decnet/dn_route.c 	if (rt == NULL)
rt               1446 net/decnet/dn_route.c 	rt->dn_next = NULL;
rt               1447 net/decnet/dn_route.c 	memset(&rt->fld, 0, sizeof(rt->fld));
rt               1448 net/decnet/dn_route.c 	rt->rt_saddr      = fld.saddr;
rt               1449 net/decnet/dn_route.c 	rt->rt_daddr      = fld.daddr;
rt               1450 net/decnet/dn_route.c 	rt->rt_gateway    = fld.daddr;
rt               1452 net/decnet/dn_route.c 		rt->rt_gateway = gateway;
rt               1453 net/decnet/dn_route.c 	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;
rt               1455 net/decnet/dn_route.c 	rt->rt_dst_map    = fld.daddr;
rt               1456 net/decnet/dn_route.c 	rt->rt_src_map    = fld.saddr;
rt               1458 net/decnet/dn_route.c 	rt->fld.saddr        = cb->src;
rt               1459 net/decnet/dn_route.c 	rt->fld.daddr        = cb->dst;
rt               1460 net/decnet/dn_route.c 	rt->fld.flowidn_oif  = 0;
rt               1461 net/decnet/dn_route.c 	rt->fld.flowidn_iif  = in_dev->ifindex;
rt               1462 net/decnet/dn_route.c 	rt->fld.flowidn_mark = fld.flowidn_mark;
rt               1464 net/decnet/dn_route.c 	rt->n = neigh;
rt               1465 net/decnet/dn_route.c 	rt->dst.lastuse = jiffies;
rt               1466 net/decnet/dn_route.c 	rt->dst.output = dn_rt_bug_out;
rt               1469 net/decnet/dn_route.c 		rt->dst.input = dn_forward;
rt               1472 net/decnet/dn_route.c 		rt->dst.output = dn_output;
rt               1473 net/decnet/dn_route.c 		rt->dst.input = dn_nsp_rx;
rt               1474 net/decnet/dn_route.c 		rt->dst.dev = in_dev;
rt               1480 net/decnet/dn_route.c 		rt->dst.input = dst_discard;
rt               1482 net/decnet/dn_route.c 	rt->rt_flags = flags;
rt               1484 net/decnet/dn_route.c 	err = dn_rt_set_next_hop(rt, &res);
rt               1488 net/decnet/dn_route.c 	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
rt               1490 net/decnet/dn_route.c 	dn_insert_route(rt, hash, &rt);
rt               1491 net/decnet/dn_route.c 	skb_dst_set(skb, &rt->dst);
rt               1513 net/decnet/dn_route.c 	dst_release_immediate(&rt->dst);
rt               1519 net/decnet/dn_route.c 	struct dn_route *rt;
rt               1527 net/decnet/dn_route.c 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
rt               1528 net/decnet/dn_route.c 	    rt = rcu_dereference(rt->dn_next)) {
rt               1529 net/decnet/dn_route.c 		if ((rt->fld.saddr == cb->src) &&
rt               1530 net/decnet/dn_route.c 		    (rt->fld.daddr == cb->dst) &&
rt               1531 net/decnet/dn_route.c 		    (rt->fld.flowidn_oif == 0) &&
rt               1532 net/decnet/dn_route.c 		    (rt->fld.flowidn_mark == skb->mark) &&
rt               1533 net/decnet/dn_route.c 		    (rt->fld.flowidn_iif == cb->iif)) {
rt               1534 net/decnet/dn_route.c 			dst_hold_and_use(&rt->dst, jiffies);
rt               1536 net/decnet/dn_route.c 			skb_dst_set(skb, (struct dst_entry *)rt);
rt               1548 net/decnet/dn_route.c 	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
rt               1563 net/decnet/dn_route.c 	r->rtm_type = rt->rt_type;
rt               1564 net/decnet/dn_route.c 	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
rt               1568 net/decnet/dn_route.c 	if (rt->rt_flags & RTCF_NOTIFY)
rt               1572 net/decnet/dn_route.c 	    nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
rt               1575 net/decnet/dn_route.c 	if (rt->fld.saddr) {
rt               1577 net/decnet/dn_route.c 		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
rt               1580 net/decnet/dn_route.c 	if (rt->dst.dev &&
rt               1581 net/decnet/dn_route.c 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
rt               1589 net/decnet/dn_route.c 	if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
rt               1592 net/decnet/dn_route.c 	if (rt->rt_daddr != rt->rt_gateway &&
rt               1593 net/decnet/dn_route.c 	    nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
rt               1596 net/decnet/dn_route.c 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
rt               1599 net/decnet/dn_route.c 	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
rt               1600 net/decnet/dn_route.c 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
rt               1601 net/decnet/dn_route.c 			       rt->dst.error) < 0)
rt               1604 net/decnet/dn_route.c 	if (dn_is_input_route(rt) &&
rt               1605 net/decnet/dn_route.c 	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
rt               1638 net/decnet/dn_route.c 	struct dn_route *rt = NULL;
rt               1686 net/decnet/dn_route.c 		rt = (struct dn_route *)skb_dst(skb);
rt               1687 net/decnet/dn_route.c 		if (!err && -rt->dst.error)
rt               1688 net/decnet/dn_route.c 			err = rt->dst.error;
rt               1693 net/decnet/dn_route.c 		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
rt               1699 net/decnet/dn_route.c 	skb_dst_set(skb, &rt->dst);
rt               1701 net/decnet/dn_route.c 		rt->rt_flags |= RTCF_NOTIFY;
rt               1723 net/decnet/dn_route.c 	struct dn_route *rt;
rt               1746 net/decnet/dn_route.c 		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt               1747 net/decnet/dn_route.c 			rt;
rt               1748 net/decnet/dn_route.c 			rt = rcu_dereference_bh(rt->dn_next), idx++) {
rt               1751 net/decnet/dn_route.c 			skb_dst_set(skb, dst_clone(&rt->dst));
rt               1777 net/decnet/dn_route.c 	struct dn_route *rt = NULL;
rt               1782 net/decnet/dn_route.c 		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
rt               1783 net/decnet/dn_route.c 		if (rt)
rt               1787 net/decnet/dn_route.c 	return rt;
rt               1790 net/decnet/dn_route.c static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
rt               1794 net/decnet/dn_route.c 	rt = rcu_dereference_bh(rt->dn_next);
rt               1795 net/decnet/dn_route.c 	while (!rt) {
rt               1800 net/decnet/dn_route.c 		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
rt               1802 net/decnet/dn_route.c 	return rt;
rt               1807 net/decnet/dn_route.c 	struct dn_route *rt = dn_rt_cache_get_first(seq);
rt               1809 net/decnet/dn_route.c 	if (rt) {
rt               1810 net/decnet/dn_route.c 		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
rt               1813 net/decnet/dn_route.c 	return *pos ? NULL : rt;
rt               1818 net/decnet/dn_route.c 	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
rt               1820 net/decnet/dn_route.c 	return rt;
rt               1831 net/decnet/dn_route.c 	struct dn_route *rt = v;
rt               1835 net/decnet/dn_route.c 		   rt->dst.dev ? rt->dst.dev->name : "*",
rt               1836 net/decnet/dn_route.c 		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
rt               1837 net/decnet/dn_route.c 		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
rt               1838 net/decnet/dn_route.c 		   atomic_read(&rt->dst.__refcnt),
rt               1839 net/decnet/dn_route.c 		   rt->dst.__use, 0);
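
The net/decnet/dn_route.c entries above include the input-route cache hit path: walk the RCU-protected chain of the hash bucket, compare the flow keys against the skb's DECnet control block, then take a reference with dst_hold_and_use() and attach the route to the skb. A minimal sketch of that pattern, assuming dn_route.c's file-local dn_rt_hash_table is in scope; the key comparison is abbreviated, and the helper name and -ESRCH fallback are illustrative rather than taken from the source.

/* Sketch of the cached input-route lookup shown in the dn_route.c lines above. */
static int dn_cache_input_sketch(struct sk_buff *skb, unsigned int hash)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_route *rt;

	rcu_read_lock();
	for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
	     rt = rcu_dereference(rt->dn_next)) {
		if (rt->fld.saddr == cb->src &&
		    rt->fld.daddr == cb->dst &&
		    rt->fld.flowidn_iif == cb->iif) {
			dst_hold_and_use(&rt->dst, jiffies);	/* refcount + lastuse */
			rcu_read_unlock();
			skb_dst_set(skb, &rt->dst);		/* route rides on the skb */
			return 0;
		}
	}
	rcu_read_unlock();
	return -ESRCH;		/* miss: fall back to the slow-path lookup */
}
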
rt                920 net/ipv4/af_inet.c 	struct rtentry rt;
rt                925 net/ipv4/af_inet.c 		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
rt                927 net/ipv4/af_inet.c 		err = ip_rt_ioctl(net, cmd, &rt);
rt               1194 net/ipv4/af_inet.c 	struct rtable *rt;
rt               1205 net/ipv4/af_inet.c 	rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
rt               1208 net/ipv4/af_inet.c 	if (IS_ERR(rt))
rt               1209 net/ipv4/af_inet.c 		return PTR_ERR(rt);
rt               1211 net/ipv4/af_inet.c 	sk_setup_caps(sk, &rt->dst);
rt               1239 net/ipv4/af_inet.c 	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
rt               1246 net/ipv4/af_inet.c 	if (rt)
rt               1257 net/ipv4/af_inet.c 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
rt               1261 net/ipv4/af_inet.c 	if (!IS_ERR(rt)) {
rt               1263 net/ipv4/af_inet.c 		sk_setup_caps(sk, &rt->dst);
rt               1265 net/ipv4/af_inet.c 		err = PTR_ERR(rt);
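
The net/ipv4/af_inet.c entries above show the two routing moments of a connected IPv4 socket: the initial ip_route_connect()/sk_setup_caps() at connect time, and the rebuild path that revalidates the cached route with __sk_dst_check() and re-routes with ip_route_output_ports() once it has gone stale. A condensed sketch of the rebuild step, assuming a connected socket and ignoring the source-routing option handling present in the original:

/* Sketch: refresh a socket's cached route, cf. the af_inet.c rebuild path above. */
static int rebuild_sk_route_sketch(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	struct flowi4 fl4;

	if (rt)
		return 0;		/* cached route is still valid */

	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   inet->inet_daddr, inet->inet_saddr,
				   inet->inet_dport, inet->inet_sport,
				   sk->sk_protocol, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	sk_setup_caps(sk, &rt->dst);	/* cache it and inherit device offloads */
	return 0;
}
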
rt                431 net/ipv4/arp.c 	struct rtable *rt;
rt                436 net/ipv4/arp.c 	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
rt                437 net/ipv4/arp.c 	if (IS_ERR(rt))
rt                439 net/ipv4/arp.c 	if (rt->dst.dev != dev) {
rt                443 net/ipv4/arp.c 	ip_rt_put(rt);
rt                451 net/ipv4/arp.c 				struct net_device *dev,	struct rtable *rt)
rt                456 net/ipv4/arp.c 	if (rt->dst.dev == dev)
rt                469 net/ipv4/arp.c 	out_dev = __in_dev_get_rcu(rt->dst.dev);
rt                496 net/ipv4/arp.c 				struct net_device *dev,	struct rtable *rt,
rt                500 net/ipv4/arp.c 	if (rt->dst.dev != dev)
rt                681 net/ipv4/arp.c 	struct rtable *rt;
rt                816 net/ipv4/arp.c 		rt = skb_rtable(skb);
rt                817 net/ipv4/arp.c 		addr_type = rt->rt_type;
rt                838 net/ipv4/arp.c 			    (arp_fwd_proxy(in_dev, dev, rt) ||
rt                839 net/ipv4/arp.c 			     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
rt                840 net/ipv4/arp.c 			     (rt->dst.dev != dev &&
rt               1032 net/ipv4/arp.c 		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
rt               1034 net/ipv4/arp.c 		if (IS_ERR(rt))
rt               1035 net/ipv4/arp.c 			return PTR_ERR(rt);
rt               1036 net/ipv4/arp.c 		dev = rt->dst.dev;
rt               1037 net/ipv4/arp.c 		ip_rt_put(rt);
rt               1158 net/ipv4/arp.c 		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
rt               1159 net/ipv4/arp.c 		if (IS_ERR(rt))
rt               1160 net/ipv4/arp.c 			return PTR_ERR(rt);
rt               1161 net/ipv4/arp.c 		dev = rt->dst.dev;
rt               1162 net/ipv4/arp.c 		ip_rt_put(rt);
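
Several of the net/ipv4/arp.c entries above use a reverse route lookup purely as a sanity check: route the sender's address and confirm the result leaves through the device the ARP packet arrived on, dropping the reference immediately afterwards. A minimal sketch of that check (cf. arp_filter in the lines above); the function name is illustrative:

/* Sketch: does the route back to sip exit via this device? */
static bool arp_reverse_route_ok_sketch(struct net *net, __be32 sip, __be32 tip,
					struct net_device *dev)
{
	struct rtable *rt;
	bool match;

	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
	if (IS_ERR(rt))
		return false;

	match = (rt->dst.dev == dev);
	ip_rt_put(rt);			/* the lookup took a reference; drop it */
	return match;
}
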
rt                 25 net/ipv4/datagram.c 	struct rtable *rt;
rt                 48 net/ipv4/datagram.c 	rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
rt                 52 net/ipv4/datagram.c 	if (IS_ERR(rt)) {
rt                 53 net/ipv4/datagram.c 		err = PTR_ERR(rt);
rt                 59 net/ipv4/datagram.c 	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
rt                 60 net/ipv4/datagram.c 		ip_rt_put(rt);
rt                 78 net/ipv4/datagram.c 	sk_dst_set(sk, &rt->dst);
rt                107 net/ipv4/datagram.c 	struct rtable *rt;
rt                119 net/ipv4/datagram.c 	rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
rt                124 net/ipv4/datagram.c 	dst = !IS_ERR(rt) ? &rt->dst : NULL;
rt                285 net/ipv4/fib_frontend.c 	struct rtable *rt;
rt                289 net/ipv4/fib_frontend.c 	rt = skb_rtable(skb);
rt                290 net/ipv4/fib_frontend.c 	if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
rt                475 net/ipv4/fib_frontend.c static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
rt                484 net/ipv4/fib_frontend.c 	if (rt->rt_dst.sa_family != AF_INET)
rt                496 net/ipv4/fib_frontend.c 	addr = sk_extract_addr(&rt->rt_dst);
rt                497 net/ipv4/fib_frontend.c 	if (!(rt->rt_flags & RTF_HOST)) {
rt                498 net/ipv4/fib_frontend.c 		__be32 mask = sk_extract_addr(&rt->rt_genmask);
rt                500 net/ipv4/fib_frontend.c 		if (rt->rt_genmask.sa_family != AF_INET) {
rt                501 net/ipv4/fib_frontend.c 			if (mask || rt->rt_genmask.sa_family)
rt                519 net/ipv4/fib_frontend.c 	if (rt->rt_metric)
rt                520 net/ipv4/fib_frontend.c 		cfg->fc_priority = rt->rt_metric - 1;
rt                522 net/ipv4/fib_frontend.c 	if (rt->rt_flags & RTF_REJECT) {
rt                531 net/ipv4/fib_frontend.c 	if (rt->rt_dev) {
rt                536 net/ipv4/fib_frontend.c 		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
rt                571 net/ipv4/fib_frontend.c 	addr = sk_extract_addr(&rt->rt_gateway);
rt                572 net/ipv4/fib_frontend.c 	if (rt->rt_gateway.sa_family == AF_INET && addr) {
rt                578 net/ipv4/fib_frontend.c 		if (rt->rt_flags & RTF_GATEWAY &&
rt                586 net/ipv4/fib_frontend.c 	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw_family)
rt                592 net/ipv4/fib_frontend.c 	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
rt                600 net/ipv4/fib_frontend.c 		if (rt->rt_flags & RTF_MTU)
rt                601 net/ipv4/fib_frontend.c 			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
rt                603 net/ipv4/fib_frontend.c 		if (rt->rt_flags & RTF_WINDOW)
rt                604 net/ipv4/fib_frontend.c 			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
rt                606 net/ipv4/fib_frontend.c 		if (rt->rt_flags & RTF_IRTT)
rt                607 net/ipv4/fib_frontend.c 			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
rt                620 net/ipv4/fib_frontend.c int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt)
rt                632 net/ipv4/fib_frontend.c 		err = rtentry_to_fib_config(net, cmd, rt, &cfg);
rt                148 net/ipv4/fib_semantics.c 	struct rtable *rt = rcu_dereference_protected(*rtp, 1);
rt                150 net/ipv4/fib_semantics.c 	if (!rt)
rt                158 net/ipv4/fib_semantics.c 	dst_dev_put(&rt->dst);
rt                159 net/ipv4/fib_semantics.c 	dst_release_immediate(&rt->dst);
rt                198 net/ipv4/fib_semantics.c 		struct rtable *rt;
rt                200 net/ipv4/fib_semantics.c 		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
rt                201 net/ipv4/fib_semantics.c 		if (rt) {
rt                202 net/ipv4/fib_semantics.c 			dst_dev_put(&rt->dst);
rt                203 net/ipv4/fib_semantics.c 			dst_release_immediate(&rt->dst);
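
The net/ipv4/fib_semantics.c entries above free cached nexthop routes: each pointer is fetched with rcu_dereference_protected() (teardown is already serialized), dst_dev_put() detaches it from its device, and dst_release_immediate() frees it without waiting for an RCU grace period. A sketch of the per-CPU variant mirrored from those lines; the function name is illustrative:

/* Sketch: release per-CPU cached routes, cf. the fib_semantics.c lines above. */
static void free_percpu_routes_sketch(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt) {
			dst_dev_put(&rt->dst);		 /* detach from the netdevice */
			dst_release_immediate(&rt->dst); /* free without an RCU grace period */
		}
	}
	free_percpu(rtp);
}
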
rt                310 net/ipv4/icmp.c static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
rt                313 net/ipv4/icmp.c 	struct dst_entry *dst = &rt->dst;
rt                365 net/ipv4/icmp.c 			    struct ipcm_cookie *ipc, struct rtable **rt)
rt                370 net/ipv4/icmp.c 	sk = icmp_sk(dev_net((*rt)->dst.dev));
rt                374 net/ipv4/icmp.c 			   ipc, rt, MSG_DONTWAIT) < 0) {
rt                401 net/ipv4/icmp.c 	struct rtable *rt = skb_rtable(skb);
rt                402 net/ipv4/icmp.c 	struct net *net = dev_net(rt->dst.dev);
rt                448 net/ipv4/icmp.c 	rt = ip_route_output_key(net, &fl4);
rt                449 net/ipv4/icmp.c 	if (IS_ERR(rt))
rt                451 net/ipv4/icmp.c 	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
rt                452 net/ipv4/icmp.c 		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
rt                453 net/ipv4/icmp.c 	ip_rt_put(rt);
rt                468 net/ipv4/icmp.c 	struct rtable *rt, *rt2;
rt                485 net/ipv4/icmp.c 	rt = ip_route_output_key_hash(net, fl4, skb_in);
rt                486 net/ipv4/icmp.c 	if (IS_ERR(rt))
rt                487 net/ipv4/icmp.c 		return rt;
rt                490 net/ipv4/icmp.c 	rt2 = rt;
rt                492 net/ipv4/icmp.c 	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
rt                494 net/ipv4/icmp.c 	if (!IS_ERR(rt)) {
rt                495 net/ipv4/icmp.c 		if (rt != rt2)
rt                496 net/ipv4/icmp.c 			return rt;
rt                497 net/ipv4/icmp.c 	} else if (PTR_ERR(rt) == -EPERM) {
rt                498 net/ipv4/icmp.c 		rt = NULL;
rt                500 net/ipv4/icmp.c 		return rt;
rt                539 net/ipv4/icmp.c 		dst_release(&rt->dst);
rt                541 net/ipv4/icmp.c 		rt = rt2;
rt                543 net/ipv4/icmp.c 		if (rt)
rt                544 net/ipv4/icmp.c 			dst_release(&rt->dst);
rt                550 net/ipv4/icmp.c 	return rt;
rt                553 net/ipv4/icmp.c 	if (rt)
rt                554 net/ipv4/icmp.c 		return rt;
rt                575 net/ipv4/icmp.c 	struct rtable *rt = skb_rtable(skb_in);
rt                584 net/ipv4/icmp.c 	if (!rt)
rt                587 net/ipv4/icmp.c 	if (rt->dst.dev)
rt                588 net/ipv4/icmp.c 		net = dev_net(rt->dst.dev);
rt                615 net/ipv4/icmp.c 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
rt                677 net/ipv4/icmp.c 	if (!(rt->rt_flags & RTCF_LOCAL)) {
rt                681 net/ipv4/icmp.c 		if (rt_is_input_route(rt) &&
rt                717 net/ipv4/icmp.c 	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
rt                719 net/ipv4/icmp.c 	if (IS_ERR(rt))
rt                723 net/ipv4/icmp.c 	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
rt                728 net/ipv4/icmp.c 	room = dst_mtu(&rt->dst);
rt                739 net/ipv4/icmp.c 	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
rt                741 net/ipv4/icmp.c 	ip_rt_put(rt);
rt                999 net/ipv4/icmp.c 	struct rtable *rt = skb_rtable(skb);
rt               1000 net/ipv4/icmp.c 	struct net *net = dev_net(rt->dst.dev);
rt               1048 net/ipv4/icmp.c 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
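
The net/ipv4/icmp.c entries above fix the order of operations for replies: look up the route, rate-limit against that route with icmpv4_xrlim_allow(), push the reply only if allowed, and release the route either way. A minimal sketch of that flow, assuming icmp.c's file-local struct icmp_bxm and helpers are in scope and that the flow key and cookie were prepared by the caller:

/* Sketch: route, rate-limit, send, release - the order used by icmp_reply(). */
static void icmp_reply_route_sketch(struct net *net, struct flowi4 *fl4,
				    struct icmp_bxm *icmp_param,
				    struct ipcm_cookie *ipc, int type, int code)
{
	struct rtable *rt;

	rt = ip_route_output_key(net, fl4);
	if (IS_ERR(rt))
		return;
	if (icmpv4_xrlim_allow(net, rt, fl4, type, code))
		icmp_push_reply(icmp_param, fl4, ipc, &rt);
	ip_rt_put(rt);			/* dropped whether or not we replied */
}
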
rt                351 net/ipv4/igmp.c 	struct rtable *rt;
rt                371 net/ipv4/igmp.c 	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
rt                374 net/ipv4/igmp.c 	if (IS_ERR(rt)) {
rt                379 net/ipv4/igmp.c 	skb_dst_set(skb, &rt->dst);
rt                730 net/ipv4/igmp.c 	struct rtable *rt;
rt                749 net/ipv4/igmp.c 	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
rt                752 net/ipv4/igmp.c 	if (IS_ERR(rt))
rt                759 net/ipv4/igmp.c 		ip_rt_put(rt);
rt                764 net/ipv4/igmp.c 	skb_dst_set(skb, &rt->dst);
rt               1827 net/ipv4/igmp.c 		struct rtable *rt = ip_route_output(net,
rt               1830 net/ipv4/igmp.c 		if (!IS_ERR(rt)) {
rt               1831 net/ipv4/igmp.c 			dev = rt->dst.dev;
rt               1832 net/ipv4/igmp.c 			ip_rt_put(rt);
rt                573 net/ipv4/inet_connection_sock.c 	struct rtable *rt;
rt                585 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_flow(net, fl4, sk);
rt                586 net/ipv4/inet_connection_sock.c 	if (IS_ERR(rt))
rt                588 net/ipv4/inet_connection_sock.c 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
rt                591 net/ipv4/inet_connection_sock.c 	return &rt->dst;
rt                594 net/ipv4/inet_connection_sock.c 	ip_rt_put(rt);
rt                611 net/ipv4/inet_connection_sock.c 	struct rtable *rt;
rt                623 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_flow(net, fl4, sk);
rt                624 net/ipv4/inet_connection_sock.c 	if (IS_ERR(rt))
rt                626 net/ipv4/inet_connection_sock.c 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
rt                628 net/ipv4/inet_connection_sock.c 	return &rt->dst;
rt                631 net/ipv4/inet_connection_sock.c 	ip_rt_put(rt);
rt               1084 net/ipv4/inet_connection_sock.c 	struct rtable *rt;
rt               1091 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
rt               1095 net/ipv4/inet_connection_sock.c 	if (IS_ERR(rt))
rt               1096 net/ipv4/inet_connection_sock.c 		rt = NULL;
rt               1097 net/ipv4/inet_connection_sock.c 	if (rt)
rt               1098 net/ipv4/inet_connection_sock.c 		sk_setup_caps(sk, &rt->dst);
rt               1101 net/ipv4/inet_connection_sock.c 	return &rt->dst;
rt                 90 net/ipv4/ip_forward.c 	struct rtable *rt;	/* Route we use */
rt                124 net/ipv4/ip_forward.c 	rt = skb_rtable(skb);
rt                126 net/ipv4/ip_forward.c 	if (opt->is_strictroute && rt->rt_uses_gateway)
rt                130 net/ipv4/ip_forward.c 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
rt                139 net/ipv4/ip_forward.c 	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
rt                158 net/ipv4/ip_forward.c 		       net, NULL, skb, skb->dev, rt->dst.dev,
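
The net/ipv4/ip_forward.c entries above tie three forwarding decisions to the route: refuse to re-route a strict source route through a gateway (rt->rt_uses_gateway), enforce the path MTU of rt->dst for DF packets, and grow headroom for the output device before the FORWARD hook hands the skb to rt->dst.dev. A simplified sketch of the MTU and headroom checks; the real code also accounts for GSO and skb->ignore_df:

/* Sketch: route-derived checks before forwarding, cf. ip_forward(). */
static int forward_checks_sketch(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);

	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) && skb->len > mtu)
		return -EMSGSIZE;	/* caller sends ICMP_FRAG_NEEDED instead */

	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len))
		return -ENOMEM;

	return 0;
}
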
rt                572 net/ipv4/ip_gre.c 	struct rtable *rt;
rt                582 net/ipv4/ip_gre.c 	rt = ip_route_output_key(dev_net(dev), &fl4);
rt                583 net/ipv4/ip_gre.c 	if (IS_ERR(rt))
rt                584 net/ipv4/ip_gre.c 		return PTR_ERR(rt);
rt                586 net/ipv4/ip_gre.c 	ip_rt_put(rt);
rt                860 net/ipv4/ip_gre.c 		struct rtable *rt;
rt                862 net/ipv4/ip_gre.c 		rt = ip_route_output_gre(t->net, &fl4,
rt                868 net/ipv4/ip_gre.c 		if (IS_ERR(rt))
rt                870 net/ipv4/ip_gre.c 		dev = rt->dst.dev;
rt                871 net/ipv4/ip_gre.c 		ip_rt_put(rt);
rt                312 net/ipv4/ip_input.c 	struct rtable *rt;
rt                358 net/ipv4/ip_input.c 	rt = skb_rtable(skb);
rt                359 net/ipv4/ip_input.c 	if (rt->rt_type == RTN_MULTICAST) {
rt                361 net/ipv4/ip_input.c 	} else if (rt->rt_type == RTN_BROADCAST) {
rt                 45 net/ipv4/ip_options.c 		      __be32 daddr, struct rtable *rt, int is_frag)
rt                 58 net/ipv4/ip_options.c 			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
rt                 60 net/ipv4/ip_options.c 			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
rt                260 net/ipv4/ip_options.c 	struct rtable *rt = NULL;
rt                266 net/ipv4/ip_options.c 		rt = skb_rtable(skb);
rt                342 net/ipv4/ip_options.c 				if (rt) {
rt                383 net/ipv4/ip_options.c 					if (rt)  {
rt                573 net/ipv4/ip_options.c 	struct rtable *rt = skb_rtable(skb);
rt                578 net/ipv4/ip_options.c 		ip_rt_get_source(&optptr[optptr[2]-5], skb, rt);
rt                598 net/ipv4/ip_options.c 			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
rt                606 net/ipv4/ip_options.c 			ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
rt                623 net/ipv4/ip_options.c 	struct rtable *rt = skb_rtable(skb);
rt                628 net/ipv4/ip_options.c 	if (!rt)
rt                633 net/ipv4/ip_options.c 	if (rt->rt_type == RTN_UNICAST) {
rt                639 net/ipv4/ip_options.c 	if (rt->rt_type != RTN_LOCAL)
rt                148 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
rt                159 net/ipv4/ip_output.c 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
rt                163 net/ipv4/ip_output.c 	if (ip_dont_fragment(sk, &rt->dst)) {
rt                173 net/ipv4/ip_output.c 		ip_options_build(skb, &opt->opt, daddr, rt, 0);
rt                188 net/ipv4/ip_output.c 	struct rtable *rt = (struct rtable *)dst;
rt                194 net/ipv4/ip_output.c 	if (rt->rt_type == RTN_MULTICAST) {
rt                196 net/ipv4/ip_output.c 	} else if (rt->rt_type == RTN_BROADCAST)
rt                222 net/ipv4/ip_output.c 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
rt                363 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
rt                364 net/ipv4/ip_output.c 	struct net_device *dev = rt->dst.dev;
rt                378 net/ipv4/ip_output.c 	if (rt->rt_flags&RTCF_MULTICAST) {
rt                390 net/ipv4/ip_output.c 		    ((rt->rt_flags & RTCF_LOCAL) ||
rt                409 net/ipv4/ip_output.c 	if (rt->rt_flags&RTCF_BROADCAST) {
rt                460 net/ipv4/ip_output.c 	struct rtable *rt;
rt                470 net/ipv4/ip_output.c 	rt = skb_rtable(skb);
rt                471 net/ipv4/ip_output.c 	if (rt)
rt                475 net/ipv4/ip_output.c 	rt = (struct rtable *)__sk_dst_check(sk, 0);
rt                476 net/ipv4/ip_output.c 	if (!rt) {
rt                488 net/ipv4/ip_output.c 		rt = ip_route_output_ports(net, fl4, sk,
rt                495 net/ipv4/ip_output.c 		if (IS_ERR(rt))
rt                497 net/ipv4/ip_output.c 		sk_setup_caps(sk, &rt->dst);
rt                499 net/ipv4/ip_output.c 	skb_dst_set_noref(skb, &rt->dst);
rt                502 net/ipv4/ip_output.c 	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
rt                510 net/ipv4/ip_output.c 	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
rt                514 net/ipv4/ip_output.c 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
rt                522 net/ipv4/ip_output.c 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
rt                771 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
rt                800 net/ipv4/ip_output.c 	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
rt                976 net/ipv4/ip_output.c 	struct rtable *rt = (struct rtable *)cork->dst;
rt                983 net/ipv4/ip_output.c 	exthdrlen = !skb ? rt->dst.header_len : 0;
rt                991 net/ipv4/ip_output.c 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
rt               1009 net/ipv4/ip_output.c 	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
rt               1011 net/ipv4/ip_output.c 	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
rt               1019 net/ipv4/ip_output.c 		if (rt->dst.dev->features & NETIF_F_SG &&
rt               1071 net/ipv4/ip_output.c 			    !(rt->dst.dev->features&NETIF_F_SG))
rt               1088 net/ipv4/ip_output.c 				alloclen += rt->dst.trailer_len;
rt               1170 net/ipv4/ip_output.c 		if (!(rt->dst.dev->features&NETIF_F_SG) &&
rt               1239 net/ipv4/ip_output.c 	struct rtable *rt;
rt               1241 net/ipv4/ip_output.c 	rt = *rtp;
rt               1242 net/ipv4/ip_output.c 	if (unlikely(!rt))
rt               1262 net/ipv4/ip_output.c 			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
rt               1269 net/ipv4/ip_output.c 	cork->dst = &rt->dst;
rt               1327 net/ipv4/ip_output.c 	struct rtable *rt;
rt               1346 net/ipv4/ip_output.c 	rt = (struct rtable *)cork->dst;
rt               1350 net/ipv4/ip_output.c 	if (!(rt->dst.dev->features&NETIF_F_SG))
rt               1353 net/ipv4/ip_output.c 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
rt               1475 net/ipv4/ip_output.c 	struct rtable *rt = (struct rtable *)cork->dst;
rt               1510 net/ipv4/ip_output.c 	    (skb->len <= dst_mtu(&rt->dst) &&
rt               1511 net/ipv4/ip_output.c 	     ip_dont_fragment(sk, &rt->dst)))
rt               1519 net/ipv4/ip_output.c 	else if (rt->rt_type == RTN_MULTICAST)
rt               1522 net/ipv4/ip_output.c 		ttl = ip_select_ttl(inet, &rt->dst);
rt               1536 net/ipv4/ip_output.c 		ip_options_build(skb, opt, cork->addr, rt, 0);
rt               1547 net/ipv4/ip_output.c 	skb_dst_set(skb, &rt->dst);
rt               1665 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
rt               1698 net/ipv4/ip_output.c 	rt = ip_route_output_key(net, &fl4);
rt               1699 net/ipv4/ip_output.c 	if (IS_ERR(rt))
rt               1709 net/ipv4/ip_output.c 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
rt               1725 net/ipv4/ip_output.c 	ip_rt_put(rt);
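
The net/ipv4/ip_output.c entries above show the route-selection ladder used when queueing a packet on a connected socket: prefer a route already attached to the skb, otherwise validate the socket's cached route, re-route and re-cache when it is stale, and attach the result to the skb without taking an extra reference. A condensed sketch of that ladder (source-routing, strict-route and error paths trimmed):

/* Sketch: route selection in the ip_queue_xmit() path shown above. */
static struct rtable *queue_xmit_route_sketch(struct sock *sk, struct sk_buff *skb,
					      struct flowi4 *fl4)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt;

	rt = skb_rtable(skb);			/* 1: route already on the skb? */
	if (rt)
		return rt;

	rt = (struct rtable *)__sk_dst_check(sk, 0);	/* 2: socket cache */
	if (!rt) {
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,	/* 3: fresh lookup */
					   inet->inet_daddr, inet->inet_saddr,
					   inet->inet_dport, inet->inet_sport,
					   sk->sk_protocol, RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			return rt;
		sk_setup_caps(sk, &rt->dst);	/* cache for later sends */
	}
	skb_dst_set_noref(skb, &rt->dst);	/* skb borrows the socket's reference */
	return rt;
}
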
rt               1224 net/ipv4/ip_sockglue.c 		struct rtable *rt = skb_rtable(skb);
rt               1229 net/ipv4/ip_sockglue.c 		else if (l3slave && rt && rt->rt_iif)
rt               1230 net/ipv4/ip_sockglue.c 			pktinfo->ipi_ifindex = rt->rt_iif;
rt                291 net/ipv4/ip_tunnel.c 		struct rtable *rt;
rt                297 net/ipv4/ip_tunnel.c 		rt = ip_route_output_key(tunnel->net, &fl4);
rt                299 net/ipv4/ip_tunnel.c 		if (!IS_ERR(rt)) {
rt                300 net/ipv4/ip_tunnel.c 			tdev = rt->dst.dev;
rt                301 net/ipv4/ip_tunnel.c 			ip_rt_put(rt);
rt                486 net/ipv4/ip_tunnel.c 			    struct rtable *rt, __be16 df,
rt                498 net/ipv4/ip_tunnel.c 		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
rt                551 net/ipv4/ip_tunnel.c 	struct rtable *rt = NULL;
rt                579 net/ipv4/ip_tunnel.c 		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
rt                580 net/ipv4/ip_tunnel.c 	if (!rt) {
rt                581 net/ipv4/ip_tunnel.c 		rt = ip_route_output_key(tunnel->net, &fl4);
rt                582 net/ipv4/ip_tunnel.c 		if (IS_ERR(rt)) {
rt                587 net/ipv4/ip_tunnel.c 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
rt                590 net/ipv4/ip_tunnel.c 	if (rt->dst.dev == dev) {
rt                591 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                598 net/ipv4/ip_tunnel.c 	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
rt                600 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                612 net/ipv4/ip_tunnel.c 			ttl = ip4_dst_hoplimit(&rt->dst);
rt                618 net/ipv4/ip_tunnel.c 	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
rt                623 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                626 net/ipv4/ip_tunnel.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
rt                646 net/ipv4/ip_tunnel.c 	struct rtable *rt = NULL;		/* Route to the other host */
rt                678 net/ipv4/ip_tunnel.c 			rt = skb_rtable(skb);
rt                679 net/ipv4/ip_tunnel.c 			dst = rt_nexthop(rt, inner_iph->daddr);
rt                741 net/ipv4/ip_tunnel.c 			rt = dst_cache_get_ip4(&tun_info->dst_cache,
rt                744 net/ipv4/ip_tunnel.c 		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
rt                748 net/ipv4/ip_tunnel.c 	if (!rt) {
rt                749 net/ipv4/ip_tunnel.c 		rt = ip_route_output_key(tunnel->net, &fl4);
rt                751 net/ipv4/ip_tunnel.c 		if (IS_ERR(rt)) {
rt                756 net/ipv4/ip_tunnel.c 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
rt                759 net/ipv4/ip_tunnel.c 			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
rt                763 net/ipv4/ip_tunnel.c 	if (rt->dst.dev == dev) {
rt                764 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                769 net/ipv4/ip_tunnel.c 	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
rt                771 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                795 net/ipv4/ip_tunnel.c 			ttl = ip4_dst_hoplimit(&rt->dst);
rt                802 net/ipv4/ip_tunnel.c 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
rt                803 net/ipv4/ip_tunnel.c 			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
rt                808 net/ipv4/ip_tunnel.c 		ip_rt_put(rt);
rt                814 net/ipv4/ip_tunnel.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
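
The net/ipv4/ip_tunnel.c entries above show the transmit-side route handling for a tunnel: consult the per-tunnel (or per-tun_info) dst_cache first, fall back to ip_route_output_key(), repopulate the cache on success, and treat a route whose output device is the tunnel itself as a loop that must be dropped. A minimal sketch of the connected-tunnel case, assuming fl4 was already filled in by the caller:

/* Sketch: cached route lookup on the tunnel xmit path, cf. ip_tunnel_xmit(). */
static struct rtable *tunnel_route_sketch(struct ip_tunnel *tunnel,
					  struct net_device *dev,
					  struct flowi4 *fl4, bool connected)
{
	struct rtable *rt = NULL;

	if (connected)		/* the cache is only usable for fixed endpoints */
		rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4->saddr);

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, fl4);
		if (IS_ERR(rt))
			return NULL;	/* caller counts a tx_carrier_error */
		if (connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4->saddr);
	}

	if (rt->dst.dev == dev) {	/* route points back into the tunnel */
		ip_rt_put(rt);
		return NULL;		/* caller counts a collision and drops */
	}
	return rt;
}
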
rt                 46 net/ipv4/ip_tunnel_core.c void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
rt                 51 net/ipv4/ip_tunnel_core.c 	struct net *net = dev_net(rt->dst.dev);
rt                 59 net/ipv4/ip_tunnel_core.c 	skb_dst_set(skb, &rt->dst);
rt                 70 net/ipv4/ip_tunnel_core.c 	iph->frag_off	=	ip_mtu_locked(&rt->dst) ? 0 : df;
rt                213 net/ipv4/ip_vti.c 			struct rtable *rt;
rt                217 net/ipv4/ip_vti.c 			rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
rt                218 net/ipv4/ip_vti.c 			if (IS_ERR(rt)) {
rt                222 net/ipv4/ip_vti.c 			dst = &rt->dst;
rt               1844 net/ipv4/ipmr.c 	struct rtable *rt;
rt               1864 net/ipv4/ipmr.c 		rt = ip_route_output_ports(net, &fl4, NULL,
rt               1869 net/ipv4/ipmr.c 		if (IS_ERR(rt))
rt               1873 net/ipv4/ipmr.c 		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
rt               1877 net/ipv4/ipmr.c 		if (IS_ERR(rt))
rt               1881 net/ipv4/ipmr.c 	dev = rt->dst.dev;
rt               1883 net/ipv4/ipmr.c 	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
rt               1889 net/ipv4/ipmr.c 		ip_rt_put(rt);
rt               1893 net/ipv4/ipmr.c 	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
rt               1896 net/ipv4/ipmr.c 		ip_rt_put(rt);
rt               1904 net/ipv4/ipmr.c 	skb_dst_set(skb, &rt->dst);
rt               2071 net/ipv4/ipmr.c 	struct rtable *rt = skb_rtable(skb);
rt               2077 net/ipv4/ipmr.c 		.flowi4_oif = (rt_is_output_route(rt) ?
rt               2079 net/ipv4/ipmr.c 		.flowi4_iif = (rt_is_output_route(rt) ?
rt                 23 net/ipv4/netfilter.c 	struct rtable *rt;
rt                 49 net/ipv4/netfilter.c 	rt = ip_route_output_key(net, &fl4);
rt                 50 net/ipv4/netfilter.c 	if (IS_ERR(rt))
rt                 51 net/ipv4/netfilter.c 		return PTR_ERR(rt);
rt                 55 net/ipv4/netfilter.c 	skb_dst_set(skb, &rt->dst);
rt                 86 net/ipv4/netfilter.c 	struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
rt                 87 net/ipv4/netfilter.c 	if (IS_ERR(rt))
rt                 88 net/ipv4/netfilter.c 		return PTR_ERR(rt);
rt                 89 net/ipv4/netfilter.c 	*dst = &rt->dst;
rt                 27 net/ipv4/netfilter/nf_dup_ipv4.c 	struct rtable *rt;
rt                 38 net/ipv4/netfilter/nf_dup_ipv4.c 	rt = ip_route_output_key(net, &fl4);
rt                 39 net/ipv4/netfilter/nf_dup_ipv4.c 	if (IS_ERR(rt))
rt                 43 net/ipv4/netfilter/nf_dup_ipv4.c 	skb_dst_set(skb, &rt->dst);
rt                 44 net/ipv4/netfilter/nf_dup_ipv4.c 	skb->dev      = rt->dst.dev;
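
The net/ipv4/netfilter/nf_dup_ipv4.c entries above re-route a duplicated packet: a fresh ip_route_output_key() lookup toward the configured gateway, then the clone's dst and output device are replaced. A minimal sketch of that step; the parameter names are illustrative, and the flow-key fields are the ones I would expect here rather than a verbatim copy:

/* Sketch: point a cloned skb at a new route toward gw, cf. nf_dup_ipv4. */
static bool reroute_clone_sketch(struct net *net, struct sk_buff *skb,
				 __be32 gw, int oif)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.daddr = gw;
	fl4.flowi4_tos = RT_TOS(iph->tos);

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return false;

	skb_dst_drop(skb);		/* release the original route */
	skb_dst_set(skb, &rt->dst);	/* attach the new one */
	skb->dev = rt->dst.dev;
	return true;
}
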
rt                702 net/ipv4/ping.c 	struct rtable *rt = NULL;
rt                790 net/ipv4/ping.c 	rt = ip_route_output_flow(net, &fl4, sk);
rt                791 net/ipv4/ping.c 	if (IS_ERR(rt)) {
rt                792 net/ipv4/ping.c 		err = PTR_ERR(rt);
rt                793 net/ipv4/ping.c 		rt = NULL;
rt                800 net/ipv4/ping.c 	if ((rt->rt_flags & RTCF_BROADCAST) &&
rt                823 net/ipv4/ping.c 			0, &ipc, &rt, msg->msg_flags);
rt                831 net/ipv4/ping.c 	ip_rt_put(rt);
rt                843 net/ipv4/ping.c 		dst_confirm_neigh(&rt->dst, &fl4.daddr);
rt                354 net/ipv4/raw.c 	struct rtable *rt = *rtp;
rt                357 net/ipv4/raw.c 	if (length > rt->dst.dev->mtu) {
rt                359 net/ipv4/raw.c 			       rt->dst.dev->mtu);
rt                368 net/ipv4/raw.c 	hlen = LL_RESERVED_SPACE(rt->dst.dev);
rt                369 net/ipv4/raw.c 	tlen = rt->dst.dev->needed_tailroom;
rt                380 net/ipv4/raw.c 	skb_dst_set(skb, &rt->dst);
rt                429 net/ipv4/raw.c 		      net, sk, skb, NULL, rt->dst.dev,
rt                503 net/ipv4/raw.c 	struct rtable *rt = NULL;
rt                643 net/ipv4/raw.c 	rt = ip_route_output_flow(net, &fl4, sk);
rt                644 net/ipv4/raw.c 	if (IS_ERR(rt)) {
rt                645 net/ipv4/raw.c 		err = PTR_ERR(rt);
rt                646 net/ipv4/raw.c 		rt = NULL;
rt                651 net/ipv4/raw.c 	if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
rt                660 net/ipv4/raw.c 				      &rt, msg->msg_flags, &ipc.sockc);
rt                668 net/ipv4/raw.c 				     &ipc, &rt, msg->msg_flags);
rt                681 net/ipv4/raw.c 	ip_rt_put(rt);
rt                690 net/ipv4/raw.c 		dst_confirm_neigh(&rt->dst, &fl4.daddr);
rt                435 net/ipv4/route.c 	const struct rtable *rt = container_of(dst, struct rtable, dst);
rt                441 net/ipv4/route.c 	if (likely(rt->rt_gw_family == AF_INET)) {
rt                442 net/ipv4/route.c 		n = ip_neigh_gw4(dev, rt->rt_gw4);
rt                443 net/ipv4/route.c 	} else if (rt->rt_gw_family == AF_INET6) {
rt                444 net/ipv4/route.c 		n = ip_neigh_gw6(dev, &rt->rt_gw6);
rt                462 net/ipv4/route.c 	const struct rtable *rt = container_of(dst, struct rtable, dst);
rt                466 net/ipv4/route.c 	if (rt->rt_gw_family == AF_INET) {
rt                467 net/ipv4/route.c 		pkey = (const __be32 *)&rt->rt_gw4;
rt                468 net/ipv4/route.c 	} else if (rt->rt_gw_family == AF_INET6) {
rt                469 net/ipv4/route.c 		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
rt                471 net/ipv4/route.c 		 (rt->rt_flags &
rt                589 net/ipv4/route.c 	struct rtable *rt;
rt                591 net/ipv4/route.c 	rt = rcu_dereference(fnhe->fnhe_rth_input);
rt                592 net/ipv4/route.c 	if (rt) {
rt                594 net/ipv4/route.c 		dst_dev_put(&rt->dst);
rt                595 net/ipv4/route.c 		dst_release(&rt->dst);
rt                597 net/ipv4/route.c 	rt = rcu_dereference(fnhe->fnhe_rth_output);
rt                598 net/ipv4/route.c 	if (rt) {
rt                600 net/ipv4/route.c 		dst_dev_put(&rt->dst);
rt                601 net/ipv4/route.c 		dst_release(&rt->dst);
rt                629 net/ipv4/route.c static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
rt                631 net/ipv4/route.c 	rt->rt_pmtu = fnhe->fnhe_pmtu;
rt                632 net/ipv4/route.c 	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
rt                633 net/ipv4/route.c 	rt->dst.expires = fnhe->fnhe_expires;
rt                636 net/ipv4/route.c 		rt->rt_flags |= RTCF_REDIRECTED;
rt                637 net/ipv4/route.c 		rt->rt_uses_gateway = 1;
rt                638 net/ipv4/route.c 		rt->rt_gw_family = AF_INET;
rt                639 net/ipv4/route.c 		rt->rt_gw4 = fnhe->fnhe_gw;
rt                649 net/ipv4/route.c 	struct rtable *rt;
rt                688 net/ipv4/route.c 		rt = rcu_dereference(fnhe->fnhe_rth_input);
rt                689 net/ipv4/route.c 		if (rt)
rt                690 net/ipv4/route.c 			fill_route_from_fnhe(rt, fnhe);
rt                691 net/ipv4/route.c 		rt = rcu_dereference(fnhe->fnhe_rth_output);
rt                692 net/ipv4/route.c 		if (rt)
rt                693 net/ipv4/route.c 			fill_route_from_fnhe(rt, fnhe);
rt                716 net/ipv4/route.c 		rt = rcu_dereference(nhc->nhc_rth_input);
rt                717 net/ipv4/route.c 		if (rt)
rt                718 net/ipv4/route.c 			rt->dst.obsolete = DST_OBSOLETE_KILL;
rt                723 net/ipv4/route.c 			rt = rcu_dereference(*prt);
rt                724 net/ipv4/route.c 			if (rt)
rt                725 net/ipv4/route.c 				rt->dst.obsolete = DST_OBSOLETE_KILL;
rt                735 net/ipv4/route.c static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
rt                757 net/ipv4/route.c 	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
rt                780 net/ipv4/route.c 	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
rt                782 net/ipv4/route.c 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
rt                795 net/ipv4/route.c 				rt->dst.obsolete = DST_OBSOLETE_KILL;
rt                820 net/ipv4/route.c 	struct rtable *rt;
rt                829 net/ipv4/route.c 	rt = (struct rtable *) dst;
rt                832 net/ipv4/route.c 	__ip_do_redirect(rt, skb, &fl4, true);
rt                837 net/ipv4/route.c 	struct rtable *rt = (struct rtable *)dst;
rt                840 net/ipv4/route.c 	if (rt) {
rt                842 net/ipv4/route.c 			ip_rt_put(rt);
rt                844 net/ipv4/route.c 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt                845 net/ipv4/route.c 			   rt->dst.expires) {
rt                846 net/ipv4/route.c 			ip_rt_put(rt);
rt                871 net/ipv4/route.c 	struct rtable *rt = skb_rtable(skb);
rt                879 net/ipv4/route.c 	in_dev = __in_dev_get_rcu(rt->dst.dev);
rt                885 net/ipv4/route.c 	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
rt                888 net/ipv4/route.c 	net = dev_net(rt->dst.dev);
rt                892 net/ipv4/route.c 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
rt                919 net/ipv4/route.c 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
rt                938 net/ipv4/route.c 	struct rtable *rt = skb_rtable(skb);
rt                959 net/ipv4/route.c 	net = dev_net(rt->dst.dev);
rt                961 net/ipv4/route.c 		switch (rt->dst.error) {
rt                973 net/ipv4/route.c 	switch (rt->dst.error) {
rt               1012 net/ipv4/route.c static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
rt               1014 net/ipv4/route.c 	struct dst_entry *dst = &rt->dst;
rt               1030 net/ipv4/route.c 	if (rt->rt_pmtu == mtu && !lock &&
rt               1048 net/ipv4/route.c 	struct rtable *rt = (struct rtable *) dst;
rt               1052 net/ipv4/route.c 	__ip_rt_update_pmtu(rt, &fl4, mtu);
rt               1060 net/ipv4/route.c 	struct rtable *rt;
rt               1065 net/ipv4/route.c 	rt = __ip_route_output_key(net, &fl4);
rt               1066 net/ipv4/route.c 	if (!IS_ERR(rt)) {
rt               1067 net/ipv4/route.c 		__ip_rt_update_pmtu(rt, &fl4, mtu);
rt               1068 net/ipv4/route.c 		ip_rt_put(rt);
rt               1077 net/ipv4/route.c 	struct rtable *rt;
rt               1084 net/ipv4/route.c 	rt = __ip_route_output_key(sock_net(sk), &fl4);
rt               1085 net/ipv4/route.c 	if (!IS_ERR(rt)) {
rt               1086 net/ipv4/route.c 		__ip_rt_update_pmtu(rt, &fl4, mtu);
rt               1087 net/ipv4/route.c 		ip_rt_put(rt);
rt               1095 net/ipv4/route.c 	struct rtable *rt;
rt               1114 net/ipv4/route.c 	rt = (struct rtable *)odst;
rt               1116 net/ipv4/route.c 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
rt               1117 net/ipv4/route.c 		if (IS_ERR(rt))
rt               1123 net/ipv4/route.c 	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
rt               1125 net/ipv4/route.c 	if (!dst_check(&rt->dst, 0)) {
rt               1127 net/ipv4/route.c 			dst_release(&rt->dst);
rt               1129 net/ipv4/route.c 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
rt               1130 net/ipv4/route.c 		if (IS_ERR(rt))
rt               1137 net/ipv4/route.c 		sk_dst_set(sk, &rt->dst);
rt               1150 net/ipv4/route.c 	struct rtable *rt;
rt               1154 net/ipv4/route.c 	rt = __ip_route_output_key(net, &fl4);
rt               1155 net/ipv4/route.c 	if (!IS_ERR(rt)) {
rt               1156 net/ipv4/route.c 		__ip_do_redirect(rt, skb, &fl4, false);
rt               1157 net/ipv4/route.c 		ip_rt_put(rt);
rt               1166 net/ipv4/route.c 	struct rtable *rt;
rt               1170 net/ipv4/route.c 	rt = __ip_route_output_key(net, &fl4);
rt               1171 net/ipv4/route.c 	if (!IS_ERR(rt)) {
rt               1172 net/ipv4/route.c 		__ip_do_redirect(rt, skb, &fl4, false);
rt               1173 net/ipv4/route.c 		ip_rt_put(rt);
rt               1180 net/ipv4/route.c 	struct rtable *rt = (struct rtable *) dst;
rt               1190 net/ipv4/route.c 	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
rt               1225 net/ipv4/route.c 	struct rtable *rt;
rt               1229 net/ipv4/route.c 	rt = skb_rtable(skb);
rt               1230 net/ipv4/route.c 	if (rt)
rt               1231 net/ipv4/route.c 		dst_set_expires(&rt->dst, 0);
rt               1253 net/ipv4/route.c void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
rt               1257 net/ipv4/route.c 	if (rt_is_output_route(rt))
rt               1266 net/ipv4/route.c 			.flowi4_oif = rt->dst.dev->ifindex,
rt               1272 net/ipv4/route.c 		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
rt               1273 net/ipv4/route.c 			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
rt               1275 net/ipv4/route.c 			src = inet_select_addr(rt->dst.dev,
rt               1276 net/ipv4/route.c 					       rt_nexthop(rt, iph->daddr),
rt               1284 net/ipv4/route.c static void set_class_tag(struct rtable *rt, u32 tag)
rt               1286 net/ipv4/route.c 	if (!(rt->dst.tclassid & 0xFFFF))
rt               1287 net/ipv4/route.c 		rt->dst.tclassid |= tag & 0xFFFF;
rt               1288 net/ipv4/route.c 	if (!(rt->dst.tclassid & 0xFFFF0000))
rt               1289 net/ipv4/route.c 		rt->dst.tclassid |= tag & 0xFFFF0000;
rt               1304 net/ipv4/route.c 	const struct rtable *rt = (const struct rtable *) dst;
rt               1305 net/ipv4/route.c 	unsigned int mtu = rt->rt_pmtu;
rt               1307 net/ipv4/route.c 	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
rt               1316 net/ipv4/route.c 		if (rt->rt_uses_gateway && mtu > 576)
rt               1416 net/ipv4/route.c static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
rt               1426 net/ipv4/route.c 		int genid = fnhe_genid(dev_net(rt->dst.dev));
rt               1428 net/ipv4/route.c 		if (rt_is_input_route(rt))
rt               1443 net/ipv4/route.c 		fill_route_from_fnhe(rt, fnhe);
rt               1444 net/ipv4/route.c 		if (!rt->rt_gw4) {
rt               1445 net/ipv4/route.c 			rt->rt_gw4 = daddr;
rt               1446 net/ipv4/route.c 			rt->rt_gw_family = AF_INET;
rt               1450 net/ipv4/route.c 			dst_hold(&rt->dst);
rt               1451 net/ipv4/route.c 			rcu_assign_pointer(*porig, rt);
rt               1466 net/ipv4/route.c static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
rt               1471 net/ipv4/route.c 	if (rt_is_input_route(rt)) {
rt               1481 net/ipv4/route.c 	dst_hold(&rt->dst);
rt               1482 net/ipv4/route.c 	prev = cmpxchg(p, orig, rt);
rt               1489 net/ipv4/route.c 		dst_release(&rt->dst);
rt               1503 net/ipv4/route.c void rt_add_uncached_list(struct rtable *rt)
rt               1507 net/ipv4/route.c 	rt->rt_uncached_list = ul;
rt               1510 net/ipv4/route.c 	list_add_tail(&rt->rt_uncached, &ul->head);
rt               1514 net/ipv4/route.c void rt_del_uncached_list(struct rtable *rt)
rt               1516 net/ipv4/route.c 	if (!list_empty(&rt->rt_uncached)) {
rt               1517 net/ipv4/route.c 		struct uncached_list *ul = rt->rt_uncached_list;
rt               1520 net/ipv4/route.c 		list_del(&rt->rt_uncached);
rt               1527 net/ipv4/route.c 	struct rtable *rt = (struct rtable *)dst;
rt               1530 net/ipv4/route.c 	rt_del_uncached_list(rt);
rt               1535 net/ipv4/route.c 	struct rtable *rt;
rt               1542 net/ipv4/route.c 		list_for_each_entry(rt, &ul->head, rt_uncached) {
rt               1543 net/ipv4/route.c 			if (rt->dst.dev != dev)
rt               1545 net/ipv4/route.c 			rt->dst.dev = blackhole_netdev;
rt               1546 net/ipv4/route.c 			dev_hold(rt->dst.dev);
rt               1553 net/ipv4/route.c static bool rt_cache_valid(const struct rtable *rt)
rt               1555 net/ipv4/route.c 	return	rt &&
rt               1556 net/ipv4/route.c 		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
rt               1557 net/ipv4/route.c 		!rt_is_expired(rt);
rt               1560 net/ipv4/route.c static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
rt               1572 net/ipv4/route.c 			rt->rt_uses_gateway = 1;
rt               1573 net/ipv4/route.c 			rt->rt_gw_family = nhc->nhc_gw_family;
rt               1576 net/ipv4/route.c 				rt->rt_gw4 = nhc->nhc_gw.ipv4;
rt               1578 net/ipv4/route.c 				rt->rt_gw6 = nhc->nhc_gw.ipv6;
rt               1581 net/ipv4/route.c 		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
rt               1588 net/ipv4/route.c 			rt->dst.tclassid = nh->nh_tclassid;
rt               1591 net/ipv4/route.c 		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
rt               1593 net/ipv4/route.c 			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
rt               1595 net/ipv4/route.c 			cached = rt_cache_route(nhc, rt);
rt               1602 net/ipv4/route.c 			if (!rt->rt_gw4) {
rt               1603 net/ipv4/route.c 				rt->rt_gw_family = AF_INET;
rt               1604 net/ipv4/route.c 				rt->rt_gw4 = daddr;
rt               1606 net/ipv4/route.c 			rt_add_uncached_list(rt);
rt               1609 net/ipv4/route.c 		rt_add_uncached_list(rt);
rt               1613 net/ipv4/route.c 	set_class_tag(rt, res->tclassid);
rt               1615 net/ipv4/route.c 	set_class_tag(rt, itag);
rt               1623 net/ipv4/route.c 	struct rtable *rt;
rt               1625 net/ipv4/route.c 	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
rt               1630 net/ipv4/route.c 	if (rt) {
rt               1631 net/ipv4/route.c 		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
rt               1632 net/ipv4/route.c 		rt->rt_flags = flags;
rt               1633 net/ipv4/route.c 		rt->rt_type = type;
rt               1634 net/ipv4/route.c 		rt->rt_is_input = 0;
rt               1635 net/ipv4/route.c 		rt->rt_iif = 0;
rt               1636 net/ipv4/route.c 		rt->rt_pmtu = 0;
rt               1637 net/ipv4/route.c 		rt->rt_mtu_locked = 0;
rt               1638 net/ipv4/route.c 		rt->rt_uses_gateway = 0;
rt               1639 net/ipv4/route.c 		rt->rt_gw_family = 0;
rt               1640 net/ipv4/route.c 		rt->rt_gw4 = 0;
rt               1641 net/ipv4/route.c 		INIT_LIST_HEAD(&rt->rt_uncached);
rt               1643 net/ipv4/route.c 		rt->dst.output = ip_output;
rt               1645 net/ipv4/route.c 			rt->dst.input = ip_local_deliver;
rt               1648 net/ipv4/route.c 	return rt;
rt               1652 net/ipv4/route.c struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
rt               1657 net/ipv4/route.c 			   rt->dst.flags);
rt               1661 net/ipv4/route.c 		new_rt->rt_flags = rt->rt_flags;
rt               1662 net/ipv4/route.c 		new_rt->rt_type = rt->rt_type;
rt               1663 net/ipv4/route.c 		new_rt->rt_is_input = rt->rt_is_input;
rt               1664 net/ipv4/route.c 		new_rt->rt_iif = rt->rt_iif;
rt               1665 net/ipv4/route.c 		new_rt->rt_pmtu = rt->rt_pmtu;
rt               1666 net/ipv4/route.c 		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
rt               1667 net/ipv4/route.c 		new_rt->rt_gw_family = rt->rt_gw_family;
rt               1668 net/ipv4/route.c 		if (rt->rt_gw_family == AF_INET)
rt               1669 net/ipv4/route.c 			new_rt->rt_gw4 = rt->rt_gw4;
rt               1670 net/ipv4/route.c 		else if (rt->rt_gw_family == AF_INET6)
rt               1671 net/ipv4/route.c 			new_rt->rt_gw6 = rt->rt_gw6;
rt               1675 net/ipv4/route.c 		new_rt->dst.input = rt->dst.input;
rt               1676 net/ipv4/route.c 		new_rt->dst.output = rt->dst.output;
rt               1677 net/ipv4/route.c 		new_rt->dst.error = rt->dst.error;
rt               1679 net/ipv4/route.c 		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
rt               2681 net/ipv4/route.c 	struct rtable *rt;
rt               2683 net/ipv4/route.c 	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
rt               2684 net/ipv4/route.c 	if (rt) {
rt               2685 net/ipv4/route.c 		struct dst_entry *new = &rt->dst;
rt               2695 net/ipv4/route.c 		rt->rt_is_input = ort->rt_is_input;
rt               2696 net/ipv4/route.c 		rt->rt_iif = ort->rt_iif;
rt               2697 net/ipv4/route.c 		rt->rt_pmtu = ort->rt_pmtu;
rt               2698 net/ipv4/route.c 		rt->rt_mtu_locked = ort->rt_mtu_locked;
rt               2700 net/ipv4/route.c 		rt->rt_genid = rt_genid_ipv4(net);
rt               2701 net/ipv4/route.c 		rt->rt_flags = ort->rt_flags;
rt               2702 net/ipv4/route.c 		rt->rt_type = ort->rt_type;
rt               2703 net/ipv4/route.c 		rt->rt_uses_gateway = ort->rt_uses_gateway;
rt               2704 net/ipv4/route.c 		rt->rt_gw_family = ort->rt_gw_family;
rt               2705 net/ipv4/route.c 		if (rt->rt_gw_family == AF_INET)
rt               2706 net/ipv4/route.c 			rt->rt_gw4 = ort->rt_gw4;
rt               2707 net/ipv4/route.c 		else if (rt->rt_gw_family == AF_INET6)
rt               2708 net/ipv4/route.c 			rt->rt_gw6 = ort->rt_gw6;
rt               2710 net/ipv4/route.c 		INIT_LIST_HEAD(&rt->rt_uncached);
rt               2715 net/ipv4/route.c 	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
rt               2721 net/ipv4/route.c 	struct rtable *rt = __ip_route_output_key(net, flp4);
rt               2723 net/ipv4/route.c 	if (IS_ERR(rt))
rt               2724 net/ipv4/route.c 		return rt;
rt               2727 net/ipv4/route.c 		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
rt               2731 net/ipv4/route.c 	return rt;
rt               2737 net/ipv4/route.c 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
rt               2759 net/ipv4/route.c 	r->rtm_type	= rt->rt_type;
rt               2762 net/ipv4/route.c 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
rt               2763 net/ipv4/route.c 	if (rt->rt_flags & RTCF_NOTIFY)
rt               2775 net/ipv4/route.c 	if (rt->dst.dev &&
rt               2776 net/ipv4/route.c 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
rt               2779 net/ipv4/route.c 	if (rt->dst.tclassid &&
rt               2780 net/ipv4/route.c 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
rt               2783 net/ipv4/route.c 	if (fl4 && !rt_is_input_route(rt) &&
rt               2788 net/ipv4/route.c 	if (rt->rt_uses_gateway) {
rt               2789 net/ipv4/route.c 		if (rt->rt_gw_family == AF_INET &&
rt               2790 net/ipv4/route.c 		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
rt               2792 net/ipv4/route.c 		} else if (rt->rt_gw_family == AF_INET6) {
rt               2803 net/ipv4/route.c 			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
rt               2807 net/ipv4/route.c 	expires = rt->dst.expires;
rt               2817 net/ipv4/route.c 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
rt               2818 net/ipv4/route.c 	if (rt->rt_pmtu && expires)
rt               2819 net/ipv4/route.c 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
rt               2820 net/ipv4/route.c 	if (rt->rt_mtu_locked && expires)
rt               2836 net/ipv4/route.c 		if (rt_is_input_route(rt)) {
rt               2857 net/ipv4/route.c 	error = rt->dst.error;
rt               2859 net/ipv4/route.c 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
rt               2882 net/ipv4/route.c 			struct rtable *rt;
rt               2895 net/ipv4/route.c 			rt = rcu_dereference(fnhe->fnhe_rth_input);
rt               2896 net/ipv4/route.c 			if (!rt)
rt               2897 net/ipv4/route.c 				rt = rcu_dereference(fnhe->fnhe_rth_output);
rt               2898 net/ipv4/route.c 			if (!rt)
rt               2901 net/ipv4/route.c 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
rt               3084 net/ipv4/route.c 	struct rtable *rt = NULL;
rt               3155 net/ipv4/route.c 		rt = skb_rtable(skb);
rt               3156 net/ipv4/route.c 		if (err == 0 && rt->dst.error)
rt               3157 net/ipv4/route.c 			err = -rt->dst.error;
rt               3161 net/ipv4/route.c 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
rt               3163 net/ipv4/route.c 		if (IS_ERR(rt))
rt               3164 net/ipv4/route.c 			err = PTR_ERR(rt);
rt               3166 net/ipv4/route.c 			skb_dst_set(skb, &rt->dst);
rt               3173 net/ipv4/route.c 		rt->rt_flags |= RTCF_NOTIFY;
rt               3193 net/ipv4/route.c 				    rt->rt_type, res.prefix, res.prefixlen,
rt               3196 net/ipv4/route.c 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
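
Among the net/ipv4/route.c entries above, ipv4_update_pmtu() and ipv4_redirect() share one shape: build a flow key, do a plain __ip_route_output_key() lookup, apply the exception (__ip_rt_update_pmtu() or __ip_do_redirect()) to the returned route, and drop the reference. A sketch of that shared shape; the update() callback is a stand-in, since the two real helpers take different extra arguments:

/* Sketch: the lookup/update/release shape shared by ipv4_update_pmtu() and
 * ipv4_redirect() in the route.c lines above.
 */
static void update_route_exception_sketch(struct net *net, struct flowi4 *fl4,
					  void (*update)(struct rtable *rt,
							 struct flowi4 *fl4))
{
	struct rtable *rt;

	rt = __ip_route_output_key(net, fl4);
	if (IS_ERR(rt))
		return;

	update(rt, fl4);	/* record the new PMTU or install the redirect */
	ip_rt_put(rt);		/* drop the reference taken by the lookup */
}
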
rt                295 net/ipv4/syncookies.c 	struct rtable *rt;
rt                381 net/ipv4/syncookies.c 	rt = ip_route_output_key(sock_net(sk), &fl4);
rt                382 net/ipv4/syncookies.c 	if (IS_ERR(rt)) {
rt                388 net/ipv4/syncookies.c 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
rt                393 net/ipv4/syncookies.c 				  dst_metric(&rt->dst, RTAX_INITRWND));
rt                396 net/ipv4/syncookies.c 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
rt                398 net/ipv4/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
rt                207 net/ipv4/tcp_ipv4.c 	struct rtable *rt;
rt                230 net/ipv4/tcp_ipv4.c 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
rt                234 net/ipv4/tcp_ipv4.c 	if (IS_ERR(rt)) {
rt                235 net/ipv4/tcp_ipv4.c 		err = PTR_ERR(rt);
rt                241 net/ipv4/tcp_ipv4.c 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
rt                242 net/ipv4/tcp_ipv4.c 		ip_rt_put(rt);
rt                282 net/ipv4/tcp_ipv4.c 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
rt                284 net/ipv4/tcp_ipv4.c 	if (IS_ERR(rt)) {
rt                285 net/ipv4/tcp_ipv4.c 		err = PTR_ERR(rt);
rt                286 net/ipv4/tcp_ipv4.c 		rt = NULL;
rt                291 net/ipv4/tcp_ipv4.c 	sk_setup_caps(sk, &rt->dst);
rt                292 net/ipv4/tcp_ipv4.c 	rt = NULL;
rt                326 net/ipv4/tcp_ipv4.c 	ip_rt_put(rt);
rt                974 net/ipv4/udp.c 	struct rtable *rt = NULL;
rt               1129 net/ipv4/udp.c 		rt = (struct rtable *)sk_dst_check(sk, 0);
rt               1131 net/ipv4/udp.c 	if (!rt) {
rt               1144 net/ipv4/udp.c 		rt = ip_route_output_flow(net, fl4, sk);
rt               1145 net/ipv4/udp.c 		if (IS_ERR(rt)) {
rt               1146 net/ipv4/udp.c 			err = PTR_ERR(rt);
rt               1147 net/ipv4/udp.c 			rt = NULL;
rt               1154 net/ipv4/udp.c 		if ((rt->rt_flags & RTCF_BROADCAST) &&
rt               1158 net/ipv4/udp.c 			sk_dst_set(sk, dst_clone(&rt->dst));
rt               1174 net/ipv4/udp.c 				  sizeof(struct udphdr), &ipc, &rt,
rt               1205 net/ipv4/udp.c 			     sizeof(struct udphdr), &ipc, &rt,
rt               1216 net/ipv4/udp.c 	ip_rt_put(rt);
rt               1237 net/ipv4/udp.c 		dst_confirm_neigh(&rt->dst, &fl4->daddr);
rt               2286 net/ipv4/udp.c 	struct rtable *rt = skb_rtable(skb);
rt               2327 net/ipv4/udp.c 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
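
The net/ipv4/udp.c entries above show udp_sendmsg()'s route handling: a connected socket first tries sk_dst_check(), an unconnected (or stale) one falls back to ip_route_output_flow(), broadcast routes are rejected unless SO_BROADCAST is set, and a connected socket re-caches the fresh route with sk_dst_set(). A condensed sketch of that selection:

/* Sketch: route selection in udp_sendmsg(), per the udp.c lines above. */
static struct rtable *udp_route_sketch(struct sock *sk, struct net *net,
				       struct flowi4 *fl4, bool connected)
{
	struct rtable *rt = NULL;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt))
			return rt;

		/* Broadcast destinations require SO_BROADCAST. */
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			ip_rt_put(rt);
			return ERR_PTR(-EACCES);
		}
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));	/* cache for next send */
	}
	return rt;
}
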
rt                173 net/ipv4/udp_tunnel.c void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
rt                192 net/ipv4/udp_tunnel.c 	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
rt                 26 net/ipv4/xfrm4_policy.c 	struct rtable *rt;
rt                 38 net/ipv4/xfrm4_policy.c 	rt = __ip_route_output_key(net, fl4);
rt                 39 net/ipv4/xfrm4_policy.c 	if (!IS_ERR(rt))
rt                 40 net/ipv4/xfrm4_policy.c 		return &rt->dst;
rt                 42 net/ipv4/xfrm4_policy.c 	return ERR_CAST(rt);
rt                 74 net/ipv4/xfrm4_policy.c 	struct rtable *rt = (struct rtable *)xdst->route;
rt                 77 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_iif = fl4->flowi4_iif;
rt                 84 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_is_input = rt->rt_is_input;
rt                 85 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
rt                 87 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_type = rt->rt_type;
rt                 88 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
rt                 89 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_gw_family = rt->rt_gw_family;
rt                 90 net/ipv4/xfrm4_policy.c 	if (rt->rt_gw_family == AF_INET)
rt                 91 net/ipv4/xfrm4_policy.c 		xdst->u.rt.rt_gw4 = rt->rt_gw4;
rt                 92 net/ipv4/xfrm4_policy.c 	else if (rt->rt_gw_family == AF_INET6)
rt                 93 net/ipv4/xfrm4_policy.c 		xdst->u.rt.rt_gw6 = rt->rt_gw6;
rt                 94 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
rt                 95 net/ipv4/xfrm4_policy.c 	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
rt                 96 net/ipv4/xfrm4_policy.c 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
rt                 97 net/ipv4/xfrm4_policy.c 	rt_add_uncached_list(&xdst->u.rt);
rt                126 net/ipv4/xfrm4_policy.c 	if (xdst->u.rt.rt_uncached_list)
rt                127 net/ipv4/xfrm4_policy.c 		rt_del_uncached_list(&xdst->u.rt);
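The xfrm4_policy.c entries contain two recurring shapes: the lookup that returns either the route's embedded dst or an ERR_CAST of the error pointer (lines 26-42), and the field-by-field copy of rt_* state into the bundled xdst->u.rt (lines 74-97). A commented sketch of the first; the wrapper name is invented for illustration:

#include <linux/err.h>
#include <net/route.h>

/* Sketch of the __ip_route_output_key()/ERR_CAST() shape indexed above for
 * net/ipv4/xfrm4_policy.c; example_v4_dst_lookup() is not a kernel symbol. */
static struct dst_entry *example_v4_dst_lookup(struct net *net,
					       struct flowi4 *fl4)
{
	struct rtable *rt = __ip_route_output_key(net, fl4);

	if (!IS_ERR(rt))
		return &rt->dst;	/* hand back the embedded dst_entry */

	/* Keep the encoded errno but give it the type the caller expects. */
	return ERR_CAST(rt);
}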
rt                111 net/ipv6/addrconf.c static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
rt                114 net/ipv6/addrconf.c 	u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
rt               1119 net/ipv6/addrconf.c 	ifa->rt = f6i;
rt               2409 net/ipv6/addrconf.c 	struct fib6_info *rt = NULL;
rt               2424 net/ipv6/addrconf.c 		if (rt->nh)
rt               2427 net/ipv6/addrconf.c 		if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
rt               2429 net/ipv6/addrconf.c 		if (no_gw && rt->fib6_nh->fib_nh_gw_family)
rt               2431 net/ipv6/addrconf.c 		if ((rt->fib6_flags & flags) != flags)
rt               2433 net/ipv6/addrconf.c 		if ((rt->fib6_flags & noflags) != 0)
rt               2435 net/ipv6/addrconf.c 		if (!fib6_info_hold_safe(rt))
rt               2441 net/ipv6/addrconf.c 	return rt;
rt               2707 net/ipv6/addrconf.c 		struct fib6_info *rt;
rt               2723 net/ipv6/addrconf.c 		rt = addrconf_get_prefix_route(&pinfo->prefix,
rt               2729 net/ipv6/addrconf.c 		if (rt) {
rt               2732 net/ipv6/addrconf.c 				ip6_del_rt(net, rt);
rt               2733 net/ipv6/addrconf.c 				rt = NULL;
rt               2736 net/ipv6/addrconf.c 				fib6_set_expires(rt, jiffies + rt_expires);
rt               2738 net/ipv6/addrconf.c 				fib6_clean_expires(rt);
rt               2752 net/ipv6/addrconf.c 		fib6_info_release(rt);
rt               3431 net/ipv6/addrconf.c 	if (!ifp->rt || !ifp->rt->fib6_node) {
rt               3441 net/ipv6/addrconf.c 		prev = ifp->rt;
rt               3442 net/ipv6/addrconf.c 		ifp->rt = f6i;
rt               3801 net/ipv6/addrconf.c 		struct fib6_info *rt = NULL;
rt               3819 net/ipv6/addrconf.c 			rt = ifa->rt;
rt               3820 net/ipv6/addrconf.c 			ifa->rt = NULL;
rt               3828 net/ipv6/addrconf.c 		if (rt)
rt               3829 net/ipv6/addrconf.c 			ip6_del_rt(net, rt);
rt               3996 net/ipv6/addrconf.c 		ip6_ins_rt(net, ifp->rt);
rt               6010 net/ipv6/addrconf.c 		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
rt               6011 net/ipv6/addrconf.c 			ip6_ins_rt(net, ifp->rt);
rt               6012 net/ipv6/addrconf.c 		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
rt               6029 net/ipv6/addrconf.c 			struct fib6_info *rt;
rt               6031 net/ipv6/addrconf.c 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
rt               6034 net/ipv6/addrconf.c 			if (rt)
rt               6035 net/ipv6/addrconf.c 				ip6_del_rt(net, rt);
rt               6037 net/ipv6/addrconf.c 		if (ifp->rt) {
rt               6038 net/ipv6/addrconf.c 			ip6_del_rt(net, ifp->rt);
rt               6039 net/ipv6/addrconf.c 			ifp->rt = NULL;
rt               6380 net/ipv6/addrconf.c void addrconf_set_nopolicy(struct rt6_info *rt, int action)
rt               6382 net/ipv6/addrconf.c 	if (rt) {
rt               6384 net/ipv6/addrconf.c 			rt->dst.flags |= DST_NOPOLICY;
rt               6386 net/ipv6/addrconf.c 			rt->dst.flags &= ~DST_NOPOLICY;
rt               6398 net/ipv6/addrconf.c 		if (ifa->rt) {
rt               6400 net/ipv6/addrconf.c 			struct fib6_nh *nh = ifa->rt->fib6_nh;
rt               6404 net/ipv6/addrconf.c 			ifa->rt->dst_nopolicy = val ? true : false;
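Within the addrconf.c entries, the prefix-route block (lines 2707-2752) follows one lifetime rule: a zero valid lifetime deletes the route, a finite one re-arms its expiry, an infinite one clears any pending expiry, and the reference taken by addrconf_get_prefix_route() is dropped. A hedged condensation, assuming kernel context; the helper name and the explicit "infinite" constant are simplifications:

#include <linux/jiffies.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>

/* Illustrative condensation of the lifetime handling indexed above for
 * net/ipv6/addrconf.c; not the kernel's prefix-receive code itself. */
static void example_update_prefix_route(struct net *net, struct fib6_info *rt,
					u32 valid_lft, unsigned long rt_expires)
{
	if (!rt)
		return;

	if (valid_lft == 0) {
		/* Prefix withdrawn: ip6_del_rt() also drops the reference. */
		ip6_del_rt(net, rt);
		return;
	}

	if (valid_lft != 0xffffffff)		/* finite lifetime */
		fib6_set_expires(rt, jiffies + rt_expires);
	else					/* infinite lifetime */
		fib6_clean_expires(rt);

	/* Drop the reference returned by addrconf_get_prefix_route(). */
	fib6_info_release(rt);
}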
rt                188 net/ipv6/addrconf_core.c static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt)
rt                 93 net/ipv6/anycast.c 		struct rt6_info *rt;
rt                 95 net/ipv6/anycast.c 		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
rt                 96 net/ipv6/anycast.c 		if (rt) {
rt                 97 net/ipv6/anycast.c 			dev = rt->dst.dev;
rt                 98 net/ipv6/anycast.c 			ip6_rt_put(rt);
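anycast.c here (and mcast.c further down) uses `rt` only to resolve an output device: a bare rt6_lookup(), a read of rt->dst.dev, and an immediate ip6_rt_put(). A small sketch of that idiom, assuming kernel context; the helper name is illustrative:

#include <net/ip6_route.h>

/* Sketch of the rt6_lookup()/ip6_rt_put() device-resolution idiom indexed
 * above for net/ipv6/anycast.c and below for net/ipv6/mcast.c. */
static struct net_device *example_dev_by_route(struct net *net,
					       const struct in6_addr *addr)
{
	struct net_device *dev = NULL;
	struct rt6_info *rt;

	rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
	if (rt) {
		dev = rt->dst.dev;	/* device the route would use */
		ip6_rt_put(rt);		/* lookup took a reference; drop it */
	}

	return dev;
}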
rt                111 net/ipv6/fib6_rules.c 		struct rt6_info *rt;
rt                113 net/ipv6/fib6_rules.c 		rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags);
rt                114 net/ipv6/fib6_rules.c 		if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN)
rt                115 net/ipv6/fib6_rules.c 			return &rt->dst;
rt                116 net/ipv6/fib6_rules.c 		ip6_rt_put_flags(rt, flags);
rt                117 net/ipv6/fib6_rules.c 		rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
rt                118 net/ipv6/fib6_rules.c 		if (rt->dst.error != -EAGAIN)
rt                119 net/ipv6/fib6_rules.c 			return &rt->dst;
rt                120 net/ipv6/fib6_rules.c 		ip6_rt_put_flags(rt, flags);
rt                196 net/ipv6/fib6_rules.c 	struct rt6_info *rt = NULL;
rt                208 net/ipv6/fib6_rules.c 		rt = net->ipv6.ip6_null_entry;
rt                213 net/ipv6/fib6_rules.c 		rt = net->ipv6.ip6_blk_hole_entry;
rt                217 net/ipv6/fib6_rules.c 		rt = net->ipv6.ip6_prohibit_entry;
rt                228 net/ipv6/fib6_rules.c 	rt = lookup(net, table, flp6, arg->lookup_data, flags);
rt                229 net/ipv6/fib6_rules.c 	if (rt != net->ipv6.ip6_null_entry) {
rt                231 net/ipv6/fib6_rules.c 				      ip6_dst_idev(&rt->dst)->dev);
rt                236 net/ipv6/fib6_rules.c 		err = rt->dst.error;
rt                241 net/ipv6/fib6_rules.c 	ip6_rt_put_flags(rt, flags);
rt                243 net/ipv6/fib6_rules.c 	rt = NULL;
rt                248 net/ipv6/fib6_rules.c 		dst_hold(&rt->dst);
rt                250 net/ipv6/fib6_rules.c 	res->rt6 = rt;
rt                266 net/ipv6/fib6_rules.c 	struct rt6_info *rt = res->rt6;
rt                269 net/ipv6/fib6_rules.c 	if (!rt)
rt                272 net/ipv6/fib6_rules.c 	if (rt->rt6i_idev)
rt                273 net/ipv6/fib6_rules.c 		dev = rt->rt6i_idev->dev;
rt                278 net/ipv6/fib6_rules.c 	if (rt->rt6i_dst.plen <= rule->suppress_prefixlen)
rt                291 net/ipv6/fib6_rules.c 		ip6_rt_put(rt);
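The fib6_rules.c block at lines 111-120 is the no-rules fast path: probe the local table, treat -EAGAIN on the returned dst as "not in this table", release, and fall through to the main table. Reassembled below with comments; abridged, since intervening lines without `rt` are not shown in this index:

	/* Local table first; ip6_null_entry or -EAGAIN means "keep looking". */
	rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags);
	if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN)
		return &rt->dst;
	ip6_rt_put_flags(rt, flags);		/* release before retrying */

	/* Main table answers either way, unless it too says -EAGAIN. */
	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
	if (rt->dst.error != -EAGAIN)
		return &rt->dst;
	ip6_rt_put_flags(rt, flags);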
rt                215 net/ipv6/icmp.c 		struct rt6_info *rt = (struct rt6_info *)dst;
rt                220 net/ipv6/icmp.c 		if (rt->rt6i_dst.plen < 128)
rt                221 net/ipv6/icmp.c 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
rt                616 net/ipv6/icmp.c 	struct rt6_info *rt;
rt                636 net/ipv6/icmp.c 	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
rt                639 net/ipv6/icmp.c 	if (rt && rt->dst.dev)
rt                640 net/ipv6/icmp.c 		skb2->dev = rt->dst.dev;
rt                663 net/ipv6/icmp.c 	if (rt)
rt                664 net/ipv6/icmp.c 		ip6_rt_put(rt);
rt                 41 net/ipv6/ila/ila_lwt.c 	struct rt6_info *rt = (struct rt6_info *)orig_dst;
rt                 54 net/ipv6/ila/ila_lwt.c 	if (rt->rt6i_flags & (RTF_GATEWAY | RTF_CACHE)) {
rt                315 net/ipv6/ip6_fib.c 	struct rt6_info *rt;
rt                317 net/ipv6/ip6_fib.c 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
rt                318 net/ipv6/ip6_fib.c 	if (rt->dst.error == -EAGAIN) {
rt                319 net/ipv6/ip6_fib.c 		ip6_rt_put_flags(rt, flags);
rt                320 net/ipv6/ip6_fib.c 		rt = net->ipv6.ip6_null_entry;
rt                322 net/ipv6/ip6_fib.c 			dst_hold(&rt->dst);
rt                325 net/ipv6/ip6_fib.c 	return &rt->dst;
rt                362 net/ipv6/ip6_fib.c 				    struct fib6_info *rt)
rt                365 net/ipv6/ip6_fib.c 		.rt = rt,
rt                373 net/ipv6/ip6_fib.c 			      struct fib6_info *rt,
rt                378 net/ipv6/ip6_fib.c 		.rt = rt,
rt                381 net/ipv6/ip6_fib.c 	rt->fib6_table->fib_seq++;
rt                387 net/ipv6/ip6_fib.c 					struct fib6_info *rt,
rt                393 net/ipv6/ip6_fib.c 		.rt = rt,
rt                397 net/ipv6/ip6_fib.c 	rt->fib6_table->fib_seq++;
rt                406 net/ipv6/ip6_fib.c static void fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
rt                408 net/ipv6/ip6_fib.c 	if (rt == arg->net->ipv6.fib6_null_entry)
rt                410 net/ipv6/ip6_fib.c 	call_fib6_entry_notifier(arg->nb, arg->net, FIB_EVENT_ENTRY_ADD, rt);
rt                415 net/ipv6/ip6_fib.c 	struct fib6_info *rt;
rt                418 net/ipv6/ip6_fib.c 		fib6_rt_dump(rt, w->args);
rt                464 net/ipv6/ip6_fib.c 	struct fib6_info *rt;
rt                467 net/ipv6/ip6_fib.c 		res = rt6_dump_route(rt, w->args, w->skip_in_node);
rt                470 net/ipv6/ip6_fib.c 			w->leaf = rt;
rt                486 net/ipv6/ip6_fib.c 		if (rt->fib6_nsiblings)
rt                487 net/ipv6/ip6_fib.c 			rt = list_last_entry(&rt->fib6_siblings,
rt                971 net/ipv6/ip6_fib.c static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
rt                974 net/ipv6/ip6_fib.c 	struct fib6_table *table = rt->fib6_table;
rt                976 net/ipv6/ip6_fib.c 	fib6_drop_pcpu_from(rt, table);
rt                978 net/ipv6/ip6_fib.c 	if (rt->nh && !list_empty(&rt->nh_list))
rt                979 net/ipv6/ip6_fib.c 		list_del_init(&rt->nh_list);
rt                981 net/ipv6/ip6_fib.c 	if (refcount_read(&rt->fib6_ref) != 1) {
rt                992 net/ipv6/ip6_fib.c 			if (!(fn->fn_flags & RTN_RTINFO) && leaf == rt) {
rt                997 net/ipv6/ip6_fib.c 				fib6_info_release(rt);
rt               1009 net/ipv6/ip6_fib.c static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
rt               1014 net/ipv6/ip6_fib.c 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1023 net/ipv6/ip6_fib.c 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
rt               1034 net/ipv6/ip6_fib.c 				lockdep_is_held(&rt->fib6_table->tb6_lock))) {
rt               1039 net/ipv6/ip6_fib.c 		if (iter->fib6_metric == rt->fib6_metric) {
rt               1057 net/ipv6/ip6_fib.c 			if (rt6_duplicate_nexthop(iter, rt)) {
rt               1058 net/ipv6/ip6_fib.c 				if (rt->fib6_nsiblings)
rt               1059 net/ipv6/ip6_fib.c 					rt->fib6_nsiblings = 0;
rt               1062 net/ipv6/ip6_fib.c 				if (!(rt->fib6_flags & RTF_EXPIRES))
rt               1065 net/ipv6/ip6_fib.c 					fib6_set_expires(iter, rt->expires);
rt               1067 net/ipv6/ip6_fib.c 				if (rt->fib6_pmtu)
rt               1069 net/ipv6/ip6_fib.c 							rt->fib6_pmtu);
rt               1085 net/ipv6/ip6_fib.c 				rt->fib6_nsiblings++;
rt               1088 net/ipv6/ip6_fib.c 		if (iter->fib6_metric > rt->fib6_metric)
rt               1101 net/ipv6/ip6_fib.c 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1110 net/ipv6/ip6_fib.c 	if (rt->fib6_nsiblings) {
rt               1117 net/ipv6/ip6_fib.c 			if (sibling->fib6_metric == rt->fib6_metric &&
rt               1119 net/ipv6/ip6_fib.c 				list_add_tail(&rt->fib6_siblings,
rt               1124 net/ipv6/ip6_fib.c 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1132 net/ipv6/ip6_fib.c 					 &rt->fib6_siblings, fib6_siblings) {
rt               1134 net/ipv6/ip6_fib.c 			BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
rt               1137 net/ipv6/ip6_fib.c 		BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
rt               1154 net/ipv6/ip6_fib.c 							rt, extack);
rt               1161 net/ipv6/ip6_fib.c 				if (!rt->fib6_nsiblings)
rt               1165 net/ipv6/ip6_fib.c 							 &rt->fib6_siblings,
rt               1168 net/ipv6/ip6_fib.c 				rt->fib6_nsiblings = 0;
rt               1169 net/ipv6/ip6_fib.c 				list_del_init(&rt->fib6_siblings);
rt               1175 net/ipv6/ip6_fib.c 		rcu_assign_pointer(rt->fib6_next, iter);
rt               1176 net/ipv6/ip6_fib.c 		fib6_info_hold(rt);
rt               1177 net/ipv6/ip6_fib.c 		rcu_assign_pointer(rt->fib6_node, fn);
rt               1178 net/ipv6/ip6_fib.c 		rcu_assign_pointer(*ins, rt);
rt               1180 net/ipv6/ip6_fib.c 			inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
rt               1201 net/ipv6/ip6_fib.c 							rt, extack);
rt               1206 net/ipv6/ip6_fib.c 		fib6_info_hold(rt);
rt               1207 net/ipv6/ip6_fib.c 		rcu_assign_pointer(rt->fib6_node, fn);
rt               1208 net/ipv6/ip6_fib.c 		rt->fib6_next = iter->fib6_next;
rt               1209 net/ipv6/ip6_fib.c 		rcu_assign_pointer(*ins, rt);
rt               1211 net/ipv6/ip6_fib.c 			inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
rt               1225 net/ipv6/ip6_fib.c 			ins = &rt->fib6_next;
rt               1227 net/ipv6/ip6_fib.c 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1229 net/ipv6/ip6_fib.c 				if (iter->fib6_metric > rt->fib6_metric)
rt               1244 net/ipv6/ip6_fib.c 					lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1253 net/ipv6/ip6_fib.c static void fib6_start_gc(struct net *net, struct fib6_info *rt)
rt               1256 net/ipv6/ip6_fib.c 	    (rt->fib6_flags & RTF_EXPIRES))
rt               1268 net/ipv6/ip6_fib.c static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
rt               1271 net/ipv6/ip6_fib.c 	struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
rt               1272 net/ipv6/ip6_fib.c 				lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1279 net/ipv6/ip6_fib.c 				lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1283 net/ipv6/ip6_fib.c void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt)
rt               1285 net/ipv6/ip6_fib.c 	__fib6_update_sernum_upto_root(rt, fib6_new_sernum(net));
rt               1303 net/ipv6/ip6_fib.c int fib6_add(struct fib6_node *root, struct fib6_info *rt,
rt               1306 net/ipv6/ip6_fib.c 	struct fib6_table *table = rt->fib6_table;
rt               1323 net/ipv6/ip6_fib.c 			&rt->fib6_dst.addr, rt->fib6_dst.plen,
rt               1335 net/ipv6/ip6_fib.c 	if (rt->fib6_src.plen) {
rt               1364 net/ipv6/ip6_fib.c 					&rt->fib6_src.addr, rt->fib6_src.plen,
rt               1383 net/ipv6/ip6_fib.c 					&rt->fib6_src.addr, rt->fib6_src.plen,
rt               1399 net/ipv6/ip6_fib.c 				fib6_info_hold(rt);
rt               1400 net/ipv6/ip6_fib.c 				rcu_assign_pointer(fn->leaf, rt);
rt               1407 net/ipv6/ip6_fib.c 	err = fib6_add_rt2node(fn, rt, info, extack);
rt               1409 net/ipv6/ip6_fib.c 		if (rt->nh)
rt               1410 net/ipv6/ip6_fib.c 			list_add(&rt->nh_list, &rt->nh->f6i_list);
rt               1411 net/ipv6/ip6_fib.c 		__fib6_update_sernum_upto_root(rt, sernum);
rt               1412 net/ipv6/ip6_fib.c 		fib6_start_gc(info->nl_net, rt);
rt               1426 net/ipv6/ip6_fib.c 			if (pn_leaf == rt) {
rt               1429 net/ipv6/ip6_fib.c 				fib6_info_release(rt);
rt               1830 net/ipv6/ip6_fib.c 	struct fib6_info *rt = rcu_dereference_protected(*rtp,
rt               1837 net/ipv6/ip6_fib.c 	*rtp = rt->fib6_next;
rt               1838 net/ipv6/ip6_fib.c 	rt->fib6_node = NULL;
rt               1843 net/ipv6/ip6_fib.c 	rt6_flush_exceptions(rt);
rt               1846 net/ipv6/ip6_fib.c 	if (rcu_access_pointer(fn->rr_ptr) == rt)
rt               1850 net/ipv6/ip6_fib.c 	if (rt->fib6_nsiblings) {
rt               1854 net/ipv6/ip6_fib.c 					 &rt->fib6_siblings, fib6_siblings)
rt               1856 net/ipv6/ip6_fib.c 		rt->fib6_nsiblings = 0;
rt               1857 net/ipv6/ip6_fib.c 		list_del_init(&rt->fib6_siblings);
rt               1864 net/ipv6/ip6_fib.c 		if (w->state == FWS_C && w->leaf == rt) {
rt               1866 net/ipv6/ip6_fib.c 			w->leaf = rcu_dereference_protected(rt->fib6_next,
rt               1886 net/ipv6/ip6_fib.c 	fib6_purge_rt(rt, fn, net);
rt               1889 net/ipv6/ip6_fib.c 		call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, rt, NULL);
rt               1891 net/ipv6/ip6_fib.c 		inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
rt               1893 net/ipv6/ip6_fib.c 	fib6_info_release(rt);
rt               1897 net/ipv6/ip6_fib.c int fib6_del(struct fib6_info *rt, struct nl_info *info)
rt               1899 net/ipv6/ip6_fib.c 	struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
rt               1900 net/ipv6/ip6_fib.c 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               1901 net/ipv6/ip6_fib.c 	struct fib6_table *table = rt->fib6_table;
rt               1906 net/ipv6/ip6_fib.c 	if (!fn || rt == net->ipv6.fib6_null_entry)
rt               1918 net/ipv6/ip6_fib.c 		if (rt == cur) {
rt               2060 net/ipv6/ip6_fib.c 	struct fib6_info *rt;
rt               2078 net/ipv6/ip6_fib.c 		res = c->func(rt, c->arg);
rt               2080 net/ipv6/ip6_fib.c 			w->leaf = rt;
rt               2081 net/ipv6/ip6_fib.c 			res = fib6_del(rt, &info);
rt               2085 net/ipv6/ip6_fib.c 					 __func__, rt,
rt               2086 net/ipv6/ip6_fib.c 					 rcu_access_pointer(rt->fib6_node),
rt               2093 net/ipv6/ip6_fib.c 			if (WARN_ON(!rt->fib6_nsiblings))
rt               2095 net/ipv6/ip6_fib.c 			rt = list_last_entry(&rt->fib6_siblings,
rt               2101 net/ipv6/ip6_fib.c 	w->leaf = rt;
rt               2179 net/ipv6/ip6_fib.c static int fib6_age(struct fib6_info *rt, void *arg)
rt               2189 net/ipv6/ip6_fib.c 	if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
rt               2190 net/ipv6/ip6_fib.c 		if (time_after(now, rt->expires)) {
rt               2191 net/ipv6/ip6_fib.c 			RT6_TRACE("expiring %p\n", rt);
rt               2201 net/ipv6/ip6_fib.c 	rt6_age_exceptions(rt, gc_args, now);
rt               2374 net/ipv6/ip6_fib.c 	struct fib6_info *rt = v;
rt               2376 net/ipv6/ip6_fib.c 	struct fib6_nh *fib6_nh = rt->fib6_nh;
rt               2377 net/ipv6/ip6_fib.c 	unsigned int flags = rt->fib6_flags;
rt               2380 net/ipv6/ip6_fib.c 	if (rt->nh)
rt               2381 net/ipv6/ip6_fib.c 		fib6_nh = nexthop_fib6_nh(rt->nh);
rt               2383 net/ipv6/ip6_fib.c 	seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
rt               2386 net/ipv6/ip6_fib.c 	seq_printf(seq, "%pi6 %02x ", &rt->fib6_src.addr, rt->fib6_src.plen);
rt               2399 net/ipv6/ip6_fib.c 		   rt->fib6_metric, refcount_read(&rt->fib6_ref), 0,
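The long ip6_fib.c block is dominated by fib6_add_rt2node() and fib6_del_route(), and the core of the insert path is a short hold-then-publish sequence: link the new entry ahead of its successor, take a tree reference, set the node back-pointer, and only then make the entry visible to RCU readers before notifying. Reassembled from the fragments at lines 1175-1180 with comments; the notification is conditional in the real code:

	rcu_assign_pointer(rt->fib6_next, iter);	/* new entry -> successor */
	fib6_info_hold(rt);				/* the tree holds a reference */
	rcu_assign_pointer(rt->fib6_node, fn);		/* back-pointer to its node */
	rcu_assign_pointer(*ins, rt);			/* publish: readers may now see it */
	inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);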
rt               1112 net/ipv6/ip6_gre.c 		struct rt6_info *rt = rt6_lookup(t->net,
rt               1116 net/ipv6/ip6_gre.c 		if (!rt)
rt               1119 net/ipv6/ip6_gre.c 		if (rt->dst.dev) {
rt               1120 net/ipv6/ip6_gre.c 			dev->needed_headroom = rt->dst.dev->hard_header_len +
rt               1124 net/ipv6/ip6_gre.c 				dev->mtu = rt->dst.dev->mtu - t_hlen;
rt               1134 net/ipv6/ip6_gre.c 		ip6_rt_put(rt);
rt                505 net/ipv6/ip6_output.c 		struct rt6_info *rt;
rt                512 net/ipv6/ip6_output.c 		rt = (struct rt6_info *) dst;
rt                513 net/ipv6/ip6_output.c 		if (rt->rt6i_flags & RTF_GATEWAY)
rt                514 net/ipv6/ip6_output.c 			target = &rt->rt6i_gateway;
rt                766 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
rt                817 net/ipv6/ip6_output.c 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
rt                862 net/ipv6/ip6_output.c 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
rt                874 net/ipv6/ip6_output.c 			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
rt                881 net/ipv6/ip6_output.c 		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
rt                900 net/ipv6/ip6_output.c 	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
rt                901 net/ipv6/ip6_output.c 		      LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
rt                958 net/ipv6/ip6_output.c 	struct rt6_info *rt;
rt                968 net/ipv6/ip6_output.c 	rt = (struct rt6_info *)dst;
rt                986 net/ipv6/ip6_output.c 	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
rt                988 net/ipv6/ip6_output.c 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
rt               1005 net/ipv6/ip6_output.c 	struct rt6_info *rt;
rt               1021 net/ipv6/ip6_output.c 		struct rt6_info *rt;
rt               1026 net/ipv6/ip6_output.c 		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
rt               1029 net/ipv6/ip6_output.c 		from = rt ? rcu_dereference(rt->from) : NULL;
rt               1067 net/ipv6/ip6_output.c 	rt = (struct rt6_info *) *dst;
rt               1069 net/ipv6/ip6_output.c 	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
rt               1070 net/ipv6/ip6_output.c 				      rt6_nexthop(rt, &fl6->daddr));
rt               1215 net/ipv6/ip6_output.c 				struct rt6_info *rt,
rt               1218 net/ipv6/ip6_output.c 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
rt               1221 net/ipv6/ip6_output.c 			*mtu = orig_mtu - rt->dst.header_len;
rt               1237 net/ipv6/ip6_output.c 			  struct rt6_info *rt, struct flowi6 *fl6)
rt               1280 net/ipv6/ip6_output.c 	dst_hold(&rt->dst);
rt               1281 net/ipv6/ip6_output.c 	cork->base.dst = &rt->dst;
rt               1285 net/ipv6/ip6_output.c 	if (rt->dst.flags & DST_XFRM_TUNNEL)
rt               1287 net/ipv6/ip6_output.c 		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
rt               1290 net/ipv6/ip6_output.c 			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
rt               1303 net/ipv6/ip6_output.c 	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
rt               1333 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)cork->dst;
rt               1343 net/ipv6/ip6_output.c 		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
rt               1354 net/ipv6/ip6_output.c 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
rt               1356 net/ipv6/ip6_output.c 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
rt               1363 net/ipv6/ip6_output.c 		     (dst_allfrag(&rt->dst) ?
rt               1365 net/ipv6/ip6_output.c 		     rt->rt6i_nfheader_len;
rt               1400 net/ipv6/ip6_output.c 	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
rt               1408 net/ipv6/ip6_output.c 		if (rt->dst.dev->features & NETIF_F_SG &&
rt               1459 net/ipv6/ip6_output.c 						    fragheaderlen, skb, rt,
rt               1471 net/ipv6/ip6_output.c 				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
rt               1476 net/ipv6/ip6_output.c 			    !(rt->dst.dev->features&NETIF_F_SG))
rt               1492 net/ipv6/ip6_output.c 				datalen += rt->dst.trailer_len;
rt               1495 net/ipv6/ip6_output.c 			alloclen += rt->dst.trailer_len;
rt               1591 net/ipv6/ip6_output.c 		if (!(rt->dst.dev->features&NETIF_F_SG) &&
rt               1651 net/ipv6/ip6_output.c 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
rt               1661 net/ipv6/ip6_output.c 		    struct rt6_info *rt, unsigned int flags)
rt               1675 net/ipv6/ip6_output.c 				     ipc6, rt, fl6);
rt               1725 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
rt               1775 net/ipv6/ip6_output.c 	skb_dst_set(skb, dst_clone(&rt->dst));
rt               1776 net/ipv6/ip6_output.c 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
rt               1792 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
rt               1800 net/ipv6/ip6_output.c 			IP6_INC_STATS(net, rt->rt6i_idev,
rt               1848 net/ipv6/ip6_output.c 			     struct rt6_info *rt, unsigned int flags,
rt               1866 net/ipv6/ip6_output.c 	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
rt                570 net/ipv6/ip6_tunnel.c 	struct rtable *rt;
rt                612 net/ipv6/ip6_tunnel.c 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
rt                614 net/ipv6/ip6_tunnel.c 	if (IS_ERR(rt))
rt                617 net/ipv6/ip6_tunnel.c 	skb2->dev = rt->dst.dev;
rt                618 net/ipv6/ip6_tunnel.c 	ip_rt_put(rt);
rt                621 net/ipv6/ip6_tunnel.c 	if (rt->rt_flags & RTCF_LOCAL) {
rt                622 net/ipv6/ip6_tunnel.c 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
rt                625 net/ipv6/ip6_tunnel.c 		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
rt                626 net/ipv6/ip6_tunnel.c 			if (!IS_ERR(rt))
rt                627 net/ipv6/ip6_tunnel.c 				ip_rt_put(rt);
rt                630 net/ipv6/ip6_tunnel.c 		skb_dst_set(skb2, &rt->dst);
rt                668 net/ipv6/ip6_tunnel.c 		struct rt6_info *rt;
rt                679 net/ipv6/ip6_tunnel.c 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
rt                682 net/ipv6/ip6_tunnel.c 		if (rt && rt->dst.dev)
rt                683 net/ipv6/ip6_tunnel.c 			skb2->dev = rt->dst.dev;
rt                687 net/ipv6/ip6_tunnel.c 		ip6_rt_put(rt);
rt               1457 net/ipv6/ip6_tunnel.c 		struct rt6_info *rt = rt6_lookup(t->net,
rt               1461 net/ipv6/ip6_tunnel.c 		if (!rt)
rt               1464 net/ipv6/ip6_tunnel.c 		if (rt->dst.dev) {
rt               1465 net/ipv6/ip6_tunnel.c 			dev->hard_header_len = rt->dst.dev->hard_header_len +
rt               1468 net/ipv6/ip6_tunnel.c 			dev->mtu = rt->dst.dev->mtu - t_hlen;
rt               1475 net/ipv6/ip6_tunnel.c 		ip6_rt_put(rt);
rt                455 net/ipv6/ip6_vti.c 			struct rtable *rt;
rt                459 net/ipv6/ip6_vti.c 			rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
rt                460 net/ipv6/ip6_vti.c 			if (IS_ERR(rt))
rt                462 net/ipv6/ip6_vti.c 			dst = &rt->dst;
rt                672 net/ipv6/ip6_vti.c 		struct rt6_info *rt = rt6_lookup(t->net,
rt                676 net/ipv6/ip6_vti.c 		if (rt)
rt                677 net/ipv6/ip6_vti.c 			tdev = rt->dst.dev;
rt                678 net/ipv6/ip6_vti.c 		ip6_rt_put(rt);
rt               2256 net/ipv6/ip6mr.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
rt               2263 net/ipv6/ip6mr.c 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
rt               2268 net/ipv6/ip6mr.c 			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
rt               2306 net/ipv6/ip6mr.c 		iph->saddr = rt->rt6i_src.addr;
rt               2307 net/ipv6/ip6mr.c 		iph->daddr = rt->rt6i_dst.addr;
rt                166 net/ipv6/mcast.c 		struct rt6_info *rt;
rt                167 net/ipv6/mcast.c 		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
rt                168 net/ipv6/mcast.c 		if (rt) {
rt                169 net/ipv6/mcast.c 			dev = rt->dst.dev;
rt                170 net/ipv6/mcast.c 			ip6_rt_put(rt);
rt                267 net/ipv6/mcast.c 		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
rt                269 net/ipv6/mcast.c 		if (rt) {
rt                270 net/ipv6/mcast.c 			dev = rt->dst.dev;
rt                271 net/ipv6/mcast.c 			ip6_rt_put(rt);
rt                406 net/ipv6/mip6.c 				struct ipv6_rt_hdr *rt;
rt                407 net/ipv6/mip6.c 				rt = (struct ipv6_rt_hdr *)(nh + offset);
rt                408 net/ipv6/mip6.c 				if (rt->type != 0)
rt               1172 net/ipv6/ndisc.c 	struct fib6_info *rt = NULL;
rt               1290 net/ipv6/ndisc.c 	rt = rt6_get_dflt_router(net, &ipv6_hdr(skb)->saddr, skb->dev);
rt               1291 net/ipv6/ndisc.c 	if (rt) {
rt               1292 net/ipv6/ndisc.c 		neigh = ip6_neigh_lookup(&rt->fib6_nh->fib_nh_gw6,
rt               1293 net/ipv6/ndisc.c 					 rt->fib6_nh->fib_nh_dev, NULL,
rt               1299 net/ipv6/ndisc.c 			fib6_info_release(rt);
rt               1303 net/ipv6/ndisc.c 	if (rt && lifetime == 0) {
rt               1304 net/ipv6/ndisc.c 		ip6_del_rt(net, rt);
rt               1305 net/ipv6/ndisc.c 		rt = NULL;
rt               1309 net/ipv6/ndisc.c 		  rt, lifetime, skb->dev->name);
rt               1310 net/ipv6/ndisc.c 	if (!rt && lifetime) {
rt               1313 net/ipv6/ndisc.c 		rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
rt               1315 net/ipv6/ndisc.c 		if (!rt) {
rt               1322 net/ipv6/ndisc.c 		neigh = ip6_neigh_lookup(&rt->fib6_nh->fib_nh_gw6,
rt               1323 net/ipv6/ndisc.c 					 rt->fib6_nh->fib_nh_dev, NULL,
rt               1329 net/ipv6/ndisc.c 			fib6_info_release(rt);
rt               1333 net/ipv6/ndisc.c 	} else if (rt) {
rt               1334 net/ipv6/ndisc.c 		rt->fib6_flags = (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
rt               1337 net/ipv6/ndisc.c 	if (rt)
rt               1338 net/ipv6/ndisc.c 		fib6_set_expires(rt, jiffies + (HZ * lifetime));
rt               1343 net/ipv6/ndisc.c 			fib6_metric_set(rt, RTAX_HOPLIMIT,
rt               1496 net/ipv6/ndisc.c 			fib6_metric_set(rt, RTAX_MTU, mtu);
rt               1515 net/ipv6/ndisc.c 	fib6_info_release(rt);
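The ndisc.c block at lines 1172-1515 is the Router Advertisement default-route logic: look up an existing default route for the advertising router, delete it when the advertised lifetime is zero, add one when it is missing, otherwise refresh the preference, then re-arm the expiry and drop the reference. A commented condensation, assuming kernel context; neighbour lookups and error paths are omitted, and the trailing rt6_add_dflt_router() arguments follow the kernel's declaration rather than the continuation lines this index elides:

	rt = rt6_get_dflt_router(net, &ipv6_hdr(skb)->saddr, skb->dev);

	if (rt && lifetime == 0) {
		ip6_del_rt(net, rt);		/* lifetime 0: router withdrawn */
		rt = NULL;
	}

	if (!rt && lifetime) {
		/* No default route via this router yet: install one. */
		rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
					 skb->dev, pref);
	} else if (rt) {
		/* Refresh the preference advertised in this RA. */
		rt->fib6_flags = (rt->fib6_flags & ~RTF_PREF_MASK) |
				 RTF_PREF(pref);
	}

	if (rt)
		/* Router lifetime is in seconds; convert to jiffies. */
		fib6_set_expires(rt, jiffies + (HZ * lifetime));

	fib6_info_release(rt);			/* drop the lookup/add reference */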
rt               1586 net/ipv6/ndisc.c 	struct rt6_info *rt;
rt               1625 net/ipv6/ndisc.c 	rt = (struct rt6_info *) dst;
rt               1627 net/ipv6/ndisc.c 	if (rt->rt6i_flags & RTF_GATEWAY) {
rt                 35 net/ipv6/netfilter/ip6t_rpfilter.c 	struct rt6_info *rt;
rt                 63 net/ipv6/netfilter/ip6t_rpfilter.c 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
rt                 64 net/ipv6/netfilter/ip6t_rpfilter.c 	if (rt->dst.error)
rt                 67 net/ipv6/netfilter/ip6t_rpfilter.c 	if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
rt                 70 net/ipv6/netfilter/ip6t_rpfilter.c 	if (rt->rt6i_flags & RTF_LOCAL) {
rt                 75 net/ipv6/netfilter/ip6t_rpfilter.c 	if (rt->rt6i_idev->dev == dev ||
rt                 76 net/ipv6/netfilter/ip6t_rpfilter.c 	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
rt                 80 net/ipv6/netfilter/ip6t_rpfilter.c 	ip6_rt_put(rt);
rt                 59 net/ipv6/netfilter/nft_fib_ipv6.c 	struct rt6_info *rt;
rt                 76 net/ipv6/netfilter/nft_fib_ipv6.c 	route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt,
rt                 81 net/ipv6/netfilter/nft_fib_ipv6.c 	if (rt->rt6i_flags & RTF_REJECT) {
rt                 82 net/ipv6/netfilter/nft_fib_ipv6.c 		route_err = rt->dst.error;
rt                 83 net/ipv6/netfilter/nft_fib_ipv6.c 		dst_release(&rt->dst);
rt                 87 net/ipv6/netfilter/nft_fib_ipv6.c 	if (ipv6_anycast_destination((struct dst_entry *)rt, &fl6.daddr))
rt                 89 net/ipv6/netfilter/nft_fib_ipv6.c 	else if (!dev && rt->rt6i_flags & RTF_LOCAL)
rt                 92 net/ipv6/netfilter/nft_fib_ipv6.c 	dst_release(&rt->dst);
rt                150 net/ipv6/netfilter/nft_fib_ipv6.c 	struct rt6_info *rt;
rt                173 net/ipv6/netfilter/nft_fib_ipv6.c 	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
rt                175 net/ipv6/netfilter/nft_fib_ipv6.c 	if (rt->dst.error)
rt                179 net/ipv6/netfilter/nft_fib_ipv6.c 	if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
rt                182 net/ipv6/netfilter/nft_fib_ipv6.c 	if (oif && oif != rt->rt6i_idev->dev)
rt                185 net/ipv6/netfilter/nft_fib_ipv6.c 	nft_fib_store_result(dest, priv, rt->rt6i_idev->dev);
rt                187 net/ipv6/netfilter/nft_fib_ipv6.c 	ip6_rt_put(rt);
rt                 58 net/ipv6/ping.c 	struct rt6_info *rt;
rt                122 net/ipv6/ping.c 	rt = (struct rt6_info *) dst;
rt                142 net/ipv6/ping.c 			      0, &ipc6, &fl6, rt,
rt                146 net/ipv6/ping.c 		ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
rt                627 net/ipv6/raw.c 	struct rt6_info *rt = (struct rt6_info *)*dstp;
rt                628 net/ipv6/raw.c 	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
rt                629 net/ipv6/raw.c 	int tlen = rt->dst.dev->needed_tailroom;
rt                631 net/ipv6/raw.c 	if (length > rt->dst.dev->mtu) {
rt                632 net/ipv6/raw.c 		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
rt                671 net/ipv6/raw.c 	skb_dst_set(skb, &rt->dst);
rt                686 net/ipv6/raw.c 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
rt                688 net/ipv6/raw.c 		      NULL, rt->dst.dev, dst_output);
rt                692 net/ipv6/raw.c 		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
rt                701 net/ipv6/raw.c 	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
rt                106 net/ipv6/route.c 			 struct fib6_info *rt, struct dst_entry *dst,
rt                133 net/ipv6/route.c void rt6_uncached_list_add(struct rt6_info *rt)
rt                137 net/ipv6/route.c 	rt->rt6i_uncached_list = ul;
rt                140 net/ipv6/route.c 	list_add_tail(&rt->rt6i_uncached, &ul->head);
rt                144 net/ipv6/route.c void rt6_uncached_list_del(struct rt6_info *rt)
rt                146 net/ipv6/route.c 	if (!list_empty(&rt->rt6i_uncached)) {
rt                147 net/ipv6/route.c 		struct uncached_list *ul = rt->rt6i_uncached_list;
rt                148 net/ipv6/route.c 		struct net *net = dev_net(rt->dst.dev);
rt                151 net/ipv6/route.c 		list_del(&rt->rt6i_uncached);
rt                167 net/ipv6/route.c 		struct rt6_info *rt;
rt                170 net/ipv6/route.c 		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
rt                171 net/ipv6/route.c 			struct inet6_dev *rt_idev = rt->rt6i_idev;
rt                172 net/ipv6/route.c 			struct net_device *rt_dev = rt->dst.dev;
rt                175 net/ipv6/route.c 				rt->rt6i_idev = in6_dev_get(loopback_dev);
rt                180 net/ipv6/route.c 				rt->dst.dev = blackhole_netdev;
rt                181 net/ipv6/route.c 				dev_hold(rt->dst.dev);
rt                220 net/ipv6/route.c 	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
rt                222 net/ipv6/route.c 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
rt                229 net/ipv6/route.c 	struct rt6_info *rt = (struct rt6_info *)dst;
rt                231 net/ipv6/route.c 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
rt                343 net/ipv6/route.c static void rt6_info_init(struct rt6_info *rt)
rt                345 net/ipv6/route.c 	struct dst_entry *dst = &rt->dst;
rt                347 net/ipv6/route.c 	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
rt                348 net/ipv6/route.c 	INIT_LIST_HEAD(&rt->rt6i_uncached);
rt                355 net/ipv6/route.c 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
rt                358 net/ipv6/route.c 	if (rt) {
rt                359 net/ipv6/route.c 		rt6_info_init(rt);
rt                363 net/ipv6/route.c 	return rt;
rt                369 net/ipv6/route.c 	struct rt6_info *rt = (struct rt6_info *)dst;
rt                374 net/ipv6/route.c 	rt6_uncached_list_del(rt);
rt                376 net/ipv6/route.c 	idev = rt->rt6i_idev;
rt                378 net/ipv6/route.c 		rt->rt6i_idev = NULL;
rt                382 net/ipv6/route.c 	from = xchg((__force struct fib6_info **)&rt->from, NULL);
rt                389 net/ipv6/route.c 	struct rt6_info *rt = (struct rt6_info *)dst;
rt                390 net/ipv6/route.c 	struct inet6_dev *idev = rt->rt6i_idev;
rt                397 net/ipv6/route.c 			rt->rt6i_idev = loopback_idev;
rt                403 net/ipv6/route.c static bool __rt6_check_expired(const struct rt6_info *rt)
rt                405 net/ipv6/route.c 	if (rt->rt6i_flags & RTF_EXPIRES)
rt                406 net/ipv6/route.c 		return time_after(jiffies, rt->dst.expires);
rt                411 net/ipv6/route.c static bool rt6_check_expired(const struct rt6_info *rt)
rt                415 net/ipv6/route.c 	from = rcu_dereference(rt->from);
rt                417 net/ipv6/route.c 	if (rt->rt6i_flags & RTF_EXPIRES) {
rt                418 net/ipv6/route.c 		if (time_after(jiffies, rt->dst.expires))
rt                421 net/ipv6/route.c 		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
rt                943 net/ipv6/route.c 	struct fib6_info *rt;
rt                981 net/ipv6/route.c 		rt = rt6_get_dflt_router(net, gwaddr, dev);
rt                983 net/ipv6/route.c 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
rt                986 net/ipv6/route.c 	if (rt && !lifetime) {
rt                987 net/ipv6/route.c 		ip6_del_rt(net, rt);
rt                988 net/ipv6/route.c 		rt = NULL;
rt                991 net/ipv6/route.c 	if (!rt && lifetime)
rt                992 net/ipv6/route.c 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
rt                994 net/ipv6/route.c 	else if (rt)
rt                995 net/ipv6/route.c 		rt->fib6_flags = RTF_ROUTEINFO |
rt                996 net/ipv6/route.c 				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
rt                998 net/ipv6/route.c 	if (rt) {
rt               1000 net/ipv6/route.c 			fib6_clean_expires(rt);
rt               1002 net/ipv6/route.c 			fib6_set_expires(rt, jiffies + HZ * lifetime);
rt               1004 net/ipv6/route.c 		fib6_info_release(rt);
rt               1057 net/ipv6/route.c static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
rt               1061 net/ipv6/route.c 	if (rt->dst_nocount)
rt               1063 net/ipv6/route.c 	if (rt->dst_nopolicy)
rt               1065 net/ipv6/route.c 	if (rt->dst_host)
rt               1071 net/ipv6/route.c static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
rt               1073 net/ipv6/route.c 	rt->dst.error = ip6_rt_type_to_error(fib6_type);
rt               1077 net/ipv6/route.c 		rt->dst.output = dst_discard_out;
rt               1078 net/ipv6/route.c 		rt->dst.input = dst_discard;
rt               1081 net/ipv6/route.c 		rt->dst.output = ip6_pkt_prohibit_out;
rt               1082 net/ipv6/route.c 		rt->dst.input = ip6_pkt_prohibit;
rt               1087 net/ipv6/route.c 		rt->dst.output = ip6_pkt_discard_out;
rt               1088 net/ipv6/route.c 		rt->dst.input = ip6_pkt_discard;
rt               1093 net/ipv6/route.c static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
rt               1098 net/ipv6/route.c 		ip6_rt_init_dst_reject(rt, res->fib6_type);
rt               1102 net/ipv6/route.c 	rt->dst.error = 0;
rt               1103 net/ipv6/route.c 	rt->dst.output = ip6_output;
rt               1106 net/ipv6/route.c 		rt->dst.input = ip6_input;
rt               1108 net/ipv6/route.c 		rt->dst.input = ip6_mc_input;
rt               1110 net/ipv6/route.c 		rt->dst.input = ip6_forward;
rt               1114 net/ipv6/route.c 		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
rt               1115 net/ipv6/route.c 		lwtunnel_set_redirect(&rt->dst);
rt               1118 net/ipv6/route.c 	rt->dst.lastuse = jiffies;
rt               1122 net/ipv6/route.c static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
rt               1124 net/ipv6/route.c 	rt->rt6i_flags &= ~RTF_EXPIRES;
rt               1125 net/ipv6/route.c 	rcu_assign_pointer(rt->from, from);
rt               1126 net/ipv6/route.c 	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
rt               1130 net/ipv6/route.c static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
rt               1136 net/ipv6/route.c 	ip6_rt_init_dst(rt, res);
rt               1138 net/ipv6/route.c 	rt->rt6i_dst = f6i->fib6_dst;
rt               1139 net/ipv6/route.c 	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
rt               1140 net/ipv6/route.c 	rt->rt6i_flags = res->fib6_flags;
rt               1142 net/ipv6/route.c 		rt->rt6i_gateway = nh->fib_nh_gw6;
rt               1143 net/ipv6/route.c 		rt->rt6i_flags |= RTF_GATEWAY;
rt               1145 net/ipv6/route.c 	rt6_set_from(rt, f6i);
rt               1147 net/ipv6/route.c 	rt->rt6i_src = f6i->fib6_src;
rt               1171 net/ipv6/route.c 	struct rt6_info *rt = *prt;
rt               1173 net/ipv6/route.c 	if (dst_hold_safe(&rt->dst))
rt               1176 net/ipv6/route.c 		rt = net->ipv6.ip6_null_entry;
rt               1177 net/ipv6/route.c 		dst_hold(&rt->dst);
rt               1179 net/ipv6/route.c 		rt = NULL;
rt               1181 net/ipv6/route.c 	*prt = rt;
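The route.c fragments beginning at line 1171 are ip6_hold_safe(): if a reference cannot be taken on the candidate route because it is already being freed, the code substitutes the per-netns ip6_null_entry, which can always be held, so callers still receive a valid dst. Reassembled below with comments; abridged, and the return statements are an assumption about the surrounding function, which this index elides:

	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;			/* got a reference on the real route */
	if (net) {
		/* Route is being torn down: fall back to the always-valid
		 * null entry so the caller still gets a held dst. */
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;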
rt               1220 net/ipv6/route.c 	struct rt6_info *rt;
rt               1240 net/ipv6/route.c 		rt = net->ipv6.ip6_null_entry;
rt               1241 net/ipv6/route.c 		dst_hold(&rt->dst);
rt               1251 net/ipv6/route.c 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
rt               1252 net/ipv6/route.c 	if (rt) {
rt               1253 net/ipv6/route.c 		if (ip6_hold_safe(net, &rt))
rt               1254 net/ipv6/route.c 			dst_use_noref(&rt->dst, jiffies);
rt               1257 net/ipv6/route.c 		rt = ip6_create_rt_rcu(&res);
rt               1265 net/ipv6/route.c 	return rt;
rt               1307 net/ipv6/route.c static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
rt               1313 net/ipv6/route.c 	table = rt->fib6_table;
rt               1315 net/ipv6/route.c 	err = fib6_add(&table->tb6_root, rt, info, extack);
rt               1321 net/ipv6/route.c int ip6_ins_rt(struct net *net, struct fib6_info *rt)
rt               1325 net/ipv6/route.c 	return __ip6_ins_rt(rt, &info, NULL);
rt               1334 net/ipv6/route.c 	struct rt6_info *rt;
rt               1344 net/ipv6/route.c 	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
rt               1345 net/ipv6/route.c 	if (!rt) {
rt               1350 net/ipv6/route.c 	ip6_rt_copy_init(rt, res);
rt               1351 net/ipv6/route.c 	rt->rt6i_flags |= RTF_CACHE;
rt               1352 net/ipv6/route.c 	rt->dst.flags |= DST_HOST;
rt               1353 net/ipv6/route.c 	rt->rt6i_dst.addr = *daddr;
rt               1354 net/ipv6/route.c 	rt->rt6i_dst.plen = 128;
rt               1359 net/ipv6/route.c 			rt->rt6i_flags |= RTF_ANYCAST;
rt               1361 net/ipv6/route.c 		if (rt->rt6i_src.plen && saddr) {
rt               1362 net/ipv6/route.c 			rt->rt6i_src.addr = *saddr;
rt               1363 net/ipv6/route.c 			rt->rt6i_src.plen = 128;
rt               1368 net/ipv6/route.c 	return rt;
rt               1833 net/ipv6/route.c 				    const struct rt6_info *rt)
rt               1854 net/ipv6/route.c 		src_key = &rt->rt6i_src.addr;
rt               1857 net/ipv6/route.c 					       &rt->rt6i_dst.addr,
rt               1871 net/ipv6/route.c 	struct rt6_info	*rt;
rt               1880 net/ipv6/route.c 	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
rt               1887 net/ipv6/route.c static int rt6_remove_exception_rt(struct rt6_info *rt)
rt               1891 net/ipv6/route.c 	from = rcu_dereference(rt->from);
rt               1892 net/ipv6/route.c 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
rt               1897 net/ipv6/route.c 			.rt = rt,
rt               1910 net/ipv6/route.c 					from->fib6_src.plen, rt);
rt               1917 net/ipv6/route.c 				     const struct rt6_info *rt)
rt               1932 net/ipv6/route.c 		src_key = &rt->rt6i_src.addr;
rt               1934 net/ipv6/route.c 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
rt               1962 net/ipv6/route.c static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
rt               1969 net/ipv6/route.c 	from = rcu_dereference(rt->from);
rt               1970 net/ipv6/route.c 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
rt               1975 net/ipv6/route.c 			.dev = rt->dst.dev,
rt               1976 net/ipv6/route.c 			.gw = &rt->rt6i_gateway,
rt               1987 net/ipv6/route.c 	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
rt               1993 net/ipv6/route.c 					 struct rt6_info *rt, int mtu)
rt               2005 net/ipv6/route.c 	if (dst_mtu(&rt->dst) >= mtu)
rt               2008 net/ipv6/route.c 	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
rt               2081 net/ipv6/route.c 	struct rt6_info *rt = rt6_ex->rt6i;
rt               2089 net/ipv6/route.c 	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
rt               2090 net/ipv6/route.c 		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
rt               2091 net/ipv6/route.c 			RT6_TRACE("aging clone %p\n", rt);
rt               2095 net/ipv6/route.c 	} else if (time_after(jiffies, rt->dst.expires)) {
rt               2096 net/ipv6/route.c 		RT6_TRACE("purging expired route %p\n", rt);
rt               2101 net/ipv6/route.c 	if (rt->rt6i_flags & RTF_GATEWAY) {
rt               2105 net/ipv6/route.c 		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
rt               2111 net/ipv6/route.c 				  rt);
rt               2215 net/ipv6/route.c 	struct rt6_info *rt = NULL;
rt               2235 net/ipv6/route.c 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
rt               2236 net/ipv6/route.c 	if (rt) {
rt               2245 net/ipv6/route.c 		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
rt               2247 net/ipv6/route.c 		if (rt) {
rt               2253 net/ipv6/route.c 			rt6_uncached_list_add(rt);
rt               2257 net/ipv6/route.c 			return rt;
rt               2262 net/ipv6/route.c 		rt = rt6_get_pcpu_route(&res);
rt               2264 net/ipv6/route.c 		if (!rt)
rt               2265 net/ipv6/route.c 			rt = rt6_make_pcpu_route(net, &res);
rt               2270 net/ipv6/route.c 	if (!rt)
rt               2271 net/ipv6/route.c 		rt = net->ipv6.ip6_null_entry;
rt               2273 net/ipv6/route.c 		ip6_hold_safe(net, &rt);
rt               2276 net/ipv6/route.c 	return rt;
rt               2540 net/ipv6/route.c 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
rt               2544 net/ipv6/route.c 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
rt               2546 net/ipv6/route.c 	if (rt) {
rt               2547 net/ipv6/route.c 		rt6_info_init(rt);
rt               2550 net/ipv6/route.c 		new = &rt->dst;
rt               2557 net/ipv6/route.c 		rt->rt6i_idev = in6_dev_get(loopback_dev);
rt               2558 net/ipv6/route.c 		rt->rt6i_gateway = ort->rt6i_gateway;
rt               2559 net/ipv6/route.c 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
rt               2561 net/ipv6/route.c 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
rt               2563 net/ipv6/route.c 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
rt               2588 net/ipv6/route.c static struct dst_entry *rt6_check(struct rt6_info *rt,
rt               2598 net/ipv6/route.c 	if (rt6_check_expired(rt))
rt               2601 net/ipv6/route.c 	return &rt->dst;
rt               2604 net/ipv6/route.c static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
rt               2608 net/ipv6/route.c 	if (!__rt6_check_expired(rt) &&
rt               2609 net/ipv6/route.c 	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
rt               2611 net/ipv6/route.c 		return &rt->dst;
rt               2620 net/ipv6/route.c 	struct rt6_info *rt;
rt               2622 net/ipv6/route.c 	rt = container_of(dst, struct rt6_info, dst);
rt               2624 net/ipv6/route.c 	if (rt->sernum)
rt               2625 net/ipv6/route.c 		return rt6_is_valid(rt) ? dst : NULL;
rt               2634 net/ipv6/route.c 	from = rcu_dereference(rt->from);
rt               2636 net/ipv6/route.c 	if (from && (rt->rt6i_flags & RTF_PCPU ||
rt               2637 net/ipv6/route.c 	    unlikely(!list_empty(&rt->rt6i_uncached))))
rt               2638 net/ipv6/route.c 		dst_ret = rt6_dst_from_check(rt, from, cookie);
rt               2640 net/ipv6/route.c 		dst_ret = rt6_check(rt, from, cookie);
rt               2649 net/ipv6/route.c 	struct rt6_info *rt = (struct rt6_info *) dst;
rt               2651 net/ipv6/route.c 	if (rt) {
rt               2652 net/ipv6/route.c 		if (rt->rt6i_flags & RTF_CACHE) {
rt               2654 net/ipv6/route.c 			if (rt6_check_expired(rt)) {
rt               2655 net/ipv6/route.c 				rt6_remove_exception_rt(rt);
rt               2669 net/ipv6/route.c 	struct rt6_info *rt;
rt               2673 net/ipv6/route.c 	rt = (struct rt6_info *) skb_dst(skb);
rt               2674 net/ipv6/route.c 	if (rt) {
rt               2676 net/ipv6/route.c 		if (rt->rt6i_flags & RTF_CACHE) {
rt               2677 net/ipv6/route.c 			rt6_remove_exception_rt(rt);
rt               2682 net/ipv6/route.c 			from = rcu_dereference(rt->from);
rt               2685 net/ipv6/route.c 				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
rt               2709 net/ipv6/route.c static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
rt               2711 net/ipv6/route.c 	struct net *net = dev_net(rt->dst.dev);
rt               2713 net/ipv6/route.c 	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
rt               2714 net/ipv6/route.c 	rt->rt6i_flags |= RTF_MODIFIED;
rt               2715 net/ipv6/route.c 	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
rt               2718 net/ipv6/route.c static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
rt               2720 net/ipv6/route.c 	return !(rt->rt6i_flags & RTF_CACHE) &&
rt               2721 net/ipv6/route.c 		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
rt               2936 net/ipv6/route.c 	struct fib6_info *rt;
rt               2959 net/ipv6/route.c 		res.f6i = rt;
rt               2960 net/ipv6/route.c 		if (fib6_check_expired(rt))
rt               2962 net/ipv6/route.c 		if (rt->fib6_flags & RTF_REJECT)
rt               2964 net/ipv6/route.c 		if (unlikely(rt->nh)) {
rt               2965 net/ipv6/route.c 			if (nexthop_is_blackhole(rt->nh))
rt               2968 net/ipv6/route.c 			if (nexthop_for_each_fib6_nh(rt->nh,
rt               2973 net/ipv6/route.c 			res.nh = rt->fib6_nh;
rt               2980 net/ipv6/route.c 	if (!rt)
rt               2981 net/ipv6/route.c 		rt = net->ipv6.fib6_null_entry;
rt               2982 net/ipv6/route.c 	else if (rt->fib6_flags & RTF_REJECT) {
rt               2987 net/ipv6/route.c 	if (rt == net->ipv6.fib6_null_entry) {
rt               2993 net/ipv6/route.c 	res.f6i = rt;
rt               2994 net/ipv6/route.c 	res.nh = rt->fib6_nh;
rt               3131 net/ipv6/route.c 	struct rt6_info *rt;
rt               3140 net/ipv6/route.c 	rt = rt6_find_cached_rt(res, daddr, saddr);
rt               3141 net/ipv6/route.c 	if (unlikely(rt)) {
rt               3142 net/ipv6/route.c 		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
rt               3161 net/ipv6/route.c 	struct rt6_info *rt;
rt               3168 net/ipv6/route.c 	rt = ip6_dst_alloc(net, dev, 0);
rt               3169 net/ipv6/route.c 	if (unlikely(!rt)) {
rt               3175 net/ipv6/route.c 	rt->dst.flags |= DST_HOST;
rt               3176 net/ipv6/route.c 	rt->dst.input = ip6_input;
rt               3177 net/ipv6/route.c 	rt->dst.output  = ip6_output;
rt               3178 net/ipv6/route.c 	rt->rt6i_gateway  = fl6->daddr;
rt               3179 net/ipv6/route.c 	rt->rt6i_dst.addr = fl6->daddr;
rt               3180 net/ipv6/route.c 	rt->rt6i_dst.plen = 128;
rt               3181 net/ipv6/route.c 	rt->rt6i_idev     = idev;
rt               3182 net/ipv6/route.c 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
rt               3187 net/ipv6/route.c 	rt6_uncached_list_add(rt);
rt               3190 net/ipv6/route.c 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
rt               3579 net/ipv6/route.c 	struct fib6_info *rt = NULL;
rt               3645 net/ipv6/route.c 	rt = fib6_info_alloc(gfp_flags, !nh);
rt               3646 net/ipv6/route.c 	if (!rt)
rt               3649 net/ipv6/route.c 	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
rt               3651 net/ipv6/route.c 	if (IS_ERR(rt->fib6_metrics)) {
rt               3652 net/ipv6/route.c 		err = PTR_ERR(rt->fib6_metrics);
rt               3654 net/ipv6/route.c 		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
rt               3659 net/ipv6/route.c 		rt->dst_nocount = true;
rt               3662 net/ipv6/route.c 		fib6_set_expires(rt, jiffies +
rt               3665 net/ipv6/route.c 		fib6_clean_expires(rt);
rt               3669 net/ipv6/route.c 	rt->fib6_protocol = cfg->fc_protocol;
rt               3671 net/ipv6/route.c 	rt->fib6_table = table;
rt               3672 net/ipv6/route.c 	rt->fib6_metric = cfg->fc_metric;
rt               3673 net/ipv6/route.c 	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
rt               3674 net/ipv6/route.c 	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
rt               3676 net/ipv6/route.c 	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt               3677 net/ipv6/route.c 	rt->fib6_dst.plen = cfg->fc_dst_len;
rt               3678 net/ipv6/route.c 	if (rt->fib6_dst.plen == 128)
rt               3679 net/ipv6/route.c 		rt->dst_host = true;
rt               3682 net/ipv6/route.c 	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
rt               3683 net/ipv6/route.c 	rt->fib6_src.plen = cfg->fc_src_len;
rt               3690 net/ipv6/route.c 		if (rt->fib6_src.plen) {
rt               3694 net/ipv6/route.c 		rt->nh = nh;
rt               3695 net/ipv6/route.c 		fib6_nh = nexthop_fib6_nh(rt->nh);
rt               3697 net/ipv6/route.c 		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
rt               3701 net/ipv6/route.c 		fib6_nh = rt->fib6_nh;
rt               3707 net/ipv6/route.c 		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
rt               3709 net/ipv6/route.c 			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
rt               3720 net/ipv6/route.c 		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
rt               3721 net/ipv6/route.c 		rt->fib6_prefsrc.plen = 128;
rt               3723 net/ipv6/route.c 		rt->fib6_prefsrc.plen = 0;
rt               3725 net/ipv6/route.c 	return rt;
rt               3727 net/ipv6/route.c 	fib6_info_release(rt);
rt               3734 net/ipv6/route.c 	struct fib6_info *rt;
rt               3737 net/ipv6/route.c 	rt = ip6_route_info_create(cfg, gfp_flags, extack);
rt               3738 net/ipv6/route.c 	if (IS_ERR(rt))
rt               3739 net/ipv6/route.c 		return PTR_ERR(rt);
rt               3741 net/ipv6/route.c 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
rt               3742 net/ipv6/route.c 	fib6_info_release(rt);
rt               3747 net/ipv6/route.c static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
rt               3753 net/ipv6/route.c 	if (rt == net->ipv6.fib6_null_entry) {
rt               3758 net/ipv6/route.c 	table = rt->fib6_table;
rt               3760 net/ipv6/route.c 	err = fib6_del(rt, info);
rt               3764 net/ipv6/route.c 	fib6_info_release(rt);
rt               3768 net/ipv6/route.c int ip6_del_rt(struct net *net, struct fib6_info *rt)
rt               3772 net/ipv6/route.c 	return __ip6_del_rt(rt, &info);
rt               3775 net/ipv6/route.c static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
rt               3783 net/ipv6/route.c 	if (rt == net->ipv6.fib6_null_entry)
rt               3785 net/ipv6/route.c 	table = rt->fib6_table;
rt               3788 net/ipv6/route.c 	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
rt               3792 net/ipv6/route.c 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
rt               3796 net/ipv6/route.c 			if (rt6_fill_node(net, skb, rt, NULL,
rt               3808 net/ipv6/route.c 						    rt,
rt               3809 net/ipv6/route.c 						    rt->fib6_nsiblings,
rt               3812 net/ipv6/route.c 					 &rt->fib6_siblings,
rt               3820 net/ipv6/route.c 	err = fib6_del(rt, info);
rt               3824 net/ipv6/route.c 	fib6_info_release(rt);
rt               3833 net/ipv6/route.c static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
rt               3837 net/ipv6/route.c 	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
rt               3841 net/ipv6/route.c 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
rt               3844 net/ipv6/route.c 	rc = rt6_remove_exception_rt(rt);
rt               3849 net/ipv6/route.c static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
rt               3853 net/ipv6/route.c 		.f6i = rt,
rt               3893 net/ipv6/route.c 	struct fib6_info *rt;
rt               3914 net/ipv6/route.c 			if (rt->nh && cfg->fc_nh_id &&
rt               3915 net/ipv6/route.c 			    rt->nh->id != cfg->fc_nh_id)
rt               3921 net/ipv6/route.c 				if (rt->nh) {
rt               3922 net/ipv6/route.c 					rc = ip6_del_cached_rt_nh(cfg, rt);
rt               3926 net/ipv6/route.c 					nh = rt->fib6_nh;
rt               3927 net/ipv6/route.c 					rc = ip6_del_cached_rt(cfg, rt, nh);
rt               3936 net/ipv6/route.c 			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
rt               3939 net/ipv6/route.c 			    cfg->fc_protocol != rt->fib6_protocol)
rt               3942 net/ipv6/route.c 			if (rt->nh) {
rt               3943 net/ipv6/route.c 				if (!fib6_info_hold_safe(rt))
rt               3947 net/ipv6/route.c 				return __ip6_del_rt(rt, &cfg->fc_nlinfo);
rt               3952 net/ipv6/route.c 			nh = rt->fib6_nh;
rt               3960 net/ipv6/route.c 			if (!fib6_info_hold_safe(rt))
rt               3966 net/ipv6/route.c 				return __ip6_del_rt(rt, &cfg->fc_nlinfo);
rt               3968 net/ipv6/route.c 			return __ip6_del_rt_siblings(rt, cfg);
rt               3979 net/ipv6/route.c 	struct rt6_info *rt, *nrt = NULL;
rt               4038 net/ipv6/route.c 	rt = (struct rt6_info *) dst;
rt               4039 net/ipv6/route.c 	if (rt->rt6i_flags & RTF_REJECT) {
rt               4048 net/ipv6/route.c 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
rt               4066 net/ipv6/route.c 	res.f6i = rcu_dereference(rt->from);
rt               4073 net/ipv6/route.c 			.gw = &rt->rt6i_gateway,
rt               4107 net/ipv6/route.c 	netevent.old = &rt->dst;
rt               4127 net/ipv6/route.c 	struct fib6_info *rt = NULL;
rt               4141 net/ipv6/route.c 		if (rt->nh)
rt               4143 net/ipv6/route.c 		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
rt               4145 net/ipv6/route.c 		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
rt               4146 net/ipv6/route.c 		    !rt->fib6_nh->fib_nh_gw_family)
rt               4148 net/ipv6/route.c 		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
rt               4150 net/ipv6/route.c 		if (!fib6_info_hold_safe(rt))
rt               4156 net/ipv6/route.c 	return rt;
rt               4197 net/ipv6/route.c 	struct fib6_info *rt;
rt               4209 net/ipv6/route.c 		if (rt->nh)
rt               4212 net/ipv6/route.c 		nh = rt->fib6_nh;
rt               4214 net/ipv6/route.c 		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
rt               4218 net/ipv6/route.c 	if (rt && !fib6_info_hold_safe(rt))
rt               4219 net/ipv6/route.c 		rt = NULL;
rt               4221 net/ipv6/route.c 	return rt;
rt               4258 net/ipv6/route.c 	struct fib6_info *rt;
rt               4263 net/ipv6/route.c 		struct net_device *dev = fib6_info_nh_dev(rt);
rt               4266 net/ipv6/route.c 		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
rt               4268 net/ipv6/route.c 		    fib6_info_hold_safe(rt)) {
rt               4270 net/ipv6/route.c 			ip6_del_rt(net, rt);
rt               4461 net/ipv6/route.c static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
rt               4467 net/ipv6/route.c 	if (!rt->nh &&
rt               4468 net/ipv6/route.c 	    ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
rt               4469 net/ipv6/route.c 	    rt != net->ipv6.fib6_null_entry &&
rt               4470 net/ipv6/route.c 	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
rt               4473 net/ipv6/route.c 		rt->fib6_prefsrc.plen = 0;
rt               4493 net/ipv6/route.c static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
rt               4499 net/ipv6/route.c 	if (rt->nh)
rt               4502 net/ipv6/route.c 	nh = rt->fib6_nh;
rt               4503 net/ipv6/route.c 	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
rt               4529 net/ipv6/route.c static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
rt               4534 net/ipv6/route.c 	fn = rcu_dereference_protected(rt->fib6_node,
rt               4535 net/ipv6/route.c 			lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               4537 net/ipv6/route.c 			lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               4539 net/ipv6/route.c 		if (iter->fib6_metric == rt->fib6_metric &&
rt               4543 net/ipv6/route.c 				lockdep_is_held(&rt->fib6_table->tb6_lock));
rt               4550 net/ipv6/route.c static bool rt6_is_dead(const struct fib6_info *rt)
rt               4552 net/ipv6/route.c 	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
rt               4553 net/ipv6/route.c 	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
rt               4554 net/ipv6/route.c 	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
rt               4560 net/ipv6/route.c static int rt6_multipath_total_weight(const struct fib6_info *rt)
rt               4565 net/ipv6/route.c 	if (!rt6_is_dead(rt))
rt               4566 net/ipv6/route.c 		total += rt->fib6_nh->fib_nh_weight;
rt               4568 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
rt               4576 net/ipv6/route.c static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
rt               4580 net/ipv6/route.c 	if (!rt6_is_dead(rt)) {
rt               4581 net/ipv6/route.c 		*weight += rt->fib6_nh->fib_nh_weight;
rt               4585 net/ipv6/route.c 	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
rt               4588 net/ipv6/route.c static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
rt               4593 net/ipv6/route.c 	rt6_upper_bound_set(rt, &weight, total);
rt               4595 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt               4599 net/ipv6/route.c void rt6_multipath_rebalance(struct fib6_info *rt)
rt               4608 net/ipv6/route.c 	if (!rt->fib6_nsiblings || rt->should_flush)
rt               4615 net/ipv6/route.c 	first = rt6_multipath_first_sibling(rt);
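
The net/ipv6/route.c entries above (rt6_is_dead through rt6_multipath_rebalance) show dead nexthops being excluded from the total weight before a cumulative upper bound is stored per nexthop. A minimal standalone sketch of that idea follows; struct toy_nh and the percentage scale are illustrative stand-ins for the kernel's fib6_nh and fixed-point bound.

/*
 * Simplified sketch of the weighted-upper-bound idea used by the
 * rt6_multipath_rebalance() entries above: dead nexthops contribute no
 * weight, live ones get a cumulative upper bound so a hash value can be
 * compared against the bounds to pick a nexthop.  The kernel's
 * fixed-point scale is replaced by a plain percentage for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_nh { int weight; bool dead; int upper_bound; };

static void toy_rebalance(struct toy_nh *nh, int n)
{
	int total = 0, cumulative = 0, i;

	for (i = 0; i < n; i++)
		if (!nh[i].dead)
			total += nh[i].weight;

	for (i = 0; i < n; i++) {
		if (nh[i].dead || !total) {
			nh[i].upper_bound = -1;	/* never selected */
			continue;
		}
		cumulative += nh[i].weight;
		nh[i].upper_bound = (cumulative * 100) / total;
	}
}

int main(void)
{
	struct toy_nh nh[3] = { { 1, false }, { 2, false }, { 1, true } };
	int i;

	toy_rebalance(nh, 3);
	for (i = 0; i < 3; i++)
		printf("nh%d upper_bound=%d\n", i, nh[i].upper_bound);
	return 0;
}
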
rt               4623 net/ipv6/route.c static int fib6_ifup(struct fib6_info *rt, void *p_arg)
rt               4628 net/ipv6/route.c 	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
rt               4629 net/ipv6/route.c 	    rt->fib6_nh->fib_nh_dev == arg->dev) {
rt               4630 net/ipv6/route.c 		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
rt               4631 net/ipv6/route.c 		fib6_update_sernum_upto_root(net, rt);
rt               4632 net/ipv6/route.c 		rt6_multipath_rebalance(rt);
rt               4654 net/ipv6/route.c static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
rt               4659 net/ipv6/route.c 	if (rt->fib6_nh->fib_nh_dev == dev)
rt               4661 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt               4668 net/ipv6/route.c static void rt6_multipath_flush(struct fib6_info *rt)
rt               4672 net/ipv6/route.c 	rt->should_flush = 1;
rt               4673 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt               4677 net/ipv6/route.c static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
rt               4683 net/ipv6/route.c 	if (rt->fib6_nh->fib_nh_dev == down_dev ||
rt               4684 net/ipv6/route.c 	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
rt               4686 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt               4694 net/ipv6/route.c static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
rt               4700 net/ipv6/route.c 	if (rt->fib6_nh->fib_nh_dev == dev)
rt               4701 net/ipv6/route.c 		rt->fib6_nh->fib_nh_flags |= nh_flags;
rt               4702 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt               4708 net/ipv6/route.c static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
rt               4714 net/ipv6/route.c 	if (rt == net->ipv6.fib6_null_entry || rt->nh)
rt               4719 net/ipv6/route.c 		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
rt               4721 net/ipv6/route.c 		if (rt->should_flush)
rt               4723 net/ipv6/route.c 		if (!rt->fib6_nsiblings)
rt               4724 net/ipv6/route.c 			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
rt               4725 net/ipv6/route.c 		if (rt6_multipath_uses_dev(rt, dev)) {
rt               4728 net/ipv6/route.c 			count = rt6_multipath_dead_count(rt, dev);
rt               4729 net/ipv6/route.c 			if (rt->fib6_nsiblings + 1 == count) {
rt               4730 net/ipv6/route.c 				rt6_multipath_flush(rt);
rt               4733 net/ipv6/route.c 			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
rt               4735 net/ipv6/route.c 			fib6_update_sernum(net, rt);
rt               4736 net/ipv6/route.c 			rt6_multipath_rebalance(rt);
rt               4740 net/ipv6/route.c 		if (rt->fib6_nh->fib_nh_dev != dev ||
rt               4741 net/ipv6/route.c 		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
rt               4743 net/ipv6/route.c 		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
rt               4744 net/ipv6/route.c 		rt6_multipath_rebalance(rt);
rt               5015 net/ipv6/route.c 				 struct fib6_info *rt,
rt               5023 net/ipv6/route.c 		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
rt               5030 net/ipv6/route.c 	nh->fib6_info = rt;
rt               5037 net/ipv6/route.c static void ip6_route_mpath_notify(struct fib6_info *rt,
rt               5049 net/ipv6/route.c 		rt = list_first_entry(&rt_last->fib6_siblings,
rt               5054 net/ipv6/route.c 	if (rt)
rt               5055 net/ipv6/route.c 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
rt               5066 net/ipv6/route.c 	struct fib6_info *rt;
rt               5109 net/ipv6/route.c 		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
rt               5110 net/ipv6/route.c 		if (IS_ERR(rt)) {
rt               5111 net/ipv6/route.c 			err = PTR_ERR(rt);
rt               5112 net/ipv6/route.c 			rt = NULL;
rt               5115 net/ipv6/route.c 		if (!rt6_qualify_for_ecmp(rt)) {
rt               5119 net/ipv6/route.c 			fib6_info_release(rt);
rt               5123 net/ipv6/route.c 		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
rt               5126 net/ipv6/route.c 					    rt, &r_cfg);
rt               5128 net/ipv6/route.c 			fib6_info_release(rt);
rt               5397 net/ipv6/route.c 			 struct fib6_info *rt, struct dst_entry *dst,
rt               5419 net/ipv6/route.c 		rt6_dst = &rt->fib6_dst;
rt               5420 net/ipv6/route.c 		rt6_src = &rt->fib6_src;
rt               5421 net/ipv6/route.c 		rt6_flags = rt->fib6_flags;
rt               5429 net/ipv6/route.c 	if (rt->fib6_table)
rt               5430 net/ipv6/route.c 		table = rt->fib6_table->tb6_id;
rt               5437 net/ipv6/route.c 	rtm->rtm_type = rt->fib6_type;
rt               5440 net/ipv6/route.c 	rtm->rtm_protocol = rt->fib6_protocol;
rt               5476 net/ipv6/route.c 		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
rt               5481 net/ipv6/route.c 	if (rt->fib6_prefsrc.plen) {
rt               5483 net/ipv6/route.c 		saddr_buf = rt->fib6_prefsrc.addr;
rt               5488 net/ipv6/route.c 	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
rt               5492 net/ipv6/route.c 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
rt               5505 net/ipv6/route.c 	} else if (rt->fib6_nsiblings) {
rt               5513 net/ipv6/route.c 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
rt               5514 net/ipv6/route.c 				    rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
rt               5518 net/ipv6/route.c 					 &rt->fib6_siblings, fib6_siblings) {
rt               5526 net/ipv6/route.c 	} else if (rt->nh) {
rt               5527 net/ipv6/route.c 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
rt               5530 net/ipv6/route.c 		if (nexthop_is_blackhole(rt->nh))
rt               5533 net/ipv6/route.c 		if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
rt               5538 net/ipv6/route.c 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
rt               5546 net/ipv6/route.c 		expires = dst ? dst->expires : rt->expires;
rt               5604 net/ipv6/route.c 	struct fib6_info *rt;
rt               5645 net/ipv6/route.c 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
rt               5662 net/ipv6/route.c int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
rt               5670 net/ipv6/route.c 	if (rt == net->ipv6.fib6_null_entry)
rt               5674 net/ipv6/route.c 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
rt               5679 net/ipv6/route.c 	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
rt               5680 net/ipv6/route.c 	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
rt               5681 net/ipv6/route.c 	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
rt               5694 net/ipv6/route.c 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
rt               5706 net/ipv6/route.c 							   .rt = rt,
rt               5713 net/ipv6/route.c 		if (rt->nh) {
rt               5714 net/ipv6/route.c 			err = nexthop_for_each_fib6_nh(rt->nh,
rt               5718 net/ipv6/route.c 			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
rt               5804 net/ipv6/route.c 	struct rt6_info *rt;
rt               5890 net/ipv6/route.c 	rt = container_of(dst, struct rt6_info, dst);
rt               5891 net/ipv6/route.c 	if (rt->dst.error) {
rt               5892 net/ipv6/route.c 		err = rt->dst.error;
rt               5893 net/ipv6/route.c 		ip6_rt_put(rt);
rt               5897 net/ipv6/route.c 	if (rt == net->ipv6.ip6_null_entry) {
rt               5898 net/ipv6/route.c 		err = rt->dst.error;
rt               5899 net/ipv6/route.c 		ip6_rt_put(rt);
rt               5905 net/ipv6/route.c 		ip6_rt_put(rt);
rt               5910 net/ipv6/route.c 	skb_dst_set(skb, &rt->dst);
rt               5913 net/ipv6/route.c 	from = rcu_dereference(rt->from);
rt               5940 net/ipv6/route.c void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
rt               5951 net/ipv6/route.c 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
rt               5955 net/ipv6/route.c 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
rt               5971 net/ipv6/route.c void fib6_rt_update(struct net *net, struct fib6_info *rt,
rt               5981 net/ipv6/route.c 	call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
rt               5983 net/ipv6/route.c 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
rt               5987 net/ipv6/route.c 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
rt                161 net/ipv6/seg6_local.c 	struct rt6_info *rt;
rt                183 net/ipv6/seg6_local.c 		rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
rt                184 net/ipv6/seg6_local.c 		dst = &rt->dst;
rt                194 net/ipv6/seg6_local.c 		rt = net->ipv6.ip6_blk_hole_entry;
rt                195 net/ipv6/seg6_local.c 		dst = &rt->dst;
rt                824 net/ipv6/sit.c 	struct rtable *rt;		/* Route to the other host */
rt                904 net/ipv6/sit.c 	rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
rt                905 net/ipv6/sit.c 	if (!rt) {
rt                906 net/ipv6/sit.c 		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
rt                907 net/ipv6/sit.c 		if (IS_ERR(rt)) {
rt                911 net/ipv6/sit.c 		dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
rt                914 net/ipv6/sit.c 	if (rt->rt_type != RTN_UNICAST) {
rt                915 net/ipv6/sit.c 		ip_rt_put(rt);
rt                919 net/ipv6/sit.c 	tdev = rt->dst.dev;
rt                922 net/ipv6/sit.c 		ip_rt_put(rt);
rt                928 net/ipv6/sit.c 		ip_rt_put(rt);
rt                933 net/ipv6/sit.c 		mtu = dst_mtu(&rt->dst) - t_hlen;
rt                937 net/ipv6/sit.c 			ip_rt_put(rt);
rt                951 net/ipv6/sit.c 			ip_rt_put(rt);
rt                974 net/ipv6/sit.c 			ip_rt_put(rt);
rt                991 net/ipv6/sit.c 		ip_rt_put(rt);
rt                997 net/ipv6/sit.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
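
The net/ipv6/sit.c entries above outline one transmit path: try the per-tunnel dst cache, fall back to a full route lookup, reject non-unicast routes, then hand off to iptunnel_xmit(). A condensed, hypothetical reassembly of that shape is sketched below; the error labels and the trailing df/xnet arguments are reconstructed and only the calls that appear in the listing are used.

/* Consult the cached route first, otherwise do a fresh lookup. */
rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
if (!rt) {
	rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
	if (IS_ERR(rt))
		goto tx_error;			/* no route to destination */
	dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
}

if (rt->rt_type != RTN_UNICAST) {		/* tunnel wants unicast only */
	ip_rt_put(rt);
	goto tx_error;
}

tdev = rt->dst.dev;				/* underlay device */
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
	      df, xnet);			/* df/xnet flags assumed here */
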
rt               1070 net/ipv6/sit.c 		struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4,
rt               1078 net/ipv6/sit.c 		if (!IS_ERR(rt)) {
rt               1079 net/ipv6/sit.c 			tdev = rt->dst.dev;
rt               1080 net/ipv6/sit.c 			ip_rt_put(rt);
rt                107 net/ipv6/tcp_ipv6.c 		const struct rt6_info *rt = (const struct rt6_info *)dst;
rt                111 net/ipv6/tcp_ipv6.c 		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
rt                815 net/ipv6/udp.c 		const struct rt6_info *rt = (const struct rt6_info *)dst;
rt                817 net/ipv6/udp.c 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
rt                 74 net/ipv6/xfrm6_policy.c 	struct rt6_info *rt = (struct rt6_info *)xdst->route;
rt                 87 net/ipv6/xfrm6_policy.c 	xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
rt                 89 net/ipv6/xfrm6_policy.c 	xdst->route_cookie = rt6_get_cookie(rt);
rt                 90 net/ipv6/xfrm6_policy.c 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
rt                 91 net/ipv6/xfrm6_policy.c 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
rt                 92 net/ipv6/xfrm6_policy.c 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
rt                411 net/l2tp/l2tp_ip.c 	struct rtable *rt = NULL;
rt                470 net/l2tp/l2tp_ip.c 		rt = (struct rtable *) __sk_dst_check(sk, 0);
rt                473 net/l2tp/l2tp_ip.c 	if (rt == NULL) {
rt                486 net/l2tp/l2tp_ip.c 		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
rt                491 net/l2tp/l2tp_ip.c 		if (IS_ERR(rt))
rt                494 net/l2tp/l2tp_ip.c 			sk_setup_caps(sk, &rt->dst);
rt                496 net/l2tp/l2tp_ip.c 			skb_dst_set(skb, &rt->dst);
rt                504 net/l2tp/l2tp_ip.c 	skb_dst_set_noref(skb, &rt->dst);
rt                 73 net/mpls/af_mpls.c static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
rt                 79 net/mpls/af_mpls.c 	struct mpls_route *rt = NULL;
rt                 84 net/mpls/af_mpls.c 		rt = rcu_dereference(platform_label[index]);
rt                 86 net/mpls/af_mpls.c 	return rt;
rt                 95 net/mpls/af_mpls.c static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
rt                 97 net/mpls/af_mpls.c 	return (u8 *)nh + rt->rt_via_offset;
rt                100 net/mpls/af_mpls.c static const u8 *mpls_nh_via(const struct mpls_route *rt,
rt                103 net/mpls/af_mpls.c 	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
rt                156 net/mpls/af_mpls.c static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
rt                223 net/mpls/af_mpls.c static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
rt                225 net/mpls/af_mpls.c 	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
rt                233 net/mpls/af_mpls.c static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
rt                244 net/mpls/af_mpls.c 	if (rt->rt_nhn == 1)
rt                245 net/mpls/af_mpls.c 		return rt->rt_nh;
rt                247 net/mpls/af_mpls.c 	alive = READ_ONCE(rt->rt_nhn_alive);
rt                251 net/mpls/af_mpls.c 	hash = mpls_multipath_hash(rt, skb);
rt                253 net/mpls/af_mpls.c 	if (alive == rt->rt_nhn)
rt                255 net/mpls/af_mpls.c 	for_nexthops(rt) {
rt                263 net/mpls/af_mpls.c 	} endfor_nexthops(rt);
rt                266 net/mpls/af_mpls.c 	return mpls_get_nexthop(rt, nh_index);
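
The mpls_select_multipath() entries above show the selection outline: a single-nexthop route short-circuits, otherwise a flow hash modulo the number of live nexthops picks one, skipping dead entries when some are down. A toy version of that logic, with illustrative names rather than the kernel's structures:

#include <stdbool.h>
#include <stddef.h>

struct toy_mpls_nh { bool dead; };

static struct toy_mpls_nh *toy_select(struct toy_mpls_nh *nh, int nhn,
				      int alive, unsigned int hash)
{
	int want, i;

	if (nhn == 1)
		return &nh[0];
	if (alive <= 0)
		return NULL;

	want = hash % alive;			/* index among live hops */
	if (alive == nhn)
		return &nh[want];		/* fast path: nothing dead */

	for (i = 0; i < nhn; i++) {
		if (nh[i].dead)
			continue;
		if (want-- == 0)
			return &nh[i];		/* the want-th live hop */
	}
	return NULL;
}
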
rt                269 net/mpls/af_mpls.c static bool mpls_egress(struct net *net, struct mpls_route *rt,
rt                288 net/mpls/af_mpls.c 	payload_type = rt->rt_payload_type;
rt                302 net/mpls/af_mpls.c 		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
rt                303 net/mpls/af_mpls.c 		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
rt                324 net/mpls/af_mpls.c 		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
rt                325 net/mpls/af_mpls.c 		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
rt                346 net/mpls/af_mpls.c 	struct mpls_route *rt;
rt                384 net/mpls/af_mpls.c 	rt = mpls_route_input_rcu(net, dec.label);
rt                385 net/mpls/af_mpls.c 	if (!rt) {
rt                390 net/mpls/af_mpls.c 	nh = mpls_select_multipath(rt, skb);
rt                434 net/mpls/af_mpls.c 		if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
rt                459 net/mpls/af_mpls.c 				 mpls_nh_via(rt, nh), skb);
rt                511 net/mpls/af_mpls.c 	struct mpls_route *rt;
rt                514 net/mpls/af_mpls.c 	size = sizeof(*rt) + num_nh * nh_size;
rt                518 net/mpls/af_mpls.c 	rt = kzalloc(size, GFP_KERNEL);
rt                519 net/mpls/af_mpls.c 	if (!rt)
rt                522 net/mpls/af_mpls.c 	rt->rt_nhn = num_nh;
rt                523 net/mpls/af_mpls.c 	rt->rt_nhn_alive = num_nh;
rt                524 net/mpls/af_mpls.c 	rt->rt_nh_size = nh_size;
rt                525 net/mpls/af_mpls.c 	rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);
rt                527 net/mpls/af_mpls.c 	return rt;
rt                530 net/mpls/af_mpls.c static void mpls_rt_free(struct mpls_route *rt)
rt                532 net/mpls/af_mpls.c 	if (rt)
rt                533 net/mpls/af_mpls.c 		kfree_rcu(rt, rt_rcu);
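
The mpls_rt_alloc(), mpls_get_nexthop() and __mpls_nh_via() entries above imply one allocation holding the route header plus num_nh nexthop slots, each rt_nh_size bytes wide, with the via address at rt_via_offset inside its slot. A userspace sketch of that layout, with illustrative field and type names:

#include <stdlib.h>
#include <stdint.h>

struct toy_rt {
	unsigned int nhn;
	size_t nh_size;		/* stride of one nexthop slot */
	size_t via_offset;	/* offset of the via address in a slot */
	uint8_t nh[];		/* nhn slots follow the header */
};

static struct toy_rt *toy_rt_alloc(unsigned int num_nh, size_t nh_size,
				   size_t via_offset)
{
	struct toy_rt *rt = calloc(1, sizeof(*rt) + num_nh * nh_size);

	if (!rt)
		return NULL;
	rt->nhn = num_nh;
	rt->nh_size = nh_size;
	rt->via_offset = via_offset;
	return rt;
}

static uint8_t *toy_nh(struct toy_rt *rt, unsigned int index)
{
	return rt->nh + index * rt->nh_size;	/* start of slot */
}

static uint8_t *toy_nh_via(struct toy_rt *rt, unsigned int index)
{
	return toy_nh(rt, index) + rt->via_offset;
}
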
rt                543 net/mpls/af_mpls.c 	struct mpls_route *rt = new ? new : old;
rt                546 net/mpls/af_mpls.c 	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
rt                547 net/mpls/af_mpls.c 		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
rt                555 net/mpls/af_mpls.c 	struct mpls_route *rt;
rt                560 net/mpls/af_mpls.c 	rt = rtnl_dereference(platform_label[index]);
rt                563 net/mpls/af_mpls.c 	mpls_notify_route(net, index, rt, new, info);
rt                566 net/mpls/af_mpls.c 	mpls_rt_free(rt);
rt                590 net/mpls/af_mpls.c 	struct rtable *rt;
rt                594 net/mpls/af_mpls.c 	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
rt                595 net/mpls/af_mpls.c 	if (IS_ERR(rt))
rt                596 net/mpls/af_mpls.c 		return ERR_CAST(rt);
rt                598 net/mpls/af_mpls.c 	dev = rt->dst.dev;
rt                601 net/mpls/af_mpls.c 	ip_rt_put(rt);
rt                645 net/mpls/af_mpls.c 				      struct mpls_route *rt,
rt                653 net/mpls/af_mpls.c 			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
rt                656 net/mpls/af_mpls.c 			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
rt                677 net/mpls/af_mpls.c static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
rt                683 net/mpls/af_mpls.c 	dev = find_outdev(net, rt, nh, oif);
rt                766 net/mpls/af_mpls.c 				  struct mpls_route *rt)
rt                769 net/mpls/af_mpls.c 	struct mpls_nh *nh = rt->rt_nh;
rt                781 net/mpls/af_mpls.c 	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
rt                784 net/mpls/af_mpls.c 	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
rt                789 net/mpls/af_mpls.c 		rt->rt_nhn_alive--;
rt                797 net/mpls/af_mpls.c static int mpls_nh_build(struct net *net, struct mpls_route *rt,
rt                816 net/mpls/af_mpls.c 				  __mpls_nh_via(rt, nh), extack);
rt                823 net/mpls/af_mpls.c 	err = mpls_nh_assign_dev(net, rt, nh, oif);
rt                883 net/mpls/af_mpls.c 			       struct mpls_route *rt, u8 max_labels,
rt                892 net/mpls/af_mpls.c 	change_nexthops(rt) {
rt                916 net/mpls/af_mpls.c 		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
rt                923 net/mpls/af_mpls.c 			rt->rt_nhn_alive--;
rt                927 net/mpls/af_mpls.c 	} endfor_nexthops(rt);
rt                929 net/mpls/af_mpls.c 	rt->rt_nhn = nhs;
rt                965 net/mpls/af_mpls.c 	struct mpls_route *rt, *old;
rt               1021 net/mpls/af_mpls.c 	rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
rt               1022 net/mpls/af_mpls.c 	if (IS_ERR(rt)) {
rt               1023 net/mpls/af_mpls.c 		err = PTR_ERR(rt);
rt               1027 net/mpls/af_mpls.c 	rt->rt_protocol = cfg->rc_protocol;
rt               1028 net/mpls/af_mpls.c 	rt->rt_payload_type = cfg->rc_payload_type;
rt               1029 net/mpls/af_mpls.c 	rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
rt               1032 net/mpls/af_mpls.c 		err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
rt               1034 net/mpls/af_mpls.c 		err = mpls_nh_build_from_cfg(cfg, rt);
rt               1038 net/mpls/af_mpls.c 	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
rt               1043 net/mpls/af_mpls.c 	mpls_rt_free(rt);
rt               1503 net/mpls/af_mpls.c 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
rt               1505 net/mpls/af_mpls.c 		if (!rt)
rt               1510 net/mpls/af_mpls.c 		change_nexthops(rt) {
rt               1535 net/mpls/af_mpls.c 		} endfor_nexthops(rt);
rt               1537 net/mpls/af_mpls.c 		WRITE_ONCE(rt->rt_nhn_alive, alive);
rt               1540 net/mpls/af_mpls.c 		if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
rt               1554 net/mpls/af_mpls.c 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
rt               1556 net/mpls/af_mpls.c 		if (!rt)
rt               1560 net/mpls/af_mpls.c 		change_nexthops(rt) {
rt               1574 net/mpls/af_mpls.c 		} endfor_nexthops(rt);
rt               1576 net/mpls/af_mpls.c 		WRITE_ONCE(rt->rt_nhn_alive, alive);
rt               1964 net/mpls/af_mpls.c 			   u32 label, struct mpls_route *rt, int flags)
rt               1980 net/mpls/af_mpls.c 	rtm->rtm_protocol = rt->rt_protocol;
rt               1988 net/mpls/af_mpls.c 	if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
rt               1990 net/mpls/af_mpls.c 			rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
rt               1996 net/mpls/af_mpls.c 	if (rt->rt_nhn == 1) {
rt               1997 net/mpls/af_mpls.c 		const struct mpls_nh *nh = rt->rt_nh;
rt               2004 net/mpls/af_mpls.c 		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
rt               2024 net/mpls/af_mpls.c 		for_nexthops(rt) {
rt               2049 net/mpls/af_mpls.c 					mpls_nh_via(rt, nh),
rt               2055 net/mpls/af_mpls.c 		} endfor_nexthops(rt);
rt               2057 net/mpls/af_mpls.c 		if (linkdown == rt->rt_nhn)
rt               2059 net/mpls/af_mpls.c 		if (dead == rt->rt_nhn)
rt               2133 net/mpls/af_mpls.c static bool mpls_rt_uses_dev(struct mpls_route *rt,
rt               2138 net/mpls/af_mpls.c 	if (rt->rt_nhn == 1) {
rt               2139 net/mpls/af_mpls.c 		struct mpls_nh *nh = rt->rt_nh;
rt               2145 net/mpls/af_mpls.c 		for_nexthops(rt) {
rt               2149 net/mpls/af_mpls.c 		} endfor_nexthops(rt);
rt               2194 net/mpls/af_mpls.c 		struct mpls_route *rt;
rt               2196 net/mpls/af_mpls.c 		rt = rtnl_dereference(platform_label[index]);
rt               2197 net/mpls/af_mpls.c 		if (!rt)
rt               2200 net/mpls/af_mpls.c 		if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
rt               2201 net/mpls/af_mpls.c 		    (filter.protocol && rt->rt_protocol != filter.protocol))
rt               2206 net/mpls/af_mpls.c 				    index, rt, flags) < 0)
rt               2214 net/mpls/af_mpls.c static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
rt               2221 net/mpls/af_mpls.c 	if (rt->rt_nhn == 1) {
rt               2222 net/mpls/af_mpls.c 		struct mpls_nh *nh = rt->rt_nh;
rt               2234 net/mpls/af_mpls.c 		for_nexthops(rt) {
rt               2243 net/mpls/af_mpls.c 		} endfor_nexthops(rt);
rt               2251 net/mpls/af_mpls.c static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
rt               2259 net/mpls/af_mpls.c 	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
rt               2263 net/mpls/af_mpls.c 	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
rt               2347 net/mpls/af_mpls.c 	struct mpls_route *rt;
rt               2376 net/mpls/af_mpls.c 	rt = mpls_route_input_rcu(net, in_label);
rt               2377 net/mpls/af_mpls.c 	if (!rt) {
rt               2383 net/mpls/af_mpls.c 		skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
rt               2390 net/mpls/af_mpls.c 				      RTM_NEWROUTE, in_label, rt, 0);
rt               2441 net/mpls/af_mpls.c 	nh = mpls_select_multipath(rt, skb);
rt               2466 net/mpls/af_mpls.c 	r->rtm_protocol = rt->rt_protocol;
rt               2478 net/mpls/af_mpls.c 	    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
rt               2704 net/mpls/af_mpls.c 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
rt               2706 net/mpls/af_mpls.c 		mpls_notify_route(net, index, rt, NULL, NULL);
rt               2707 net/mpls/af_mpls.c 		mpls_rt_free(rt);
rt                160 net/mpls/internal.h #define for_nexthops(rt) {						\
rt                162 net/mpls/internal.h 	for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh);	\
rt                163 net/mpls/internal.h 	     nhsel < (rt)->rt_nhn;					\
rt                164 net/mpls/internal.h 	     __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
rt                166 net/mpls/internal.h #define change_nexthops(rt) {						\
rt                168 net/mpls/internal.h 	for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh),		\
rt                169 net/mpls/internal.h 			__nh = (u8 *)((rt)->rt_nh);			\
rt                170 net/mpls/internal.h 	     nhsel < (rt)->rt_nhn;					\
rt                171 net/mpls/internal.h 	     __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
rt                173 net/mpls/internal.h #define endfor_nexthops(rt) }
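
The macros above open a scope, step a raw byte pointer by rt_nh_size per iteration, and hand the body a typed nh pointer. A hypothetical caller in the same shape as the mpls_select_multipath() and mpls_nh_build_multi() entries; the nh_flags test is assumed here, not taken from the listing:

static unsigned int count_live_nexthops(struct mpls_route *rt)
{
	unsigned int alive = 0;

	for_nexthops(rt) {
		if (!(nh->nh_flags & RTNH_F_DEAD))	/* assumed flag check */
			alive++;
	} endfor_nexthops(rt);

	return alive;
}
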
rt                 45 net/mpls/mpls_iptunnel.c 	struct rtable *rt = NULL;
rt                 86 net/mpls/mpls_iptunnel.c 		rt = (struct rtable *)dst;
rt                135 net/mpls/mpls_iptunnel.c 	if (rt) {
rt                136 net/mpls/mpls_iptunnel.c 		if (rt->rt_gw_family == AF_INET6)
rt                137 net/mpls/mpls_iptunnel.c 			err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
rt                140 net/mpls/mpls_iptunnel.c 			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
rt                127 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable *rt;
rt                136 net/netfilter/ipvs/ip_vs_xmit.c 	rt = ip_route_output_key(net, &fl4);
rt                137 net/netfilter/ipvs/ip_vs_xmit.c 	if (IS_ERR(rt)) {
rt                139 net/netfilter/ipvs/ip_vs_xmit.c 		if (PTR_ERR(rt) == -EINVAL && *saddr &&
rt                148 net/netfilter/ipvs/ip_vs_xmit.c 		ip_rt_put(rt);
rt                155 net/netfilter/ipvs/ip_vs_xmit.c 	return rt;
rt                159 net/netfilter/ipvs/ip_vs_xmit.c static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
rt                161 net/netfilter/ipvs/ip_vs_xmit.c 	return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
rt                314 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable *rt;			/* Route to the other host */
rt                321 net/netfilter/ipvs/ip_vs_xmit.c 			rt = (struct rtable *) dest_dst->dst_cache;
rt                330 net/netfilter/ipvs/ip_vs_xmit.c 			rt = do_output_route4(net, dest->addr.ip, rt_mode,
rt                332 net/netfilter/ipvs/ip_vs_xmit.c 			if (!rt) {
rt                338 net/netfilter/ipvs/ip_vs_xmit.c 			__ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
rt                342 net/netfilter/ipvs/ip_vs_xmit.c 				  atomic_read(&rt->dst.__refcnt));
rt                355 net/netfilter/ipvs/ip_vs_xmit.c 		rt = do_output_route4(net, daddr, rt_mode, &saddr);
rt                356 net/netfilter/ipvs/ip_vs_xmit.c 		if (!rt)
rt                362 net/netfilter/ipvs/ip_vs_xmit.c 	local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
rt                373 net/netfilter/ipvs/ip_vs_xmit.c 			ip_rt_put(rt);
rt                381 net/netfilter/ipvs/ip_vs_xmit.c 		mtu = dst_mtu(&rt->dst);
rt                383 net/netfilter/ipvs/ip_vs_xmit.c 		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
rt                412 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set_noref(skb, &rt->dst);
rt                414 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set(skb, dst_clone(&rt->dst));
rt                416 net/netfilter/ipvs/ip_vs_xmit.c 		skb_dst_set(skb, &rt->dst);
rt                422 net/netfilter/ipvs/ip_vs_xmit.c 		ip_rt_put(rt);
rt                479 net/netfilter/ipvs/ip_vs_xmit.c 	struct rt6_info *rt;			/* Route to the other host */
rt                487 net/netfilter/ipvs/ip_vs_xmit.c 			rt = (struct rt6_info *) dest_dst->dst_cache;
rt                507 net/netfilter/ipvs/ip_vs_xmit.c 			rt = (struct rt6_info *) dst;
rt                508 net/netfilter/ipvs/ip_vs_xmit.c 			cookie = rt6_get_cookie(rt);
rt                509 net/netfilter/ipvs/ip_vs_xmit.c 			__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
rt                513 net/netfilter/ipvs/ip_vs_xmit.c 				  atomic_read(&rt->dst.__refcnt));
rt                523 net/netfilter/ipvs/ip_vs_xmit.c 		rt = (struct rt6_info *) dst;
rt                526 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_is_local_route6(rt);
rt                538 net/netfilter/ipvs/ip_vs_xmit.c 			dst_release(&rt->dst);
rt                547 net/netfilter/ipvs/ip_vs_xmit.c 		mtu = dst_mtu(&rt->dst);
rt                549 net/netfilter/ipvs/ip_vs_xmit.c 		mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
rt                579 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set_noref(skb, &rt->dst);
rt                581 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set(skb, dst_clone(&rt->dst));
rt                583 net/netfilter/ipvs/ip_vs_xmit.c 		skb_dst_set(skb, &rt->dst);
rt                589 net/netfilter/ipvs/ip_vs_xmit.c 		dst_release(&rt->dst);
rt                768 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable *rt;		/* Route to the other host */
rt                791 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
rt                822 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
rt                856 net/netfilter/ipvs/ip_vs_xmit.c 	struct rt6_info *rt;		/* Route to the other host */
rt                879 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
rt                911 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
rt               1138 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable *rt;			/* Route to the other host */
rt               1164 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
rt               1165 net/netfilter/ipvs/ip_vs_xmit.c 	tdev = rt->dst.dev;
rt               1288 net/netfilter/ipvs/ip_vs_xmit.c 	struct rt6_info *rt;		/* Route to the other host */
rt               1314 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
rt               1315 net/netfilter/ipvs/ip_vs_xmit.c 	tdev = rt->dst.dev;
rt               1512 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable	*rt;	/* Route to the other host */
rt               1545 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
rt               1577 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
rt               1602 net/netfilter/ipvs/ip_vs_xmit.c 	struct rt6_info	*rt;	/* Route to the other host */
rt               1634 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
rt               1666 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
rt                 25 net/netfilter/nf_conntrack_broadcast.c 	struct rtable *rt = skb_rtable(skb);
rt                 33 net/netfilter/nf_conntrack_broadcast.c 	if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
rt                 38 net/netfilter/nf_conntrack_broadcast.c 	in_dev = __in_dev_get_rcu(rt->dst.dev);
rt                246 net/netfilter/nf_flow_table_ip.c 	struct rtable *rt;
rt                263 net/netfilter/nf_flow_table_ip.c 	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
rt                264 net/netfilter/nf_flow_table_ip.c 	outdev = rt->dst.dev;
rt                276 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_offload_dst_check(&rt->dst)) {
rt                289 net/netfilter/nf_flow_table_ip.c 	if (unlikely(dst_xfrm(&rt->dst))) {
rt                293 net/netfilter/nf_flow_table_ip.c 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
rt                297 net/netfilter/nf_flow_table_ip.c 	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
rt                298 net/netfilter/nf_flow_table_ip.c 	skb_dst_set_noref(skb, &rt->dst);
rt                479 net/netfilter/nf_flow_table_ip.c 	struct rt6_info *rt;
rt                493 net/netfilter/nf_flow_table_ip.c 	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
rt                494 net/netfilter/nf_flow_table_ip.c 	outdev = rt->dst.dev;
rt                503 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_offload_dst_check(&rt->dst)) {
rt                519 net/netfilter/nf_flow_table_ip.c 	if (unlikely(dst_xfrm(&rt->dst))) {
rt                523 net/netfilter/nf_flow_table_ip.c 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
rt                527 net/netfilter/nf_flow_table_ip.c 	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
rt                528 net/netfilter/nf_flow_table_ip.c 	skb_dst_set_noref(skb, &rt->dst);
rt                 24 net/netfilter/nf_nat_masquerade.c 	const struct rtable *rt;
rt                 40 net/netfilter/nf_nat_masquerade.c 	rt = skb_rtable(skb);
rt                 41 net/netfilter/nf_nat_masquerade.c 	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
rt                 48 net/netfilter/xt_TCPMSS.c 	struct rtable *rt = NULL;
rt                 62 net/netfilter/xt_TCPMSS.c 	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
rt                 63 net/netfilter/xt_TCPMSS.c 	if (rt != NULL) {
rt                 64 net/netfilter/xt_TCPMSS.c 		mtu = dst_mtu(&rt->dst);
rt                 65 net/netfilter/xt_TCPMSS.c 		dst_release(&rt->dst);
rt                 37 net/netfilter/xt_addrtype.c 	struct rt6_info *rt;
rt                 51 net/netfilter/xt_addrtype.c 	route_err = nf_ip6_route(net, (struct dst_entry **)&rt,
rt                 56 net/netfilter/xt_addrtype.c 	if (rt->rt6i_flags & RTF_REJECT)
rt                 59 net/netfilter/xt_addrtype.c 	if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
rt                 61 net/netfilter/xt_addrtype.c 	if (ipv6_anycast_destination((struct dst_entry *)rt, addr))
rt                 64 net/netfilter/xt_addrtype.c 	dst_release(&rt->dst);
rt                806 net/netlink/genetlink.c 	struct genl_family *rt;
rt                811 net/netlink/genetlink.c 	idr_for_each_entry(&genl_fam_idr, rt, id) {
rt                812 net/netlink/genetlink.c 		if (!rt->netnsok && !net_eq(net, &init_net))
rt                818 net/netlink/genetlink.c 		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
rt                157 net/rxrpc/peer_object.c 	struct rtable *rt;
rt                169 net/rxrpc/peer_object.c 		rt = ip_route_output_ports(
rt                173 net/rxrpc/peer_object.c 		if (IS_ERR(rt)) {
rt                174 net/rxrpc/peer_object.c 			_leave(" [route err %ld]", PTR_ERR(rt));
rt                177 net/rxrpc/peer_object.c 		dst = &rt->dst;
rt                348 net/sched/cls_route.c 				struct route4_filter *rt;
rt                350 net/sched/cls_route.c 				rt = rtnl_dereference(b->ht[i]);
rt                351 net/sched/cls_route.c 				if (rt)
rt                369 net/sctp/ipv6.c 		struct rt6_info *rt;
rt                371 net/sctp/ipv6.c 		rt = (struct rt6_info *)dst;
rt                372 net/sctp/ipv6.c 		t->dst_cookie = rt6_get_cookie(rt);
rt                374 net/sctp/ipv6.c 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
rt                411 net/sctp/protocol.c 	struct rtable *rt;
rt                441 net/sctp/protocol.c 	rt = ip_route_output_key(sock_net(sk), fl4);
rt                442 net/sctp/protocol.c 	if (!IS_ERR(rt)) {
rt                443 net/sctp/protocol.c 		dst = &rt->dst;
rt                499 net/sctp/protocol.c 		rt = ip_route_output_key(sock_net(sk), fl4);
rt                500 net/sctp/protocol.c 		if (IS_ERR(rt))
rt                510 net/sctp/protocol.c 				dst = &rt->dst;
rt                514 net/sctp/protocol.c 				dst_release(&rt->dst);
rt                520 net/sctp/protocol.c 		dst = &rt->dst;
rt                546 net/sctp/protocol.c 	struct rtable *rt = (struct rtable *)t->dst;
rt                548 net/sctp/protocol.c 	if (rt) {
rt                 35 net/sunrpc/timer.c void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
rt                 40 net/sunrpc/timer.c 	rt->timeo = timeo;
rt                 45 net/sunrpc/timer.c 		rt->srtt[i] = init;
rt                 46 net/sunrpc/timer.c 		rt->sdrtt[i] = RPC_RTO_INIT;
rt                 47 net/sunrpc/timer.c 		rt->ntimeouts[i] = 0;
rt                 61 net/sunrpc/timer.c void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
rt                 75 net/sunrpc/timer.c 	srtt = (long *)&rt->srtt[timer];
rt                 82 net/sunrpc/timer.c 	sdrtt = (long *)&rt->sdrtt[timer];
rt                110 net/sunrpc/timer.c unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
rt                115 net/sunrpc/timer.c 		return rt->timeo;
rt                117 net/sunrpc/timer.c 	res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer];
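
The net/sunrpc/timer.c entries above end with the timeout read-back: srtt is kept scaled by 8, so rpc_calc_rto() returns srtt/8 (rounded via the +7) plus the deviation term. A standalone illustration follows; the update step is a generic Jacobson-style EWMA assumed for the sketch, only the final formula appears in the listing.

#include <stdio.h>

struct toy_rtt { long srtt; long sdrtt; };	/* srtt scaled by 8 */

static void toy_update(struct toy_rtt *rt, long sample)
{
	long err = sample - (rt->srtt >> 3);

	rt->srtt += err;			/* smoothed RTT, still scaled */
	if (err < 0)
		err = -err;
	rt->sdrtt += (err - rt->sdrtt) >> 2;	/* smoothed deviation */
}

static unsigned long toy_calc_rto(const struct toy_rtt *rt)
{
	return ((rt->srtt + 7) >> 3) + rt->sdrtt;
}

int main(void)
{
	struct toy_rtt rt = { .srtt = 8 * 100, .sdrtt = 25 };

	toy_update(&rt, 120);
	printf("rto=%lu\n", toy_calc_rto(&rt));
	return 0;
}
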
rt                170 net/tipc/udp_media.c 		struct rtable *rt = (struct rtable *)ndst;
rt                172 net/tipc/udp_media.c 		if (!rt) {
rt                179 net/tipc/udp_media.c 			rt = ip_route_output_key(net, &fl);
rt                180 net/tipc/udp_media.c 			if (IS_ERR(rt)) {
rt                181 net/tipc/udp_media.c 				err = PTR_ERR(rt);
rt                184 net/tipc/udp_media.c 			dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
rt                187 net/tipc/udp_media.c 		ttl = ip4_dst_hoplimit(&rt->dst);
rt                188 net/tipc/udp_media.c 		udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
rt                744 net/x25/af_x25.c 	struct x25_route *rt;
rt                776 net/x25/af_x25.c 	rt = x25_get_route(&addr->sx25_addr);
rt                777 net/x25/af_x25.c 	if (!rt)
rt                780 net/x25/af_x25.c 	x25->neighbour = x25_get_neigh(rt->dev);
rt                830 net/x25/af_x25.c 	x25_route_put(rt);
rt                 20 net/x25/x25_forward.c 	struct x25_route *rt;
rt                 28 net/x25/x25_forward.c 	if ((rt = x25_get_route(dest_addr)) == NULL)
rt                 31 net/x25/x25_forward.c 	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
rt                 41 net/x25/x25_forward.c 	if (rt->dev == from->dev) {
rt                 66 net/x25/x25_forward.c 		new_frwd->dev1 = rt->dev;
rt                 85 net/x25/x25_forward.c 	x25_route_put(rt);
rt                 45 net/x25/x25_proc.c 	struct x25_route *rt = list_entry(v, struct x25_route, node);
rt                 52 net/x25/x25_proc.c 	rt = v;
rt                 54 net/x25/x25_proc.c 		   rt->address.x25_addr, rt->sigdigits,
rt                 55 net/x25/x25_proc.c 		   rt->dev ? rt->dev->name : "???");
rt                 29 net/x25/x25_route.c 	struct x25_route *rt;
rt                 36 net/x25/x25_route.c 		rt = list_entry(entry, struct x25_route, node);
rt                 38 net/x25/x25_route.c 		if (!memcmp(&rt->address, address, sigdigits) &&
rt                 39 net/x25/x25_route.c 		    rt->sigdigits == sigdigits)
rt                 43 net/x25/x25_route.c 	rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
rt                 45 net/x25/x25_route.c 	if (!rt)
rt                 48 net/x25/x25_route.c 	strcpy(rt->address.x25_addr, "000000000000000");
rt                 49 net/x25/x25_route.c 	memcpy(rt->address.x25_addr, address->x25_addr, sigdigits);
rt                 51 net/x25/x25_route.c 	rt->sigdigits = sigdigits;
rt                 52 net/x25/x25_route.c 	rt->dev       = dev;
rt                 53 net/x25/x25_route.c 	refcount_set(&rt->refcnt, 1);
rt                 55 net/x25/x25_route.c 	list_add(&rt->node, &x25_route_list);
rt                 69 net/x25/x25_route.c static void __x25_remove_route(struct x25_route *rt)
rt                 71 net/x25/x25_route.c 	if (rt->node.next) {
rt                 72 net/x25/x25_route.c 		list_del(&rt->node);
rt                 73 net/x25/x25_route.c 		x25_route_put(rt);
rt                 80 net/x25/x25_route.c 	struct x25_route *rt;
rt                 87 net/x25/x25_route.c 		rt = list_entry(entry, struct x25_route, node);
rt                 89 net/x25/x25_route.c 		if (!memcmp(&rt->address, address, sigdigits) &&
rt                 90 net/x25/x25_route.c 		    rt->sigdigits == sigdigits && rt->dev == dev) {
rt                 91 net/x25/x25_route.c 			__x25_remove_route(rt);
rt                106 net/x25/x25_route.c 	struct x25_route *rt;
rt                112 net/x25/x25_route.c 		rt = list_entry(entry, struct x25_route, node);
rt                114 net/x25/x25_route.c 		if (rt->dev == dev)
rt                115 net/x25/x25_route.c 			__x25_remove_route(rt);
rt                151 net/x25/x25_route.c 	struct x25_route *rt, *use = NULL;
rt                157 net/x25/x25_route.c 		rt = list_entry(entry, struct x25_route, node);
rt                159 net/x25/x25_route.c 		if (!memcmp(&rt->address, addr, rt->sigdigits)) {
rt                161 net/x25/x25_route.c 				use = rt;
rt                162 net/x25/x25_route.c 			else if (rt->sigdigits > use->sigdigits)
rt                163 net/x25/x25_route.c 				use = rt;
rt                179 net/x25/x25_route.c 	struct x25_route_struct rt;
rt                187 net/x25/x25_route.c 	if (copy_from_user(&rt, arg, sizeof(rt)))
rt                191 net/x25/x25_route.c 	if (rt.sigdigits > 15)
rt                194 net/x25/x25_route.c 	dev = x25_dev_get(rt.device);
rt                199 net/x25/x25_route.c 		rc = x25_add_route(&rt.address, rt.sigdigits, dev);
rt                201 net/x25/x25_route.c 		rc = x25_del_route(&rt.address, rt.sigdigits, dev);
rt                212 net/x25/x25_route.c 	struct x25_route *rt;
rt                217 net/x25/x25_route.c 		rt = list_entry(entry, struct x25_route, node);
rt                218 net/x25/x25_route.c 		__x25_remove_route(rt);
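
The x25_get_route() entries above show the matching rule: a route whose first sigdigits of address equal the destination is a candidate, and the candidate with the most significant digits wins. A userspace rendering of that rule, with the kernel's list replaced by a plain array for illustration:

#include <string.h>
#include <stddef.h>

struct toy_x25_route {
	char address[16];		/* "000000000000000" padded, as in x25_add_route() */
	unsigned int sigdigits;
};

static struct toy_x25_route *toy_get_route(struct toy_x25_route *tbl,
					   size_t n, const char *addr)
{
	struct toy_x25_route *use = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (memcmp(tbl[i].address, addr, tbl[i].sigdigits))
			continue;		/* prefix does not match */
		if (!use || tbl[i].sigdigits > use->sigdigits)
			use = &tbl[i];		/* keep the longest match */
	}
	return use;
}
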
rt                369 net/xfrm/xfrm_interface.c 			struct rtable *rt;
rt                373 net/xfrm/xfrm_interface.c 			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
rt                374 net/xfrm/xfrm_interface.c 			if (IS_ERR(rt)) {
rt                378 net/xfrm/xfrm_interface.c 			skb_dst_set(skb, &rt->dst);
rt               2501 net/xfrm/xfrm_policy.c 		struct rt6_info *rt = (struct rt6_info *)dst;
rt               2502 net/xfrm/xfrm_policy.c 		path->path_cookie = rt6_get_cookie(rt);
rt                 39 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[type];
rt                 42 net/xfrm/xfrm_user.c 	if (!rt)
rt                 45 net/xfrm/xfrm_user.c 	algp = nla_data(rt);
rt                 46 net/xfrm/xfrm_user.c 	if (nla_len(rt) < (int)xfrm_alg_len(algp))
rt                 65 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
rt                 68 net/xfrm/xfrm_user.c 	if (!rt)
rt                 71 net/xfrm/xfrm_user.c 	algp = nla_data(rt);
rt                 72 net/xfrm/xfrm_user.c 	if (nla_len(rt) < (int)xfrm_alg_auth_len(algp))
rt                 81 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
rt                 84 net/xfrm/xfrm_user.c 	if (!rt)
rt                 87 net/xfrm/xfrm_user.c 	algp = nla_data(rt);
rt                 88 net/xfrm/xfrm_user.c 	if (nla_len(rt) < (int)aead_len(algp))
rt                 98 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[type];
rt                100 net/xfrm/xfrm_user.c 	if (rt && addrp)
rt                101 net/xfrm/xfrm_user.c 		*addrp = nla_data(rt);
rt                106 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
rt                109 net/xfrm/xfrm_user.c 	if (!rt)
rt                112 net/xfrm/xfrm_user.c 	uctx = nla_data(rt);
rt                113 net/xfrm/xfrm_user.c 	if (uctx->len > nla_len(rt) ||
rt                123 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
rt                126 net/xfrm/xfrm_user.c 	if (!rt)
rt                129 net/xfrm/xfrm_user.c 	rs = nla_data(rt);
rt                134 net/xfrm/xfrm_user.c 	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
rt                135 net/xfrm/xfrm_user.c 	    nla_len(rt) != sizeof(*rs))
rt                523 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
rt                553 net/xfrm/xfrm_user.c 	if (rt)
rt                554 net/xfrm/xfrm_user.c 		x->replay_maxdiff = nla_get_u32(rt);
rt               1456 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
rt               1459 net/xfrm/xfrm_user.c 	if (!rt)
rt               1462 net/xfrm/xfrm_user.c 	uctx = nla_data(rt);
rt               1546 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_TMPL];
rt               1548 net/xfrm/xfrm_user.c 	if (!rt) {
rt               1551 net/xfrm/xfrm_user.c 		struct xfrm_user_tmpl *utmpl = nla_data(rt);
rt               1552 net/xfrm/xfrm_user.c 		int nr = nla_len(rt) / sizeof(*utmpl);
rt               1566 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
rt               1571 net/xfrm/xfrm_user.c 	if (rt) {
rt               1572 net/xfrm/xfrm_user.c 		upt = nla_data(rt);
rt               1886 net/xfrm/xfrm_user.c 		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
rt               1894 net/xfrm/xfrm_user.c 		if (rt) {
rt               1895 net/xfrm/xfrm_user.c 			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
rt               2094 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
rt               2096 net/xfrm/xfrm_user.c 	if (!lt && !rp && !re && !et && !rt)
rt               2186 net/xfrm/xfrm_user.c 		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
rt               2194 net/xfrm/xfrm_user.c 		if (rt) {
rt               2195 net/xfrm/xfrm_user.c 			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
rt               2264 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_TMPL];
rt               2293 net/xfrm/xfrm_user.c 	ut = nla_data(rt);
rt               2324 net/xfrm/xfrm_user.c 	struct nlattr *rt = attrs[XFRMA_MIGRATE];
rt               2338 net/xfrm/xfrm_user.c 	um = nla_data(rt);
rt               2339 net/xfrm/xfrm_user.c 	num_migrate = nla_len(rt) / sizeof(*um);
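
The net/xfrm/xfrm_user.c entries above repeat one validation shape: an optional attribute may be absent, and when present its payload must be at least as long as the structure (plus any variable tail) it claims to carry. A condensed, hypothetical helper in that style; toy_blob and toy_blob_len() stand in for the real xfrm_algo/xfrm_user_sec_ctx types, while nla_data() and nla_len() are the calls visible in the listing.

static int verify_toy_blob(struct nlattr **attrs, int type)
{
	struct nlattr *rt = attrs[type];
	struct toy_blob *blob;

	if (!rt)
		return 0;			/* attribute is optional */

	blob = nla_data(rt);
	if (nla_len(rt) < (int)toy_blob_len(blob))
		return -EINVAL;			/* payload shorter than it claims */

	return 0;
}
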
rt                324 samples/bpf/xdp_router_ipv4_user.c 		struct rtmsg rt;
rt                345 samples/bpf/xdp_router_ipv4_user.c 	req.rt.rtm_family = rtm_family;
rt                346 samples/bpf/xdp_router_ipv4_user.c 	req.rt.rtm_table = RT_TABLE_MAIN;
rt                463 samples/bpf/xdp_router_ipv4_user.c 		struct ndmsg rt;
rt                483 samples/bpf/xdp_router_ipv4_user.c 	req.rt.ndm_state = NUD_REACHABLE;
rt                484 samples/bpf/xdp_router_ipv4_user.c 	req.rt.ndm_family = rtm_family;
rt                918 scripts/unifdef.c 	Linetype lt, rt;
rt                934 scripts/unifdef.c 		rt = ops->inner(ops+1, &val, &cp);
rt                935 scripts/unifdef.c 		if (rt == LT_ERROR)
rt                937 scripts/unifdef.c 		lt = op->fn(valp, lt, *valp, rt, val);
rt                345 security/selinux/ss/policydb.c 	struct mls_range *rt = datum;
rt                348 security/selinux/ss/policydb.c 	ebitmap_destroy(&rt->level[0].cat);
rt                349 security/selinux/ss/policydb.c 	ebitmap_destroy(&rt->level[1].cat);
rt               1803 security/selinux/ss/policydb.c 	struct range_trans *rt = NULL;
rt               1819 security/selinux/ss/policydb.c 		rt = kzalloc(sizeof(*rt), GFP_KERNEL);
rt               1820 security/selinux/ss/policydb.c 		if (!rt)
rt               1827 security/selinux/ss/policydb.c 		rt->source_type = le32_to_cpu(buf[0]);
rt               1828 security/selinux/ss/policydb.c 		rt->target_type = le32_to_cpu(buf[1]);
rt               1833 security/selinux/ss/policydb.c 			rt->target_class = le32_to_cpu(buf[0]);
rt               1835 security/selinux/ss/policydb.c 			rt->target_class = p->process_class;
rt               1838 security/selinux/ss/policydb.c 		if (!policydb_type_isvalid(p, rt->source_type) ||
rt               1839 security/selinux/ss/policydb.c 		    !policydb_type_isvalid(p, rt->target_type) ||
rt               1840 security/selinux/ss/policydb.c 		    !policydb_class_isvalid(p, rt->target_class))
rt               1858 security/selinux/ss/policydb.c 		rc = hashtab_insert(p->range_tr, rt, r);
rt               1862 security/selinux/ss/policydb.c 		rt = NULL;
rt               1868 security/selinux/ss/policydb.c 	kfree(rt);
rt               3268 security/selinux/ss/policydb.c 	struct range_trans *rt = key;
rt               3275 security/selinux/ss/policydb.c 	buf[0] = cpu_to_le32(rt->source_type);
rt               3276 security/selinux/ss/policydb.c 	buf[1] = cpu_to_le32(rt->target_type);
rt               3281 security/selinux/ss/policydb.c 		buf[0] = cpu_to_le32(rt->target_class);
rt                 25 sound/aoa/aoa-gpio.h 	void (*init)(struct gpio_runtime *rt);
rt                 26 sound/aoa/aoa-gpio.h 	void (*exit)(struct gpio_runtime *rt);
rt                 29 sound/aoa/aoa-gpio.h 	void (*all_amps_off)(struct gpio_runtime *rt);
rt                 31 sound/aoa/aoa-gpio.h 	void (*all_amps_restore)(struct gpio_runtime *rt);
rt                 33 sound/aoa/aoa-gpio.h 	void (*set_headphone)(struct gpio_runtime *rt, int on);
rt                 34 sound/aoa/aoa-gpio.h 	void (*set_speakers)(struct gpio_runtime *rt, int on);
rt                 35 sound/aoa/aoa-gpio.h 	void (*set_lineout)(struct gpio_runtime *rt, int on);
rt                 36 sound/aoa/aoa-gpio.h 	void (*set_master)(struct gpio_runtime *rt, int on);
rt                 38 sound/aoa/aoa-gpio.h 	int (*get_headphone)(struct gpio_runtime *rt);
rt                 39 sound/aoa/aoa-gpio.h 	int (*get_speakers)(struct gpio_runtime *rt);
rt                 40 sound/aoa/aoa-gpio.h 	int (*get_lineout)(struct gpio_runtime *rt);
rt                 41 sound/aoa/aoa-gpio.h 	int (*get_master)(struct gpio_runtime *rt);
rt                 43 sound/aoa/aoa-gpio.h 	void (*set_hw_reset)(struct gpio_runtime *rt, int on);
rt                 52 sound/aoa/aoa-gpio.h 	int (*set_notify)(struct gpio_runtime *rt,
rt                 58 sound/aoa/aoa-gpio.h 	int (*get_detect)(struct gpio_runtime *rt,
rt                133 sound/aoa/core/gpio-feature.c static void ftr_gpio_set_##name(struct gpio_runtime *rt, int on)\
rt                137 sound/aoa/core/gpio-feature.c 	if (unlikely(!rt)) return;				\
rt                152 sound/aoa/core/gpio-feature.c 	rt->implementation_private &= ~(1<<bit);		\
rt                153 sound/aoa/core/gpio-feature.c 	rt->implementation_private |= (!!on << bit);		\
rt                155 sound/aoa/core/gpio-feature.c static int ftr_gpio_get_##name(struct gpio_runtime *rt)		\
rt                157 sound/aoa/core/gpio-feature.c 	if (unlikely(!rt)) return 0;				\
rt                158 sound/aoa/core/gpio-feature.c 	return (rt->implementation_private>>bit)&1;		\
rt                166 sound/aoa/core/gpio-feature.c static void ftr_gpio_set_hw_reset(struct gpio_runtime *rt, int on)
rt                170 sound/aoa/core/gpio-feature.c 	if (unlikely(!rt)) return;
rt                183 sound/aoa/core/gpio-feature.c static void ftr_gpio_all_amps_off(struct gpio_runtime *rt)
rt                187 sound/aoa/core/gpio-feature.c 	if (unlikely(!rt)) return;
rt                188 sound/aoa/core/gpio-feature.c 	saved = rt->implementation_private;
rt                189 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_headphone(rt, 0);
rt                190 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_amp(rt, 0);
rt                191 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_lineout(rt, 0);
rt                193 sound/aoa/core/gpio-feature.c 		ftr_gpio_set_master(rt, 0);
rt                194 sound/aoa/core/gpio-feature.c 	rt->implementation_private = saved;
rt                197 sound/aoa/core/gpio-feature.c static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
rt                201 sound/aoa/core/gpio-feature.c 	if (unlikely(!rt)) return;
rt                202 sound/aoa/core/gpio-feature.c 	s = rt->implementation_private;
rt                203 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_headphone(rt, (s>>0)&1);
rt                204 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_amp(rt, (s>>1)&1);
rt                205 sound/aoa/core/gpio-feature.c 	ftr_gpio_set_lineout(rt, (s>>2)&1);
rt                207 sound/aoa/core/gpio-feature.c 		ftr_gpio_set_master(rt, (s>>3)&1);
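
The gpio-feature.c entries above keep one bit per output in implementation_private, so "all amps off" can drop every output and later restore exactly the bits that were set. A standalone illustration of that bookkeeping, with illustrative names and the hardware access reduced to a comment:

#include <stdio.h>

struct toy_gpio { unsigned long state; };

static void toy_set(struct toy_gpio *rt, int bit, int on)
{
	rt->state &= ~(1UL << bit);			/* clear old value */
	rt->state |= ((unsigned long)!!on << bit);	/* record new value */
	/* ...drive the actual GPIO here... */
}

static void toy_all_off(struct toy_gpio *rt)
{
	unsigned long saved = rt->state;	/* remember what was on */
	int bit;

	for (bit = 0; bit < 3; bit++)		/* headphone, amp, lineout */
		toy_set(rt, bit, 0);
	rt->state = saved;			/* keep bits for the restore */
}

static void toy_all_restore(struct toy_gpio *rt)
{
	unsigned long s = rt->state;
	int bit;

	for (bit = 0; bit < 3; bit++)
		toy_set(rt, bit, (s >> bit) & 1);
}
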
rt                232 sound/aoa/core/gpio-feature.c static void ftr_gpio_init(struct gpio_runtime *rt)
rt                273 sound/aoa/core/gpio-feature.c 	ftr_gpio_all_amps_off(rt);
rt                274 sound/aoa/core/gpio-feature.c 	rt->implementation_private = 0;
rt                275 sound/aoa/core/gpio-feature.c 	INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
rt                276 sound/aoa/core/gpio-feature.c 	INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
rt                277 sound/aoa/core/gpio-feature.c 	INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
rt                278 sound/aoa/core/gpio-feature.c 	mutex_init(&rt->headphone_notify.mutex);
rt                279 sound/aoa/core/gpio-feature.c 	mutex_init(&rt->line_in_notify.mutex);
rt                280 sound/aoa/core/gpio-feature.c 	mutex_init(&rt->line_out_notify.mutex);
rt                283 sound/aoa/core/gpio-feature.c static void ftr_gpio_exit(struct gpio_runtime *rt)
rt                285 sound/aoa/core/gpio-feature.c 	ftr_gpio_all_amps_off(rt);
rt                286 sound/aoa/core/gpio-feature.c 	rt->implementation_private = 0;
rt                287 sound/aoa/core/gpio-feature.c 	if (rt->headphone_notify.notify)
rt                288 sound/aoa/core/gpio-feature.c 		free_irq(headphone_detect_irq, &rt->headphone_notify);
rt                289 sound/aoa/core/gpio-feature.c 	if (rt->line_in_notify.gpio_private)
rt                290 sound/aoa/core/gpio-feature.c 		free_irq(linein_detect_irq, &rt->line_in_notify);
rt                291 sound/aoa/core/gpio-feature.c 	if (rt->line_out_notify.gpio_private)
rt                292 sound/aoa/core/gpio-feature.c 		free_irq(lineout_detect_irq, &rt->line_out_notify);
rt                293 sound/aoa/core/gpio-feature.c 	cancel_delayed_work_sync(&rt->headphone_notify.work);
rt                294 sound/aoa/core/gpio-feature.c 	cancel_delayed_work_sync(&rt->line_in_notify.work);
rt                295 sound/aoa/core/gpio-feature.c 	cancel_delayed_work_sync(&rt->line_out_notify.work);
rt                296 sound/aoa/core/gpio-feature.c 	mutex_destroy(&rt->headphone_notify.mutex);
rt                297 sound/aoa/core/gpio-feature.c 	mutex_destroy(&rt->line_in_notify.mutex);
rt                298 sound/aoa/core/gpio-feature.c 	mutex_destroy(&rt->line_out_notify.mutex);
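
ftr_gpio_exit() (lines 283-298) undoes ftr_gpio_init() in reverse: release the optional IRQ clients, flush the delayed work, then destroy the mutexes. A tiny userspace sketch of the same symmetric init/teardown shape, with a pthread mutex and a heap allocation standing in for the kernel objects (illustrative only):

#include <pthread.h>
#include <stdlib.h>

struct notify_sketch {
	void *private;          /* NULL when no IRQ client is registered */
	pthread_mutex_t lock;
};

static int notify_init(struct notify_sketch *n)
{
	n->private = NULL;
	return pthread_mutex_init(&n->lock, NULL);
}

static void notify_exit(struct notify_sketch *n)
{
	/* release the optional resource first, then the lock it protected */
	free(n->private);
	n->private = NULL;
	pthread_mutex_destroy(&n->lock);
}

int main(void)
{
	struct notify_sketch n;

	if (notify_init(&n))
		return 1;
	n.private = malloc(16);   /* pretend an IRQ client got registered */
	notify_exit(&n);
	return 0;
}
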
rt                310 sound/aoa/core/gpio-feature.c static int ftr_set_notify(struct gpio_runtime *rt,
rt                323 sound/aoa/core/gpio-feature.c 		notif = &rt->headphone_notify;
rt                328 sound/aoa/core/gpio-feature.c 		notif = &rt->line_in_notify;
rt                333 sound/aoa/core/gpio-feature.c 		notif = &rt->line_out_notify;
rt                377 sound/aoa/core/gpio-feature.c static int ftr_get_detect(struct gpio_runtime *rt,
rt                 14 sound/aoa/core/gpio-pmf.c static void pmf_gpio_set_##name(struct gpio_runtime *rt, int on)\
rt                 19 sound/aoa/core/gpio-pmf.c 	if (unlikely(!rt)) return;				\
rt                 20 sound/aoa/core/gpio-pmf.c 	rc = pmf_call_function(rt->node, #name "-mute", &args);	\
rt                 24 sound/aoa/core/gpio-pmf.c 	rt->implementation_private &= ~(1<<bit);		\
rt                 25 sound/aoa/core/gpio-pmf.c 	rt->implementation_private |= (!!on << bit);		\
rt                 27 sound/aoa/core/gpio-pmf.c static int pmf_gpio_get_##name(struct gpio_runtime *rt)		\
rt                 29 sound/aoa/core/gpio-pmf.c 	if (unlikely(!rt)) return 0;				\
rt                 30 sound/aoa/core/gpio-pmf.c 	return (rt->implementation_private>>bit)&1;		\
rt                 37 sound/aoa/core/gpio-pmf.c static void pmf_gpio_set_hw_reset(struct gpio_runtime *rt, int on)
rt                 42 sound/aoa/core/gpio-pmf.c 	if (unlikely(!rt)) return;
rt                 43 sound/aoa/core/gpio-pmf.c 	rc = pmf_call_function(rt->node, "hw-reset", &args);
rt                 49 sound/aoa/core/gpio-pmf.c static void pmf_gpio_all_amps_off(struct gpio_runtime *rt)
rt                 53 sound/aoa/core/gpio-pmf.c 	if (unlikely(!rt)) return;
rt                 54 sound/aoa/core/gpio-pmf.c 	saved = rt->implementation_private;
rt                 55 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_headphone(rt, 0);
rt                 56 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_amp(rt, 0);
rt                 57 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_lineout(rt, 0);
rt                 58 sound/aoa/core/gpio-pmf.c 	rt->implementation_private = saved;
rt                 61 sound/aoa/core/gpio-pmf.c static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt)
rt                 65 sound/aoa/core/gpio-pmf.c 	if (unlikely(!rt)) return;
rt                 66 sound/aoa/core/gpio-pmf.c 	s = rt->implementation_private;
rt                 67 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_headphone(rt, (s>>0)&1);
rt                 68 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_amp(rt, (s>>1)&1);
rt                 69 sound/aoa/core/gpio-pmf.c 	pmf_gpio_set_lineout(rt, (s>>2)&1);
rt                 83 sound/aoa/core/gpio-pmf.c static void pmf_gpio_init(struct gpio_runtime *rt)
rt                 85 sound/aoa/core/gpio-pmf.c 	pmf_gpio_all_amps_off(rt);
rt                 86 sound/aoa/core/gpio-pmf.c 	rt->implementation_private = 0;
rt                 87 sound/aoa/core/gpio-pmf.c 	INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
rt                 88 sound/aoa/core/gpio-pmf.c 	INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
rt                 89 sound/aoa/core/gpio-pmf.c 	INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
rt                 90 sound/aoa/core/gpio-pmf.c 	mutex_init(&rt->headphone_notify.mutex);
rt                 91 sound/aoa/core/gpio-pmf.c 	mutex_init(&rt->line_in_notify.mutex);
rt                 92 sound/aoa/core/gpio-pmf.c 	mutex_init(&rt->line_out_notify.mutex);
rt                 95 sound/aoa/core/gpio-pmf.c static void pmf_gpio_exit(struct gpio_runtime *rt)
rt                 97 sound/aoa/core/gpio-pmf.c 	pmf_gpio_all_amps_off(rt);
rt                 98 sound/aoa/core/gpio-pmf.c 	rt->implementation_private = 0;
rt                100 sound/aoa/core/gpio-pmf.c 	if (rt->headphone_notify.gpio_private)
rt                101 sound/aoa/core/gpio-pmf.c 		pmf_unregister_irq_client(rt->headphone_notify.gpio_private);
rt                102 sound/aoa/core/gpio-pmf.c 	if (rt->line_in_notify.gpio_private)
rt                103 sound/aoa/core/gpio-pmf.c 		pmf_unregister_irq_client(rt->line_in_notify.gpio_private);
rt                104 sound/aoa/core/gpio-pmf.c 	if (rt->line_out_notify.gpio_private)
rt                105 sound/aoa/core/gpio-pmf.c 		pmf_unregister_irq_client(rt->line_out_notify.gpio_private);
rt                109 sound/aoa/core/gpio-pmf.c 	cancel_delayed_work_sync(&rt->headphone_notify.work);
rt                110 sound/aoa/core/gpio-pmf.c 	cancel_delayed_work_sync(&rt->line_in_notify.work);
rt                111 sound/aoa/core/gpio-pmf.c 	cancel_delayed_work_sync(&rt->line_out_notify.work);
rt                113 sound/aoa/core/gpio-pmf.c 	mutex_destroy(&rt->headphone_notify.mutex);
rt                114 sound/aoa/core/gpio-pmf.c 	mutex_destroy(&rt->line_in_notify.mutex);
rt                115 sound/aoa/core/gpio-pmf.c 	mutex_destroy(&rt->line_out_notify.mutex);
rt                117 sound/aoa/core/gpio-pmf.c 	kfree(rt->headphone_notify.gpio_private);
rt                118 sound/aoa/core/gpio-pmf.c 	kfree(rt->line_in_notify.gpio_private);
rt                119 sound/aoa/core/gpio-pmf.c 	kfree(rt->line_out_notify.gpio_private);
rt                129 sound/aoa/core/gpio-pmf.c static int pmf_set_notify(struct gpio_runtime *rt,
rt                142 sound/aoa/core/gpio-pmf.c 		notif = &rt->headphone_notify;
rt                146 sound/aoa/core/gpio-pmf.c 		notif = &rt->line_in_notify;
rt                150 sound/aoa/core/gpio-pmf.c 		notif = &rt->line_out_notify;
rt                188 sound/aoa/core/gpio-pmf.c 		err = pmf_register_irq_client(rt->node,
rt                208 sound/aoa/core/gpio-pmf.c static int pmf_get_detect(struct gpio_runtime *rt,
rt                229 sound/aoa/core/gpio-pmf.c 	err = pmf_call_function(rt->node, name, &args);
rt                398 sound/parisc/harmony.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                409 sound/parisc/harmony.c 	h->st.rate = snd_harmony_rate_bits(rt->rate);
rt                410 sound/parisc/harmony.c 	h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
rt                412 sound/parisc/harmony.c 	if (rt->channels == 2)
rt                419 sound/parisc/harmony.c 	h->pbuf.addr = rt->dma_addr;
rt                428 sound/parisc/harmony.c         struct snd_pcm_runtime *rt = ss->runtime;
rt                439 sound/parisc/harmony.c         h->st.rate = snd_harmony_rate_bits(rt->rate);
rt                440 sound/parisc/harmony.c         h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
rt                442 sound/parisc/harmony.c         if (rt->channels == 2)
rt                449 sound/parisc/harmony.c         h->cbuf.addr = rt->dma_addr;
rt                457 sound/parisc/harmony.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                480 sound/parisc/harmony.c 	return bytes_to_frames(rt, played);
rt                486 sound/parisc/harmony.c         struct snd_pcm_runtime *rt = ss->runtime;
rt                509 sound/parisc/harmony.c         return bytes_to_frames(rt, caught);
rt                516 sound/parisc/harmony.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                520 sound/parisc/harmony.c 	rt->hw = snd_harmony_playback;
rt                521 sound/parisc/harmony.c 	snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE, 
rt                524 sound/parisc/harmony.c 	err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
rt                535 sound/parisc/harmony.c         struct snd_pcm_runtime *rt = ss->runtime;
rt                539 sound/parisc/harmony.c         rt->hw = snd_harmony_capture;
rt                540 sound/parisc/harmony.c         snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE,
rt                543 sound/parisc/harmony.c         err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
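
The harmony.c pointer callbacks (lines 457-509) turn a played/captured byte count into PCM frames with bytes_to_frames(). For interleaved PCM a frame is one sample per channel, so the conversion is bytes divided by channels times bytes-per-sample; a standalone C sketch of that arithmetic (not the ALSA helper itself):

#include <stdio.h>

/* frames = bytes / (channels * bytes_per_sample); mirrors what
 * bytes_to_frames() computes for interleaved PCM. */
static unsigned long sketch_bytes_to_frames(unsigned long bytes,
					    unsigned int channels,
					    unsigned int sample_bytes)
{
	return bytes / (channels * sample_bytes);
}

int main(void)
{
	/* 4096 bytes of S16_LE stereo -> 1024 frames */
	printf("%lu\n", sketch_bytes_to_frames(4096, 2, 2));
	return 0;
}
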
rt                312 sound/pci/ad1889.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                315 sound/pci/ad1889.c 	rt->hw = snd_ad1889_playback_hw;
rt                324 sound/pci/ad1889.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                327 sound/pci/ad1889.c 	rt->hw = snd_ad1889_capture_hw;
rt                352 sound/pci/ad1889.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                364 sound/pci/ad1889.c 	if (snd_pcm_format_width(rt->format) == 16)
rt                367 sound/pci/ad1889.c 	if (rt->channels > 1)
rt                375 sound/pci/ad1889.c 	chip->wave.addr = rt->dma_addr;
rt                380 sound/pci/ad1889.c 	ad1889_writew(chip, AD_DS_WAS, rt->rate);
rt                394 sound/pci/ad1889.c 		chip->wave.addr, count, size, reg, rt->rate);
rt                402 sound/pci/ad1889.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                414 sound/pci/ad1889.c 	if (snd_pcm_format_width(rt->format) == 16)
rt                417 sound/pci/ad1889.c 	if (rt->channels > 1)
rt                425 sound/pci/ad1889.c 	chip->ramc.addr = rt->dma_addr;
rt                441 sound/pci/ad1889.c 		chip->ramc.addr, count, size, reg, rt->rate);
rt                952 sound/soc/codecs/tscs42xx.c #define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb,		\
rt                957 sound/soc/codecs/tscs42xx.c 			{R_TIMEBASE,  rt,   0xFF},	\
rt                131 sound/soc/qcom/lpass-platform.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                132 sound/soc/qcom/lpass-platform.c 	struct lpass_pcm_data *pcm_data = rt->private_data;
rt                224 sound/soc/qcom/lpass-platform.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                225 sound/soc/qcom/lpass-platform.c 	struct lpass_pcm_data *pcm_data = rt->private_data;
rt                245 sound/soc/qcom/lpass-platform.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                246 sound/soc/qcom/lpass-platform.c 	struct lpass_pcm_data *pcm_data = rt->private_data;
rt                297 sound/soc/qcom/lpass-platform.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                298 sound/soc/qcom/lpass-platform.c 	struct lpass_pcm_data *pcm_data = rt->private_data;
rt                371 sound/soc/qcom/lpass-platform.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                372 sound/soc/qcom/lpass-platform.c 	struct lpass_pcm_data *pcm_data = rt->private_data;
rt                497 sound/soc/sh/siu_dai.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                506 sound/soc/sh/siu_dai.c 	ret = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
rt                544 sound/soc/sh/siu_dai.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                551 sound/soc/sh/siu_dai.c 		__func__, info->port_id, port_info->play_cap, rt->channels);
rt                206 sound/soc/sh/siu_pcm.c 	struct snd_pcm_runtime *rt = substream->runtime;
rt                221 sound/soc/sh/siu_pcm.c 		buff = (dma_addr_t)PERIOD_OFFSET(rt->dma_addr,
rt                224 sound/soc/sh/siu_pcm.c 		virt = PERIOD_OFFSET(rt->dma_area,
rt                233 sound/soc/sh/siu_pcm.c 			       (dma_addr_t)PERIOD_OFFSET(rt->dma_addr,
rt                397 sound/soc/sh/siu_pcm.c 	struct snd_pcm_runtime 	*rt = ss->runtime;
rt                406 sound/soc/sh/siu_pcm.c 	rt = siu_stream->substream->runtime;
rt                412 sound/soc/sh/siu_pcm.c 		info->port_id, rt->channels, siu_stream->period_bytes);
rt                422 sound/soc/sh/siu_pcm.c 	xfer_cnt = bytes_to_frames(rt, siu_stream->period_bytes);
rt                426 sound/soc/sh/siu_pcm.c 	siu_stream->format = rt->format;
rt                431 sound/soc/sh/siu_pcm.c 		(unsigned long)rt->dma_addr, siu_stream->buf_bytes,
rt                433 sound/soc/sh/siu_pcm.c 		siu_stream->format, rt->channels, (int)xfer_cnt);
rt                486 sound/soc/sh/siu_pcm.c 	struct snd_pcm_runtime *rt = ss->runtime;
rt                499 sound/soc/sh/siu_pcm.c 	ptr = PERIOD_OFFSET(rt->dma_addr,
rt                501 sound/soc/sh/siu_pcm.c 			    siu_stream->period_bytes) - rt->dma_addr;
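
siu_pcm.c addresses its DMA buffer one period at a time (lines 206-233 and 486-501); judging purely from the uses above, PERIOD_OFFSET() amounts to base + period number * period bytes, and the pointer callback subtracts dma_addr again to report a byte offset. A minimal userspace sketch of that per-period arithmetic; the macro body here is inferred from the call sites, not copied from the driver:

#include <stdio.h>

#define PERIOD_OFFSET(base, period, period_bytes) \
	((base) + (unsigned long)(period) * (period_bytes))

int main(void)
{
	unsigned long dma_addr = 0x10000000UL;  /* assumed buffer base */
	unsigned int period_bytes = 4096;
	unsigned int periods = 4;
	unsigned int cur;

	for (cur = 0; cur < periods; cur++) {
		unsigned long buff = PERIOD_OFFSET(dma_addr, cur, period_bytes);

		/* the offset reported back would be buff - dma_addr */
		printf("period %u: addr=0x%lx offset=%lu\n",
		       cur, buff, buff - dma_addr);
	}
	return 0;
}
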
rt                 21 sound/usb/6fire/comm.c static void usb6fire_comm_init_urb(struct comm_runtime *rt, struct urb *urb,
rt                 26 sound/usb/6fire/comm.c 	urb->pipe = usb_sndintpipe(rt->chip->dev, COMM_EP);
rt                 30 sound/usb/6fire/comm.c 	urb->dev = rt->chip->dev;
rt                 35 sound/usb/6fire/comm.c 	struct comm_runtime *rt = urb->context;
rt                 36 sound/usb/6fire/comm.c 	struct midi_runtime *midi_rt = rt->chip->midi;
rt                 39 sound/usb/6fire/comm.c 		if (rt->receiver_buffer[0] == 0x10) /* midi in event */
rt                 42 sound/usb/6fire/comm.c 						rt->receiver_buffer + 2,
rt                 43 sound/usb/6fire/comm.c 						rt->receiver_buffer[1]);
rt                 46 sound/usb/6fire/comm.c 	if (!rt->chip->shutdown) {
rt                106 sound/usb/6fire/comm.c static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
rt                118 sound/usb/6fire/comm.c 	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
rt                124 sound/usb/6fire/comm.c static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
rt                136 sound/usb/6fire/comm.c 	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
rt                144 sound/usb/6fire/comm.c 	struct comm_runtime *rt = kzalloc(sizeof(struct comm_runtime),
rt                149 sound/usb/6fire/comm.c 	if (!rt)
rt                152 sound/usb/6fire/comm.c 	rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
rt                153 sound/usb/6fire/comm.c 	if (!rt->receiver_buffer) {
rt                154 sound/usb/6fire/comm.c 		kfree(rt);
rt                158 sound/usb/6fire/comm.c 	urb = &rt->receiver;
rt                159 sound/usb/6fire/comm.c 	rt->serial = 1;
rt                160 sound/usb/6fire/comm.c 	rt->chip = chip;
rt                162 sound/usb/6fire/comm.c 	rt->init_urb = usb6fire_comm_init_urb;
rt                163 sound/usb/6fire/comm.c 	rt->write8 = usb6fire_comm_write8;
rt                164 sound/usb/6fire/comm.c 	rt->write16 = usb6fire_comm_write16;
rt                167 sound/usb/6fire/comm.c 	urb->transfer_buffer = rt->receiver_buffer;
rt                172 sound/usb/6fire/comm.c 	urb->context = rt;
rt                176 sound/usb/6fire/comm.c 		kfree(rt->receiver_buffer);
rt                177 sound/usb/6fire/comm.c 		kfree(rt);
rt                181 sound/usb/6fire/comm.c 	chip->comm = rt;
rt                187 sound/usb/6fire/comm.c 	struct comm_runtime *rt = chip->comm;
rt                189 sound/usb/6fire/comm.c 	if (rt)
rt                190 sound/usb/6fire/comm.c 		usb_poison_urb(&rt->receiver);
rt                195 sound/usb/6fire/comm.c 	struct comm_runtime *rt = chip->comm;
rt                197 sound/usb/6fire/comm.c 	kfree(rt->receiver_buffer);
rt                198 sound/usb/6fire/comm.c 	kfree(rt);
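
usb6fire_comm_init() (lines 144-181) allocates the runtime with kzalloc(), then the receiver buffer, and on any later failure frees both in reverse order before returning the error. A self-contained userspace sketch of that unwind-on-error pattern, with calloc()/free() standing in for kzalloc()/kfree():

#include <stdlib.h>

#define RECV_BUFSIZE 64

struct comm_sketch {
	unsigned char *receiver_buffer;
	int serial;
};

static struct comm_sketch *comm_create(void)
{
	struct comm_sketch *rt = calloc(1, sizeof(*rt));

	if (!rt)
		return NULL;

	rt->receiver_buffer = calloc(1, RECV_BUFSIZE);
	if (!rt->receiver_buffer) {
		free(rt);              /* unwind the first allocation */
		return NULL;
	}
	rt->serial = 1;
	return rt;
}

static void comm_destroy(struct comm_sketch *rt)
{
	if (!rt)
		return;
	free(rt->receiver_buffer);
	free(rt);
}

int main(void)
{
	struct comm_sketch *rt = comm_create();
	int ok = rt != NULL;

	comm_destroy(rt);
	return ok ? 0 : 1;
}
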
rt                 27 sound/usb/6fire/comm.h 	void (*init_urb)(struct comm_runtime *rt, struct urb *urb, u8 *buffer,
rt                 30 sound/usb/6fire/comm.h 	int (*write8)(struct comm_runtime *rt, u8 request, u8 reg, u8 value);
rt                 31 sound/usb/6fire/comm.h 	int (*write16)(struct comm_runtime *rt, u8 request, u8 reg,
rt                 60 sound/usb/6fire/control.c static void usb6fire_control_output_vol_update(struct control_runtime *rt)
rt                 62 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                 67 sound/usb/6fire/control.c 			if (!(rt->ovol_updated & (1 << i))) {
rt                 69 sound/usb/6fire/control.c 					180 - rt->output_vol[i]);
rt                 70 sound/usb/6fire/control.c 				rt->ovol_updated |= 1 << i;
rt                 74 sound/usb/6fire/control.c static void usb6fire_control_output_mute_update(struct control_runtime *rt)
rt                 76 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                 79 sound/usb/6fire/control.c 		comm_rt->write8(comm_rt, 0x12, 0x0e, ~rt->output_mute);
rt                 82 sound/usb/6fire/control.c static void usb6fire_control_input_vol_update(struct control_runtime *rt)
rt                 84 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                 89 sound/usb/6fire/control.c 			if (!(rt->ivol_updated & (1 << i))) {
rt                 91 sound/usb/6fire/control.c 					rt->input_vol[i] & 0x3f);
rt                 92 sound/usb/6fire/control.c 				rt->ivol_updated |= 1 << i;
rt                 96 sound/usb/6fire/control.c static void usb6fire_control_line_phono_update(struct control_runtime *rt)
rt                 98 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                100 sound/usb/6fire/control.c 		comm_rt->write8(comm_rt, 0x22, 0x02, rt->line_phono_switch);
rt                101 sound/usb/6fire/control.c 		comm_rt->write8(comm_rt, 0x21, 0x02, rt->line_phono_switch);
rt                105 sound/usb/6fire/control.c static void usb6fire_control_opt_coax_update(struct control_runtime *rt)
rt                107 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                109 sound/usb/6fire/control.c 		comm_rt->write8(comm_rt, 0x22, 0x00, rt->opt_coax_switch);
rt                110 sound/usb/6fire/control.c 		comm_rt->write8(comm_rt, 0x21, 0x00, rt->opt_coax_switch);
rt                114 sound/usb/6fire/control.c static int usb6fire_control_set_rate(struct control_runtime *rt, int rate)
rt                117 sound/usb/6fire/control.c 	struct usb_device *device = rt->chip->dev;
rt                118 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                137 sound/usb/6fire/control.c 	struct control_runtime *rt, int n_analog_out,
rt                141 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                160 sound/usb/6fire/control.c static int usb6fire_control_streaming_update(struct control_runtime *rt)
rt                162 sound/usb/6fire/control.c 	struct comm_runtime *comm_rt = rt->chip->comm;
rt                165 sound/usb/6fire/control.c 		if (!rt->usb_streaming && rt->digital_thru_switch)
rt                166 sound/usb/6fire/control.c 			usb6fire_control_set_rate(rt,
rt                169 sound/usb/6fire/control.c 			(rt->usb_streaming ? 0x01 : 0x00) |
rt                170 sound/usb/6fire/control.c 			(rt->digital_thru_switch ? 0x08 : 0x00));
rt                188 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                193 sound/usb/6fire/control.c 		dev_err(&rt->chip->dev->dev,
rt                198 sound/usb/6fire/control.c 	if (rt->output_vol[ch] != ucontrol->value.integer.value[0]) {
rt                199 sound/usb/6fire/control.c 		rt->output_vol[ch] = ucontrol->value.integer.value[0];
rt                200 sound/usb/6fire/control.c 		rt->ovol_updated &= ~(1 << ch);
rt                203 sound/usb/6fire/control.c 	if (rt->output_vol[ch + 1] != ucontrol->value.integer.value[1]) {
rt                204 sound/usb/6fire/control.c 		rt->output_vol[ch + 1] = ucontrol->value.integer.value[1];
rt                205 sound/usb/6fire/control.c 		rt->ovol_updated &= ~(2 << ch);
rt                210 sound/usb/6fire/control.c 		usb6fire_control_output_vol_update(rt);
rt                218 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                222 sound/usb/6fire/control.c 		dev_err(&rt->chip->dev->dev,
rt                227 sound/usb/6fire/control.c 	ucontrol->value.integer.value[0] = rt->output_vol[ch];
rt                228 sound/usb/6fire/control.c 	ucontrol->value.integer.value[1] = rt->output_vol[ch + 1];
rt                235 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                237 sound/usb/6fire/control.c 	u8 old = rt->output_mute;
rt                241 sound/usb/6fire/control.c 		dev_err(&rt->chip->dev->dev,
rt                246 sound/usb/6fire/control.c 	rt->output_mute &= ~(3 << ch);
rt                251 sound/usb/6fire/control.c 	rt->output_mute |= value << ch;
rt                253 sound/usb/6fire/control.c 	if (rt->output_mute != old)
rt                254 sound/usb/6fire/control.c 		usb6fire_control_output_mute_update(rt);
rt                256 sound/usb/6fire/control.c 	return rt->output_mute != old;
rt                262 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                264 sound/usb/6fire/control.c 	u8 value = rt->output_mute >> ch;
rt                267 sound/usb/6fire/control.c 		dev_err(&rt->chip->dev->dev,
rt                292 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                295 sound/usb/6fire/control.c 	if (rt->input_vol[0] != ucontrol->value.integer.value[0]) {
rt                296 sound/usb/6fire/control.c 		rt->input_vol[0] = ucontrol->value.integer.value[0] - 15;
rt                297 sound/usb/6fire/control.c 		rt->ivol_updated &= ~(1 << 0);
rt                300 sound/usb/6fire/control.c 	if (rt->input_vol[1] != ucontrol->value.integer.value[1]) {
rt                301 sound/usb/6fire/control.c 		rt->input_vol[1] = ucontrol->value.integer.value[1] - 15;
rt                302 sound/usb/6fire/control.c 		rt->ivol_updated &= ~(1 << 1);
rt                307 sound/usb/6fire/control.c 		usb6fire_control_input_vol_update(rt);
rt                315 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                317 sound/usb/6fire/control.c 	ucontrol->value.integer.value[0] = rt->input_vol[0] + 15;
rt                318 sound/usb/6fire/control.c 	ucontrol->value.integer.value[1] = rt->input_vol[1] + 15;
rt                332 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                334 sound/usb/6fire/control.c 	if (rt->line_phono_switch != ucontrol->value.integer.value[0]) {
rt                335 sound/usb/6fire/control.c 		rt->line_phono_switch = ucontrol->value.integer.value[0];
rt                336 sound/usb/6fire/control.c 		usb6fire_control_line_phono_update(rt);
rt                345 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                346 sound/usb/6fire/control.c 	ucontrol->value.integer.value[0] = rt->line_phono_switch;
rt                359 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                362 sound/usb/6fire/control.c 	if (rt->opt_coax_switch != ucontrol->value.enumerated.item[0]) {
rt                363 sound/usb/6fire/control.c 		rt->opt_coax_switch = ucontrol->value.enumerated.item[0];
rt                364 sound/usb/6fire/control.c 		usb6fire_control_opt_coax_update(rt);
rt                373 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                374 sound/usb/6fire/control.c 	ucontrol->value.enumerated.item[0] = rt->opt_coax_switch;
rt                381 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                384 sound/usb/6fire/control.c 	if (rt->digital_thru_switch != ucontrol->value.integer.value[0]) {
rt                385 sound/usb/6fire/control.c 		rt->digital_thru_switch = ucontrol->value.integer.value[0];
rt                386 sound/usb/6fire/control.c 		usb6fire_control_streaming_update(rt);
rt                395 sound/usb/6fire/control.c 	struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
rt                396 sound/usb/6fire/control.c 	ucontrol->value.integer.value[0] = rt->digital_thru_switch;
rt                517 sound/usb/6fire/control.c 	struct control_runtime *rt,
rt                536 sound/usb/6fire/control.c 		control = snd_ctl_new1(&elems[i], rt);
rt                554 sound/usb/6fire/control.c 	struct control_runtime *rt = kzalloc(sizeof(struct control_runtime),
rt                558 sound/usb/6fire/control.c 	if (!rt)
rt                561 sound/usb/6fire/control.c 	rt->chip = chip;
rt                562 sound/usb/6fire/control.c 	rt->update_streaming = usb6fire_control_streaming_update;
rt                563 sound/usb/6fire/control.c 	rt->set_rate = usb6fire_control_set_rate;
rt                564 sound/usb/6fire/control.c 	rt->set_channels = usb6fire_control_set_channels;
rt                573 sound/usb/6fire/control.c 	usb6fire_control_opt_coax_update(rt);
rt                574 sound/usb/6fire/control.c 	usb6fire_control_line_phono_update(rt);
rt                575 sound/usb/6fire/control.c 	usb6fire_control_output_vol_update(rt);
rt                576 sound/usb/6fire/control.c 	usb6fire_control_output_mute_update(rt);
rt                577 sound/usb/6fire/control.c 	usb6fire_control_input_vol_update(rt);
rt                578 sound/usb/6fire/control.c 	usb6fire_control_streaming_update(rt);
rt                580 sound/usb/6fire/control.c 	ret = usb6fire_control_add_virtual(rt, chip->card,
rt                584 sound/usb/6fire/control.c 		kfree(rt);
rt                587 sound/usb/6fire/control.c 	ret = usb6fire_control_add_virtual(rt, chip->card,
rt                591 sound/usb/6fire/control.c 		kfree(rt);
rt                597 sound/usb/6fire/control.c 		ret = snd_ctl_add(chip->card, snd_ctl_new1(&elements[i], rt));
rt                599 sound/usb/6fire/control.c 			kfree(rt);
rt                606 sound/usb/6fire/control.c 	chip->control = rt;
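
The control.c excerpts (lines 60-110 and 188-228) show a cache-plus-dirty-bit scheme: the ALSA put() callback only stores the new value and clears that channel's bit in ovol_updated/ivol_updated, and the *_update() helper writes the hardware only for channels whose bit is still clear, setting it afterwards. A compilable sketch of the bookkeeping, with the hardware write reduced to a printf() (all names illustrative):

#include <stdio.h>

#define N_CH 2

struct ctl_sketch {
	int vol[N_CH];
	unsigned int vol_updated;   /* bit n set == channel n already written */
};

static void vol_update(struct ctl_sketch *rt)
{
	int i;

	for (i = 0; i < N_CH; i++) {
		if (!(rt->vol_updated & (1U << i))) {
			printf("write ch%d vol=%d\n", i, rt->vol[i]);
			rt->vol_updated |= 1U << i;
		}
	}
}

/* Shaped like an ALSA put(): returns 1 if the control value changed. */
static int vol_put(struct ctl_sketch *rt, int ch, int value)
{
	int changed = 0;

	if (rt->vol[ch] != value) {
		rt->vol[ch] = value;
		rt->vol_updated &= ~(1U << ch);   /* mark this channel dirty */
		changed = 1;
	}
	if (changed)
		vol_update(rt);
	return changed;
}

int main(void)
{
	/* start with the cache assumed to match the hardware */
	struct ctl_sketch rt = { { 0, 0 }, (1U << N_CH) - 1 };

	vol_put(&rt, 0, 120);   /* one hardware write, channel 0 only */
	vol_put(&rt, 0, 120);   /* unchanged: no write at all */
	return 0;
}
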
rt                 30 sound/usb/6fire/control.h 	int (*update_streaming)(struct control_runtime *rt);
rt                 31 sound/usb/6fire/control.h 	int (*set_rate)(struct control_runtime *rt, int rate);
rt                 32 sound/usb/6fire/control.h 	int (*set_channels)(struct control_runtime *rt, int n_analog_out,
rt                 24 sound/usb/6fire/midi.c 	struct midi_runtime *rt = urb->context;
rt                 28 sound/usb/6fire/midi.c 	spin_lock_irqsave(&rt->out_lock, flags);
rt                 30 sound/usb/6fire/midi.c 	if (rt->out) {
rt                 31 sound/usb/6fire/midi.c 		ret = snd_rawmidi_transmit(rt->out, rt->out_buffer + 4,
rt                 34 sound/usb/6fire/midi.c 			rt->out_buffer[1] = ret + 2;
rt                 35 sound/usb/6fire/midi.c 			rt->out_buffer[3] = rt->out_serial++;
rt                 44 sound/usb/6fire/midi.c 			rt->out = NULL;
rt                 46 sound/usb/6fire/midi.c 	spin_unlock_irqrestore(&rt->out_lock, flags);
rt                 50 sound/usb/6fire/midi.c 		struct midi_runtime *rt, u8 *data, int length)
rt                 54 sound/usb/6fire/midi.c 	spin_lock_irqsave(&rt->in_lock, flags);
rt                 55 sound/usb/6fire/midi.c 	if (rt->in)
rt                 56 sound/usb/6fire/midi.c 		snd_rawmidi_receive(rt->in, data, length);
rt                 57 sound/usb/6fire/midi.c 	spin_unlock_irqrestore(&rt->in_lock, flags);
rt                 73 sound/usb/6fire/midi.c 	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
rt                 74 sound/usb/6fire/midi.c 	struct urb *urb = &rt->out_urb;
rt                 78 sound/usb/6fire/midi.c 	spin_lock_irqsave(&rt->out_lock, flags);
rt                 80 sound/usb/6fire/midi.c 		if (rt->out) { /* we are already transmitting so just return */
rt                 81 sound/usb/6fire/midi.c 			spin_unlock_irqrestore(&rt->out_lock, flags);
rt                 85 sound/usb/6fire/midi.c 		ret = snd_rawmidi_transmit(alsa_sub, rt->out_buffer + 4,
rt                 88 sound/usb/6fire/midi.c 			rt->out_buffer[1] = ret + 2;
rt                 89 sound/usb/6fire/midi.c 			rt->out_buffer[3] = rt->out_serial++;
rt                 98 sound/usb/6fire/midi.c 				rt->out = alsa_sub;
rt                100 sound/usb/6fire/midi.c 	} else if (rt->out == alsa_sub)
rt                101 sound/usb/6fire/midi.c 		rt->out = NULL;
rt                102 sound/usb/6fire/midi.c 	spin_unlock_irqrestore(&rt->out_lock, flags);
rt                107 sound/usb/6fire/midi.c 	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
rt                110 sound/usb/6fire/midi.c 	while (rt->out && retry++ < 100)
rt                127 sound/usb/6fire/midi.c 	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
rt                130 sound/usb/6fire/midi.c 	spin_lock_irqsave(&rt->in_lock, flags);
rt                132 sound/usb/6fire/midi.c 		rt->in = alsa_sub;
rt                134 sound/usb/6fire/midi.c 		rt->in = NULL;
rt                135 sound/usb/6fire/midi.c 	spin_unlock_irqrestore(&rt->in_lock, flags);
rt                154 sound/usb/6fire/midi.c 	struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime),
rt                158 sound/usb/6fire/midi.c 	if (!rt)
rt                161 sound/usb/6fire/midi.c 	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
rt                162 sound/usb/6fire/midi.c 	if (!rt->out_buffer) {
rt                163 sound/usb/6fire/midi.c 		kfree(rt);
rt                167 sound/usb/6fire/midi.c 	rt->chip = chip;
rt                168 sound/usb/6fire/midi.c 	rt->in_received = usb6fire_midi_in_received;
rt                169 sound/usb/6fire/midi.c 	rt->out_buffer[0] = 0x80; /* 'send midi' command */
rt                170 sound/usb/6fire/midi.c 	rt->out_buffer[1] = 0x00; /* size of data */
rt                171 sound/usb/6fire/midi.c 	rt->out_buffer[2] = 0x00; /* always 0 */
rt                172 sound/usb/6fire/midi.c 	spin_lock_init(&rt->in_lock);
rt                173 sound/usb/6fire/midi.c 	spin_lock_init(&rt->out_lock);
rt                175 sound/usb/6fire/midi.c 	comm_rt->init_urb(comm_rt, &rt->out_urb, rt->out_buffer, rt,
rt                178 sound/usb/6fire/midi.c 	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
rt                180 sound/usb/6fire/midi.c 		kfree(rt->out_buffer);
rt                181 sound/usb/6fire/midi.c 		kfree(rt);
rt                185 sound/usb/6fire/midi.c 	rt->instance->private_data = rt;
rt                186 sound/usb/6fire/midi.c 	strcpy(rt->instance->name, "DMX6FireUSB MIDI");
rt                187 sound/usb/6fire/midi.c 	rt->instance->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
rt                190 sound/usb/6fire/midi.c 	snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_OUTPUT,
rt                192 sound/usb/6fire/midi.c 	snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_INPUT,
rt                195 sound/usb/6fire/midi.c 	chip->midi = rt;
rt                201 sound/usb/6fire/midi.c 	struct midi_runtime *rt = chip->midi;
rt                203 sound/usb/6fire/midi.c 	if (rt)
rt                204 sound/usb/6fire/midi.c 		usb_poison_urb(&rt->out_urb);
rt                209 sound/usb/6fire/midi.c 	struct midi_runtime *rt = chip->midi;
rt                211 sound/usb/6fire/midi.c 	kfree(rt->out_buffer);
rt                212 sound/usb/6fire/midi.c 	kfree(rt);
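
The midi.c output path (lines 73-102) takes out_lock and, if an URB is already in flight (rt->out set), simply returns; otherwise it pulls bytes from the rawmidi substream into out_buffer behind a four-byte header (0x80 command, length + 2, zero, running serial) and remembers the substream so the completion handler can refill. A userspace sketch of the "single in-flight buffer guarded by a lock" part, with a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUFSZ 32

struct midi_sketch {
	pthread_mutex_t out_lock;
	int out_busy;                 /* like rt->out being non-NULL */
	unsigned char out_buffer[BUFSZ];
	unsigned char out_serial;
};

/* Returns 1 if a new transfer was queued, 0 if one is already in flight. */
static int midi_send(struct midi_sketch *rt, const unsigned char *data, int len)
{
	int queued = 0;

	pthread_mutex_lock(&rt->out_lock);
	if (!rt->out_busy && len > 0 && len <= BUFSZ - 4) {
		rt->out_buffer[0] = 0x80;            /* 'send midi' command */
		rt->out_buffer[1] = (unsigned char)(len + 2);
		rt->out_buffer[2] = 0x00;
		rt->out_buffer[3] = rt->out_serial++;
		memcpy(rt->out_buffer + 4, data, (size_t)len);
		rt->out_busy = 1;                    /* completion would clear it */
		queued = 1;
	}
	pthread_mutex_unlock(&rt->out_lock);
	return queued;
}

int main(void)
{
	struct midi_sketch rt = { PTHREAD_MUTEX_INITIALIZER, 0, { 0 }, 0 };
	unsigned char note_on[3] = { 0x90, 60, 100 };

	printf("first:  %d\n", midi_send(&rt, note_on, 3));  /* queued */
	printf("second: %d\n", midi_send(&rt, note_on, 3));  /* still busy */
	return 0;
}
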
rt                 30 sound/usb/6fire/midi.h 	void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
rt                 70 sound/usb/6fire/pcm.c static int usb6fire_pcm_set_rate(struct pcm_runtime *rt)
rt                 73 sound/usb/6fire/pcm.c 	struct control_runtime *ctrl_rt = rt->chip->control;
rt                 78 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev,
rt                 80 sound/usb/6fire/pcm.c 			rates[rt->rate]);
rt                 84 sound/usb/6fire/pcm.c 	ret = ctrl_rt->set_rate(ctrl_rt, rt->rate);
rt                 86 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev,
rt                 88 sound/usb/6fire/pcm.c 			rates[rt->rate]);
rt                 95 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev,
rt                 97 sound/usb/6fire/pcm.c 			rates[rt->rate]);
rt                104 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev,
rt                106 sound/usb/6fire/pcm.c 			rates[rt->rate]);
rt                110 sound/usb/6fire/pcm.c 	rt->in_n_analog = IN_N_CHANNELS;
rt                111 sound/usb/6fire/pcm.c 	rt->out_n_analog = OUT_N_CHANNELS;
rt                112 sound/usb/6fire/pcm.c 	rt->in_packet_size = rates_in_packet_size[rt->rate];
rt                113 sound/usb/6fire/pcm.c 	rt->out_packet_size = rates_out_packet_size[rt->rate];
rt                120 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                123 sound/usb/6fire/pcm.c 		return &rt->playback;
rt                125 sound/usb/6fire/pcm.c 		return &rt->capture;
rt                126 sound/usb/6fire/pcm.c 	dev_err(&rt->chip->dev->dev, "error getting pcm substream slot.\n");
rt                131 sound/usb/6fire/pcm.c static void usb6fire_pcm_stream_stop(struct pcm_runtime *rt)
rt                134 sound/usb/6fire/pcm.c 	struct control_runtime *ctrl_rt = rt->chip->control;
rt                136 sound/usb/6fire/pcm.c 	if (rt->stream_state != STREAM_DISABLED) {
rt                138 sound/usb/6fire/pcm.c 		rt->stream_state = STREAM_STOPPING;
rt                141 sound/usb/6fire/pcm.c 			usb_kill_urb(&rt->in_urbs[i].instance);
rt                142 sound/usb/6fire/pcm.c 			usb_kill_urb(&rt->out_urbs[i].instance);
rt                146 sound/usb/6fire/pcm.c 		rt->stream_state = STREAM_DISABLED;
rt                151 sound/usb/6fire/pcm.c static int usb6fire_pcm_stream_start(struct pcm_runtime *rt)
rt                158 sound/usb/6fire/pcm.c 	if (rt->stream_state == STREAM_DISABLED) {
rt                160 sound/usb/6fire/pcm.c 		rt->stream_wait_cond = false;
rt                161 sound/usb/6fire/pcm.c 		rt->stream_state = STREAM_STARTING;
rt                164 sound/usb/6fire/pcm.c 				packet = &rt->in_urbs[i].packets[k];
rt                165 sound/usb/6fire/pcm.c 				packet->offset = k * rt->in_packet_size;
rt                166 sound/usb/6fire/pcm.c 				packet->length = rt->in_packet_size;
rt                170 sound/usb/6fire/pcm.c 			ret = usb_submit_urb(&rt->in_urbs[i].instance,
rt                173 sound/usb/6fire/pcm.c 				usb6fire_pcm_stream_stop(rt);
rt                179 sound/usb/6fire/pcm.c 		wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond,
rt                181 sound/usb/6fire/pcm.c 		if (rt->stream_wait_cond)
rt                182 sound/usb/6fire/pcm.c 			rt->stream_state = STREAM_RUNNING;
rt                184 sound/usb/6fire/pcm.c 			usb6fire_pcm_stream_stop(rt);
rt                198 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance);
rt                212 sound/usb/6fire/pcm.c 					/ (rt->in_n_analog << 2);
rt                227 sound/usb/6fire/pcm.c 			src += rt->in_n_analog;
rt                245 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance);
rt                259 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev, "Unknown sample format.");
rt                268 sound/usb/6fire/pcm.c 					/ (rt->out_n_analog << 2);
rt                275 sound/usb/6fire/pcm.c 			dest += rt->out_n_analog;
rt                290 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = in_urb->chip->pcm;
rt                300 sound/usb/6fire/pcm.c 	if (usb_urb->status || rt->panic || rt->stream_state == STREAM_STOPPING)
rt                304 sound/usb/6fire/pcm.c 			rt->panic = true;
rt                308 sound/usb/6fire/pcm.c 	if (rt->stream_state == STREAM_DISABLED) {
rt                309 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev,
rt                315 sound/usb/6fire/pcm.c 	sub = &rt->capture;
rt                332 sound/usb/6fire/pcm.c 				- 4) / (rt->in_n_analog << 2)
rt                333 sound/usb/6fire/pcm.c 				* (rt->out_n_analog << 2) + 4;
rt                340 sound/usb/6fire/pcm.c 	sub = &rt->playback;
rt                358 sound/usb/6fire/pcm.c 					/ (rt->out_n_analog << 2);
rt                365 sound/usb/6fire/pcm.c 						channel < rt->out_n_analog;
rt                378 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = urb->chip->pcm;
rt                380 sound/usb/6fire/pcm.c 	if (rt->stream_state == STREAM_STARTING) {
rt                381 sound/usb/6fire/pcm.c 		rt->stream_wait_cond = true;
rt                382 sound/usb/6fire/pcm.c 		wake_up(&rt->stream_wait_queue);
rt                388 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                392 sound/usb/6fire/pcm.c 	if (rt->panic)
rt                395 sound/usb/6fire/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                399 sound/usb/6fire/pcm.c 		if (rt->rate < ARRAY_SIZE(rates))
rt                400 sound/usb/6fire/pcm.c 			alsa_rt->hw.rates = rates_alsaid[rt->rate];
rt                402 sound/usb/6fire/pcm.c 		sub = &rt->playback;
rt                404 sound/usb/6fire/pcm.c 		if (rt->rate < ARRAY_SIZE(rates))
rt                405 sound/usb/6fire/pcm.c 			alsa_rt->hw.rates = rates_alsaid[rt->rate];
rt                407 sound/usb/6fire/pcm.c 		sub = &rt->capture;
rt                411 sound/usb/6fire/pcm.c 		mutex_unlock(&rt->stream_mutex);
rt                412 sound/usb/6fire/pcm.c 		dev_err(&rt->chip->dev->dev, "invalid stream type.\n");
rt                418 sound/usb/6fire/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                424 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                428 sound/usb/6fire/pcm.c 	if (rt->panic)
rt                431 sound/usb/6fire/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                440 sound/usb/6fire/pcm.c 		if (!rt->playback.instance && !rt->capture.instance) {
rt                441 sound/usb/6fire/pcm.c 			usb6fire_pcm_stream_stop(rt);
rt                442 sound/usb/6fire/pcm.c 			rt->rate = ARRAY_SIZE(rates);
rt                445 sound/usb/6fire/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                463 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                468 sound/usb/6fire/pcm.c 	if (rt->panic)
rt                473 sound/usb/6fire/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                477 sound/usb/6fire/pcm.c 	if (rt->stream_state == STREAM_DISABLED) {
rt                478 sound/usb/6fire/pcm.c 		for (rt->rate = 0; rt->rate < ARRAY_SIZE(rates); rt->rate++)
rt                479 sound/usb/6fire/pcm.c 			if (alsa_rt->rate == rates[rt->rate])
rt                481 sound/usb/6fire/pcm.c 		if (rt->rate == ARRAY_SIZE(rates)) {
rt                482 sound/usb/6fire/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                483 sound/usb/6fire/pcm.c 			dev_err(&rt->chip->dev->dev,
rt                489 sound/usb/6fire/pcm.c 		ret = usb6fire_pcm_set_rate(rt);
rt                491 sound/usb/6fire/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                494 sound/usb/6fire/pcm.c 		ret = usb6fire_pcm_stream_start(rt);
rt                496 sound/usb/6fire/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                497 sound/usb/6fire/pcm.c 			dev_err(&rt->chip->dev->dev,
rt                502 sound/usb/6fire/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                509 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                512 sound/usb/6fire/pcm.c 	if (rt->panic)
rt                541 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                545 sound/usb/6fire/pcm.c 	if (rt->panic || !sub)
rt                584 sound/usb/6fire/pcm.c static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
rt                589 sound/usb/6fire/pcm.c 		rt->out_urbs[i].buffer = kcalloc(PCM_MAX_PACKET_SIZE,
rt                592 sound/usb/6fire/pcm.c 		if (!rt->out_urbs[i].buffer)
rt                594 sound/usb/6fire/pcm.c 		rt->in_urbs[i].buffer = kcalloc(PCM_MAX_PACKET_SIZE,
rt                597 sound/usb/6fire/pcm.c 		if (!rt->in_urbs[i].buffer)
rt                603 sound/usb/6fire/pcm.c static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
rt                608 sound/usb/6fire/pcm.c 		kfree(rt->out_urbs[i].buffer);
rt                609 sound/usb/6fire/pcm.c 		kfree(rt->in_urbs[i].buffer);
rt                618 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt =
rt                621 sound/usb/6fire/pcm.c 	if (!rt)
rt                624 sound/usb/6fire/pcm.c 	ret = usb6fire_pcm_buffers_init(rt);
rt                626 sound/usb/6fire/pcm.c 		usb6fire_pcm_buffers_destroy(rt);
rt                627 sound/usb/6fire/pcm.c 		kfree(rt);
rt                631 sound/usb/6fire/pcm.c 	rt->chip = chip;
rt                632 sound/usb/6fire/pcm.c 	rt->stream_state = STREAM_DISABLED;
rt                633 sound/usb/6fire/pcm.c 	rt->rate = ARRAY_SIZE(rates);
rt                634 sound/usb/6fire/pcm.c 	init_waitqueue_head(&rt->stream_wait_queue);
rt                635 sound/usb/6fire/pcm.c 	mutex_init(&rt->stream_mutex);
rt                637 sound/usb/6fire/pcm.c 	spin_lock_init(&rt->playback.lock);
rt                638 sound/usb/6fire/pcm.c 	spin_lock_init(&rt->capture.lock);
rt                641 sound/usb/6fire/pcm.c 		usb6fire_pcm_init_urb(&rt->in_urbs[i], chip, true, IN_EP,
rt                643 sound/usb/6fire/pcm.c 		usb6fire_pcm_init_urb(&rt->out_urbs[i], chip, false, OUT_EP,
rt                646 sound/usb/6fire/pcm.c 		rt->in_urbs[i].peer = &rt->out_urbs[i];
rt                647 sound/usb/6fire/pcm.c 		rt->out_urbs[i].peer = &rt->in_urbs[i];
rt                652 sound/usb/6fire/pcm.c 		usb6fire_pcm_buffers_destroy(rt);
rt                653 sound/usb/6fire/pcm.c 		kfree(rt);
rt                658 sound/usb/6fire/pcm.c 	pcm->private_data = rt;
rt                664 sound/usb/6fire/pcm.c 		usb6fire_pcm_buffers_destroy(rt);
rt                665 sound/usb/6fire/pcm.c 		kfree(rt);
rt                670 sound/usb/6fire/pcm.c 	rt->instance = pcm;
rt                672 sound/usb/6fire/pcm.c 	chip->pcm = rt;
rt                678 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = chip->pcm;
rt                681 sound/usb/6fire/pcm.c 	if (rt) {
rt                682 sound/usb/6fire/pcm.c 		rt->panic = true;
rt                684 sound/usb/6fire/pcm.c 		if (rt->playback.instance)
rt                685 sound/usb/6fire/pcm.c 			snd_pcm_stop_xrun(rt->playback.instance);
rt                687 sound/usb/6fire/pcm.c 		if (rt->capture.instance)
rt                688 sound/usb/6fire/pcm.c 			snd_pcm_stop_xrun(rt->capture.instance);
rt                691 sound/usb/6fire/pcm.c 			usb_poison_urb(&rt->in_urbs[i].instance);
rt                692 sound/usb/6fire/pcm.c 			usb_poison_urb(&rt->out_urbs[i].instance);
rt                700 sound/usb/6fire/pcm.c 	struct pcm_runtime *rt = chip->pcm;
rt                702 sound/usb/6fire/pcm.c 	usb6fire_pcm_buffers_destroy(rt);
rt                703 sound/usb/6fire/pcm.c 	kfree(rt);
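
usb6fire_pcm_stream_start() (lines 151-184) submits the input URBs and then blocks in wait_event_timeout() until the first completion sets stream_wait_cond and wakes the queue; if the flag is still false after the timeout the stream is torn down again. A compact userspace sketch of that start handshake, modelled with a pthread condition variable and a timed wait (the URB machinery itself is not reproduced):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int stream_wait_cond;        /* set by the first "completion" */

static void *completion_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	stream_wait_cond = 1;           /* like the first URB completing */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;
	struct timespec deadline;
	int running;

	pthread_create(&thr, NULL, completion_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;           /* stands in for the HZ-based timeout */

	pthread_mutex_lock(&lock);
	while (!stream_wait_cond) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;          /* timed out: would stop the stream */
	}
	running = stream_wait_cond;
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	printf("stream %s\n", running ? "RUNNING" : "DISABLED");
	return 0;
}
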
rt                378 sound/usb/caiaq/audio.c 				struct snd_pcm_runtime *rt = sub->runtime;
rt                379 sound/usb/caiaq/audio.c 				char *audio_buf = rt->dma_area;
rt                380 sound/usb/caiaq/audio.c 				int sz = frames_to_bytes(rt, rt->buffer_size);
rt                425 sound/usb/caiaq/audio.c 				struct snd_pcm_runtime *rt = sub->runtime;
rt                426 sound/usb/caiaq/audio.c 				char *audio_buf = rt->dma_area;
rt                427 sound/usb/caiaq/audio.c 				int sz = frames_to_bytes(rt, rt->buffer_size);
rt                457 sound/usb/caiaq/audio.c 				struct snd_pcm_runtime *rt = sub->runtime;
rt                458 sound/usb/caiaq/audio.c 				audio_buf = rt->dma_area;
rt                459 sound/usb/caiaq/audio.c 				sz = frames_to_bytes(rt, rt->buffer_size);
rt                538 sound/usb/caiaq/audio.c 				struct snd_pcm_runtime *rt = sub->runtime;
rt                539 sound/usb/caiaq/audio.c 				char *audio_buf = rt->dma_area;
rt                540 sound/usb/caiaq/audio.c 				int sz = frames_to_bytes(rt, rt->buffer_size);
rt                574 sound/usb/caiaq/audio.c 				struct snd_pcm_runtime *rt = sub->runtime;
rt                575 sound/usb/caiaq/audio.c 				audio_buf = rt->dma_area;
rt                576 sound/usb/caiaq/audio.c 				sz = frames_to_bytes(rt, rt->buffer_size);
rt                112 sound/usb/hiface/pcm.c static int hiface_pcm_set_rate(struct pcm_runtime *rt, unsigned int rate)
rt                114 sound/usb/hiface/pcm.c 	struct usb_device *device = rt->chip->dev;
rt                174 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                175 sound/usb/hiface/pcm.c 	struct device *device = &rt->chip->dev->dev;
rt                178 sound/usb/hiface/pcm.c 		return &rt->playback;
rt                185 sound/usb/hiface/pcm.c static void hiface_pcm_stream_stop(struct pcm_runtime *rt)
rt                189 sound/usb/hiface/pcm.c 	if (rt->stream_state != STREAM_DISABLED) {
rt                190 sound/usb/hiface/pcm.c 		rt->stream_state = STREAM_STOPPING;
rt                194 sound/usb/hiface/pcm.c 					&rt->out_urbs[i].submitted, 100);
rt                197 sound/usb/hiface/pcm.c 					&rt->out_urbs[i].submitted);
rt                198 sound/usb/hiface/pcm.c 			usb_kill_urb(&rt->out_urbs[i].instance);
rt                201 sound/usb/hiface/pcm.c 		rt->stream_state = STREAM_DISABLED;
rt                206 sound/usb/hiface/pcm.c static int hiface_pcm_stream_start(struct pcm_runtime *rt)
rt                211 sound/usb/hiface/pcm.c 	if (rt->stream_state == STREAM_DISABLED) {
rt                214 sound/usb/hiface/pcm.c 		rt->panic = false;
rt                217 sound/usb/hiface/pcm.c 		rt->stream_state = STREAM_STARTING;
rt                219 sound/usb/hiface/pcm.c 			memset(rt->out_urbs[i].buffer, 0, PCM_PACKET_SIZE);
rt                220 sound/usb/hiface/pcm.c 			usb_anchor_urb(&rt->out_urbs[i].instance,
rt                221 sound/usb/hiface/pcm.c 				       &rt->out_urbs[i].submitted);
rt                222 sound/usb/hiface/pcm.c 			ret = usb_submit_urb(&rt->out_urbs[i].instance,
rt                225 sound/usb/hiface/pcm.c 				hiface_pcm_stream_stop(rt);
rt                231 sound/usb/hiface/pcm.c 		wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond,
rt                233 sound/usb/hiface/pcm.c 		if (rt->stream_wait_cond) {
rt                234 sound/usb/hiface/pcm.c 			struct device *device = &rt->chip->dev->dev;
rt                237 sound/usb/hiface/pcm.c 			rt->stream_state = STREAM_RUNNING;
rt                239 sound/usb/hiface/pcm.c 			hiface_pcm_stream_stop(rt);
rt                307 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = out_urb->chip->pcm;
rt                313 sound/usb/hiface/pcm.c 	if (rt->panic || rt->stream_state == STREAM_STOPPING)
rt                323 sound/usb/hiface/pcm.c 	if (rt->stream_state == STREAM_STARTING) {
rt                324 sound/usb/hiface/pcm.c 		rt->stream_wait_cond = true;
rt                325 sound/usb/hiface/pcm.c 		wake_up(&rt->stream_wait_queue);
rt                329 sound/usb/hiface/pcm.c 	sub = &rt->playback;
rt                348 sound/usb/hiface/pcm.c 	rt->panic = true;
rt                353 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                358 sound/usb/hiface/pcm.c 	if (rt->panic)
rt                361 sound/usb/hiface/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                365 sound/usb/hiface/pcm.c 		sub = &rt->playback;
rt                368 sound/usb/hiface/pcm.c 		struct device *device = &rt->chip->dev->dev;
rt                369 sound/usb/hiface/pcm.c 		mutex_unlock(&rt->stream_mutex);
rt                374 sound/usb/hiface/pcm.c 	if (rt->extra_freq) {
rt                383 sound/usb/hiface/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                390 sound/usb/hiface/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                396 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                400 sound/usb/hiface/pcm.c 	if (rt->panic)
rt                403 sound/usb/hiface/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                405 sound/usb/hiface/pcm.c 		hiface_pcm_stream_stop(rt);
rt                414 sound/usb/hiface/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                432 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                437 sound/usb/hiface/pcm.c 	if (rt->panic)
rt                442 sound/usb/hiface/pcm.c 	mutex_lock(&rt->stream_mutex);
rt                444 sound/usb/hiface/pcm.c 	hiface_pcm_stream_stop(rt);
rt                449 sound/usb/hiface/pcm.c 	if (rt->stream_state == STREAM_DISABLED) {
rt                451 sound/usb/hiface/pcm.c 		ret = hiface_pcm_set_rate(rt, alsa_rt->rate);
rt                453 sound/usb/hiface/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                456 sound/usb/hiface/pcm.c 		ret = hiface_pcm_stream_start(rt);
rt                458 sound/usb/hiface/pcm.c 			mutex_unlock(&rt->stream_mutex);
rt                462 sound/usb/hiface/pcm.c 	mutex_unlock(&rt->stream_mutex);
rt                469 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                471 sound/usb/hiface/pcm.c 	if (rt->panic)
rt                499 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub);
rt                503 sound/usb/hiface/pcm.c 	if (rt->panic || !sub)
rt                548 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = chip->pcm;
rt                550 sound/usb/hiface/pcm.c 	if (rt) {
rt                551 sound/usb/hiface/pcm.c 		rt->panic = true;
rt                553 sound/usb/hiface/pcm.c 		mutex_lock(&rt->stream_mutex);
rt                554 sound/usb/hiface/pcm.c 		hiface_pcm_stream_stop(rt);
rt                555 sound/usb/hiface/pcm.c 		mutex_unlock(&rt->stream_mutex);
rt                561 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = chip->pcm;
rt                565 sound/usb/hiface/pcm.c 		kfree(rt->out_urbs[i].buffer);
rt                573 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt = pcm->private_data;
rt                575 sound/usb/hiface/pcm.c 	if (rt)
rt                576 sound/usb/hiface/pcm.c 		hiface_pcm_destroy(rt->chip);
rt                584 sound/usb/hiface/pcm.c 	struct pcm_runtime *rt;
rt                586 sound/usb/hiface/pcm.c 	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
rt                587 sound/usb/hiface/pcm.c 	if (!rt)
rt                590 sound/usb/hiface/pcm.c 	rt->chip = chip;
rt                591 sound/usb/hiface/pcm.c 	rt->stream_state = STREAM_DISABLED;
rt                593 sound/usb/hiface/pcm.c 		rt->extra_freq = 1;
rt                595 sound/usb/hiface/pcm.c 	init_waitqueue_head(&rt->stream_wait_queue);
rt                596 sound/usb/hiface/pcm.c 	mutex_init(&rt->stream_mutex);
rt                597 sound/usb/hiface/pcm.c 	spin_lock_init(&rt->playback.lock);
rt                600 sound/usb/hiface/pcm.c 		ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
rt                612 sound/usb/hiface/pcm.c 	pcm->private_data = rt;
rt                618 sound/usb/hiface/pcm.c 	rt->instance = pcm;
rt                620 sound/usb/hiface/pcm.c 	chip->pcm = rt;
rt                625 sound/usb/hiface/pcm.c 		kfree(rt->out_urbs[i].buffer);
rt                626 sound/usb/hiface/pcm.c 	kfree(rt);
rt                 83 tools/firewire/nosy-dump.h 			uint32_t rt:2;
rt                363 tools/testing/selftests/net/udpgso.c 	struct rtmsg *rt;
rt                365 tools/testing/selftests/net/udpgso.c 		  NLMSG_ALIGN(sizeof(*rt)) +
rt                384 tools/testing/selftests/net/udpgso.c 	rt = (void *)(data + off);
rt                385 tools/testing/selftests/net/udpgso.c 	rt->rtm_family = is_ipv4 ? AF_INET : AF_INET6;
rt                386 tools/testing/selftests/net/udpgso.c 	rt->rtm_table = RT_TABLE_MAIN;
rt                387 tools/testing/selftests/net/udpgso.c 	rt->rtm_dst_len = alen << 3;
rt                388 tools/testing/selftests/net/udpgso.c 	rt->rtm_protocol = RTPROT_BOOT;
rt                389 tools/testing/selftests/net/udpgso.c 	rt->rtm_scope = RT_SCOPE_UNIVERSE;
rt                390 tools/testing/selftests/net/udpgso.c 	rt->rtm_type = RTN_UNICAST;
rt                391 tools/testing/selftests/net/udpgso.c 	off += NLMSG_ALIGN(sizeof(*rt));
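
The udpgso.c selftest (lines 363-391) hand-builds an RTM_NEWROUTE payload: a struct rtmsg placed right after the netlink header, with the destination prefix length given in bits (alen << 3). The sketch below fills the same rtmsg fields for an IPv4 host route and only prints them; sending them over a netlink socket is left out:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct rtmsg rt;
	int alen = 4;                       /* IPv4 address length in bytes */

	memset(&rt, 0, sizeof(rt));
	rt.rtm_family = AF_INET;
	rt.rtm_table = RT_TABLE_MAIN;
	rt.rtm_dst_len = alen << 3;         /* prefix length in bits: /32 */
	rt.rtm_protocol = RTPROT_BOOT;
	rt.rtm_scope = RT_SCOPE_UNIVERSE;
	rt.rtm_type = RTN_UNICAST;

	printf("family=%u dst_len=%u table=%u\n",
	       (unsigned)rt.rtm_family, (unsigned)rt.rtm_dst_len,
	       (unsigned)rt.rtm_table);
	printf("rtmsg occupies %zu bytes, %zu after NLMSG_ALIGN\n",
	       sizeof(rt), (size_t)NLMSG_ALIGN(sizeof(rt)));
	return 0;
}
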
rt                643 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                672 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
rt                676 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
rt                702 virt/kvm/arm/hyp/vgic-v3-sr.c 					   u32 vmcr, int rt)
rt                704 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 vid = vcpu_get_reg(vcpu, rt);
rt                725 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                727 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 vid = vcpu_get_reg(vcpu, rt);
rt                762 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                764 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
rt                767 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                769 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
rt                772 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                774 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
rt                784 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                786 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
rt                796 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                798 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
rt                801 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                803 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
rt                806 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                808 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
rt                823 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
rt                825 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
rt                843 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
rt                852 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
rt                855 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
rt                857 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
rt                866 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                868 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 0);
rt                872 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                874 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 1);
rt                878 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                880 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 2);
rt                884 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                886 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 3);
rt                890 virt/kvm/arm/hyp/vgic-v3-sr.c 					     u32 vmcr, int rt)
rt                892 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 0);
rt                896 virt/kvm/arm/hyp/vgic-v3-sr.c 					     u32 vmcr, int rt)
rt                898 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 1);
rt                902 virt/kvm/arm/hyp/vgic-v3-sr.c 					     u32 vmcr, int rt)
rt                904 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 2);
rt                908 virt/kvm/arm/hyp/vgic-v3-sr.c 					     u32 vmcr, int rt)
rt                910 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 3);
rt                914 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                930 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
rt                934 virt/kvm/arm/hyp/vgic-v3-sr.c 					  u32 vmcr, int rt)
rt                938 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, vmcr);
rt                942 virt/kvm/arm/hyp/vgic-v3-sr.c 					   u32 vmcr, int rt)
rt                944 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
rt                955 virt/kvm/arm/hyp/vgic-v3-sr.c 					  u32 vmcr, int rt)
rt                958 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
rt                962 virt/kvm/arm/hyp/vgic-v3-sr.c 					   u32 vmcr, int rt)
rt                980 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
rt                984 virt/kvm/arm/hyp/vgic-v3-sr.c 					    u32 vmcr, int rt)
rt                986 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
rt               1003 virt/kvm/arm/hyp/vgic-v3-sr.c 	int rt;
rt               1122 virt/kvm/arm/hyp/vgic-v3-sr.c 	rt = kvm_vcpu_sys_get_rt(vcpu);
rt               1123 virt/kvm/arm/hyp/vgic-v3-sr.c 	fn(vcpu, vmcr, rt);
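The vgic-v3-sr.c hits above all follow one pattern: a trapped GICv3 system-register access is dispatched to a handler of the form fn(vcpu, vmcr, rt), where rt is the index of the guest's transfer register, obtained with kvm_vcpu_sys_get_rt() and then accessed through vcpu_get_reg()/vcpu_set_reg(). The sketch below models that pattern with stand-in types so it compiles on its own; struct vcpu, shadow_vmcr, and the VMCR bit position are invented for illustration and are not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's kvm_vcpu and vcpu_{get,set}_reg(). */
struct vcpu { uint64_t regs[31]; };

static uint64_t vcpu_get_reg(struct vcpu *v, int rt) { return v->regs[rt]; }
static void vcpu_set_reg(struct vcpu *v, int rt, uint64_t val) { v->regs[rt] = val; }

typedef void (*sysreg_fn)(struct vcpu *v, uint32_t vmcr, int rt);

#define VMCR_ENG1_MASK	(1u << 1)	/* illustrative bit position only */

static uint32_t shadow_vmcr;		/* stand-in for the saved VMCR state */

/* MRS-style handler: emulated state -> guest transfer register rt. */
static void read_igrpen1(struct vcpu *v, uint32_t vmcr, int rt)
{
	vcpu_set_reg(v, rt, !!(vmcr & VMCR_ENG1_MASK));
}

/* MSR-style handler: guest transfer register rt -> emulated state. */
static void write_igrpen1(struct vcpu *v, uint32_t vmcr, int rt)
{
	if (vcpu_get_reg(v, rt) & 1)
		vmcr |= VMCR_ENG1_MASK;
	else
		vmcr &= ~VMCR_ENG1_MASK;
	shadow_vmcr = vmcr;		/* kernel: write the updated VMCR back */
}

int main(void)
{
	struct vcpu v = { .regs = { 0 } };
	sysreg_fn fn;
	int rt;

	/* Guest executes an MSR to the group-1 enable with x3 == 1. */
	v.regs[3] = 1;
	fn = write_igrpen1;
	rt = 3;				/* kernel: kvm_vcpu_sys_get_rt(vcpu) */
	fn(&v, shadow_vmcr, rt);

	/* Guest reads it back into x5. */
	read_igrpen1(&v, shadow_vmcr, 5);
	printf("x5 = %llu\n", (unsigned long long)v.regs[5]);
	return 0;
}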
rt                114 virt/kvm/arm/mmio.c 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
rt                128 virt/kvm/arm/mmio.c 	unsigned long rt;
rt                146 virt/kvm/arm/mmio.c 	rt = kvm_vcpu_dabt_get_rd(vcpu);
rt                150 virt/kvm/arm/mmio.c 	vcpu->arch.mmio_decode.rt = rt;
rt                160 virt/kvm/arm/mmio.c 	unsigned long rt;
rt                180 virt/kvm/arm/mmio.c 	rt = vcpu->arch.mmio_decode.rt;
rt                183 virt/kvm/arm/mmio.c 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
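The mmio.c hits show the two-phase MMIO path: at fault time the destination register index from the data abort is saved in mmio_decode.rt, and at completion time that saved index tells vcpu_set_reg() which guest register receives a load's data (stores go the other way through vcpu_get_reg()). A self-contained sketch of that decode/complete split follows; the structures and helper names are invented stand-ins, not the kernel's kvm_vcpu layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_decode { int rt; bool is_write; int len; };
struct vcpu {
	uint64_t regs[31];
	struct mmio_decode mmio_decode;
};

/* Fault time: remember which register the access names. */
static void decode_mmio(struct vcpu *v, int rd, bool is_write, int len)
{
	v->mmio_decode.rt = rd;		/* kernel: kvm_vcpu_dabt_get_rd() */
	v->mmio_decode.is_write = is_write;
	v->mmio_decode.len = len;
}

/* Completion time (after the access was emulated): for a load, the
 * saved rt selects the guest register that receives the data. */
static void complete_mmio(struct vcpu *v, uint64_t data)
{
	if (!v->mmio_decode.is_write)
		v->regs[v->mmio_decode.rt] = data;	/* vcpu_set_reg() */
}

int main(void)
{
	struct vcpu v = { .regs = { 0 } };

	decode_mmio(&v, 7, false, 4);	/* guest 4-byte load into x7 */
	complete_mmio(&v, 0xabcd);	/* device model returned 0xabcd */
	printf("x7 = 0x%llx\n", (unsigned long long)v.regs[7]);
	return 0;
}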
rt                100 virt/kvm/irqchip.c static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
rt                104 virt/kvm/irqchip.c 	if (!rt)
rt                107 virt/kvm/irqchip.c 	for (i = 0; i < rt->nr_rt_entries; ++i) {
rt                111 virt/kvm/irqchip.c 		hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
rt                117 virt/kvm/irqchip.c 	kfree(rt);
rt                124 virt/kvm/irqchip.c 	struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
rt                125 virt/kvm/irqchip.c 	free_irq_routing_table(rt);
rt                129 virt/kvm/irqchip.c 			       struct kvm_irq_routing_table *rt,
rt                141 virt/kvm/irqchip.c 	hlist_for_each_entry(ei, &rt->map[gsi], link)
rt                153 virt/kvm/irqchip.c 		rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
rt                155 virt/kvm/irqchip.c 	hlist_add_head(&e->link, &rt->map[e->gsi]);
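The irqchip.c hits cover the per-GSI routing table: entries are chained onto rt->map[gsi] with hlist_add_head(), and free_irq_routing_table() walks every chain with the _safe iterator before freeing the table itself. The sketch below mirrors that shape in plain userspace C, with an ordinary singly linked list standing in for the kernel hlist and malloc/free standing in for kzalloc/kfree.

#include <stdio.h>
#include <stdlib.h>

struct routing_entry {
	unsigned int gsi;
	struct routing_entry *next;
};

struct routing_table {
	unsigned int nr_rt_entries;
	struct routing_entry **map;	/* one chain per GSI */
};

static void add_routing_entry(struct routing_table *rt, struct routing_entry *e)
{
	/* kernel: hlist_add_head(&e->link, &rt->map[e->gsi]) */
	e->next = rt->map[e->gsi];
	rt->map[e->gsi] = e;
}

static void free_routing_table(struct routing_table *rt)
{
	unsigned int i;

	if (!rt)
		return;

	/* Walk every chain, freeing entries as we go (the kernel uses
	 * hlist_for_each_entry_safe() for the same reason: the current
	 * node is freed while iterating). */
	for (i = 0; i < rt->nr_rt_entries; i++) {
		struct routing_entry *e = rt->map[i];

		while (e) {
			struct routing_entry *n = e->next;

			free(e);
			e = n;
		}
	}
	free(rt->map);
	free(rt);
}

int main(void)
{
	struct routing_table *rt = calloc(1, sizeof(*rt));
	struct routing_entry *e = calloc(1, sizeof(*e));

	rt->nr_rt_entries = 8;
	rt->map = calloc(rt->nr_rt_entries, sizeof(*rt->map));

	e->gsi = 3;
	add_routing_entry(rt, e);
	printf("gsi 3 chain head: %p\n", (void *)rt->map[3]);

	free_routing_table(rt);
	return 0;
}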