t                 378 arch/alpha/include/asm/bitops.h 	unsigned long t, a, r;
t                 380 arch/alpha/include/asm/bitops.h 	t = __kernel_cmpbge (x, 0x0101010101010101UL);
t                 381 arch/alpha/include/asm/bitops.h 	a = __flsm1_tab[t];
t                 382 arch/alpha/include/asm/bitops.h 	t = __kernel_extbl (x, a);
t                 383 arch/alpha/include/asm/bitops.h 	r = a*8 + __flsm1_tab[t] + (x != 0);
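
The Alpha fls64() entries above compute the index of the highest set bit with the __kernel_cmpbge/__kernel_extbl intrinsics and a 256-entry __flsm1_tab lookup. A minimal portable sketch of the same operation, without the Alpha-specific intrinsics (sketch_fls64 is an illustrative name, not kernel code):

/* Portable stand-in for the table-driven Alpha fls64() above: returns 0
 * for x == 0, otherwise the 1-based index of the most significant set
 * bit. Shifts bit by bit instead of using the cmpbge/extbl intrinsics. */
static int sketch_fls64(unsigned long long x)
{
	int r = 64;

	if (!x)
		return 0;
	while (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r--;
	}
	return r;
}
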
t                  25 arch/alpha/kernel/err_impl.h #define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}
t                 366 arch/alpha/kernel/process.c thread_saved_pc(struct task_struct *t)
t                 368 arch/alpha/kernel/process.c 	unsigned long base = (unsigned long)task_stack_page(t);
t                 369 arch/alpha/kernel/process.c 	unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
t                  68 arch/alpha/kernel/srmcons.c srmcons_receive_chars(struct timer_list *t)
t                  70 arch/alpha/kernel/srmcons.c 	struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer);
t                 100 arch/arc/include/asm/io.h #define __raw_readsx(t,f) \
t                 104 arch/arc/include/asm/io.h 	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
t                 105 arch/arc/include/asm/io.h 	u##t *buf = ptr;						\
t                 113 arch/arc/include/asm/io.h 			u##t x = __raw_read##f(addr);			\
t                 118 arch/arc/include/asm/io.h 			u##t x = __raw_read##f(addr);			\
t                 163 arch/arc/include/asm/io.h #define __raw_writesx(t,f)						\
t                 167 arch/arc/include/asm/io.h 	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
t                 168 arch/arc/include/asm/io.h 	const u##t *buf = ptr;						\
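
The ARC __raw_readsx()/__raw_writesx() entries above generate repeated-I/O helpers that branch on whether the caller's buffer is naturally aligned. A minimal sketch of the read side under that assumption, using hypothetical names (readsl_sketch, mmio_read32) rather than the kernel macro:

#include <string.h>
#include <stdint.h>

static inline uint32_t mmio_read32(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

static void readsl_sketch(const volatile void *addr, void *dst, size_t count)
{
	int aligned = ((uintptr_t)dst % sizeof(uint32_t)) == 0;

	if (aligned) {
		uint32_t *buf = dst;

		while (count--)
			*buf++ = mmio_read32(addr);
	} else {
		uint8_t *buf = dst;

		while (count--) {
			uint32_t x = mmio_read32(addr);

			memcpy(buf, &x, sizeof(x));	/* byte copy avoids an unaligned store */
			buf += sizeof(x);
		}
	}
}
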
t                  17 arch/arc/include/asm/switch_to.h #define ARC_FPU_NEXT(t)
t                 901 arch/arc/kernel/unwind.c #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
t                  35 arch/arc/plat-eznps/mtm.c 	int log_nat, nat = 0, i, t;
t                  38 arch/arc/plat-eznps/mtm.c 	for (i = 0, t = cpu; i < NPS_NUM_HW_THREADS; i++, t++)
t                  39 arch/arc/plat-eznps/mtm.c 		nat += test_bit(t, cpumask_bits(cpu_possible_mask));
t                 272 arch/arm/common/bL_switcher.c 	struct bL_thread *t = arg;
t                 279 arch/arm/common/bL_switcher.c 	complete(&t->started);
t                 284 arch/arm/common/bL_switcher.c 		wait_event_interruptible(t->wq,
t                 285 arch/arm/common/bL_switcher.c 				t->wanted_cluster != -1 ||
t                 288 arch/arm/common/bL_switcher.c 		spin_lock(&t->lock);
t                 289 arch/arm/common/bL_switcher.c 		cluster = t->wanted_cluster;
t                 290 arch/arm/common/bL_switcher.c 		completer = t->completer;
t                 291 arch/arm/common/bL_switcher.c 		completer_cookie = t->completer_cookie;
t                 292 arch/arm/common/bL_switcher.c 		t->wanted_cluster = -1;
t                 293 arch/arm/common/bL_switcher.c 		t->completer = NULL;
t                 294 arch/arm/common/bL_switcher.c 		spin_unlock(&t->lock);
t                 347 arch/arm/common/bL_switcher.c 	struct bL_thread *t;
t                 354 arch/arm/common/bL_switcher.c 	t = &bL_threads[cpu];
t                 356 arch/arm/common/bL_switcher.c 	if (IS_ERR(t->task))
t                 357 arch/arm/common/bL_switcher.c 		return PTR_ERR(t->task);
t                 358 arch/arm/common/bL_switcher.c 	if (!t->task)
t                 361 arch/arm/common/bL_switcher.c 	spin_lock(&t->lock);
t                 362 arch/arm/common/bL_switcher.c 	if (t->completer) {
t                 363 arch/arm/common/bL_switcher.c 		spin_unlock(&t->lock);
t                 366 arch/arm/common/bL_switcher.c 	t->completer = completer;
t                 367 arch/arm/common/bL_switcher.c 	t->completer_cookie = completer_cookie;
t                 368 arch/arm/common/bL_switcher.c 	t->wanted_cluster = new_cluster_id;
t                 369 arch/arm/common/bL_switcher.c 	spin_unlock(&t->lock);
t                 370 arch/arm/common/bL_switcher.c 	wake_up(&t->wq);
t                 578 arch/arm/common/bL_switcher.c 		struct bL_thread *t = &bL_threads[cpu];
t                 579 arch/arm/common/bL_switcher.c 		spin_lock_init(&t->lock);
t                 580 arch/arm/common/bL_switcher.c 		init_waitqueue_head(&t->wq);
t                 581 arch/arm/common/bL_switcher.c 		init_completion(&t->started);
t                 582 arch/arm/common/bL_switcher.c 		t->wanted_cluster = -1;
t                 583 arch/arm/common/bL_switcher.c 		t->task = bL_switcher_thread_create(cpu, t);
t                 606 arch/arm/common/bL_switcher.c 	struct bL_thread *t;
t                 631 arch/arm/common/bL_switcher.c 		t = &bL_threads[cpu];
t                 632 arch/arm/common/bL_switcher.c 		task = t->task;
t                 633 arch/arm/common/bL_switcher.c 		t->task = NULL;
t                 641 arch/arm/common/bL_switcher.c 		init_completion(&t->started);
t                 642 arch/arm/common/bL_switcher.c 		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
t                 643 arch/arm/common/bL_switcher.c 		task = bL_switcher_thread_create(cpu, t);
t                 645 arch/arm/common/bL_switcher.c 			wait_for_completion(&t->started);
t                 373 arch/arm/include/asm/assembler.h 	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
t                 413 arch/arm/include/asm/assembler.h 	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
t                  58 arch/arm/include/asm/cp15.h #define __read_sysreg(r, w, c, t) ({				\
t                  59 arch/arm/include/asm/cp15.h 	t __val;						\
t                  65 arch/arm/include/asm/cp15.h #define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
t                 147 arch/arm/include/asm/domain.h #define TUSER(instr)	instr ## t
t                 115 arch/arm/include/asm/elf.h int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
t                 195 arch/arm/include/asm/memory.h #define __pv_stub_mov_hi(t)				\
t                 201 arch/arm/include/asm/memory.h 	: "=r" (t)					\
t                 217 arch/arm/include/asm/memory.h 	phys_addr_t t;
t                 220 arch/arm/include/asm/memory.h 		__pv_stub(x, t, "add", __PV_BITS_31_24);
t                 222 arch/arm/include/asm/memory.h 		__pv_stub_mov_hi(t);
t                 223 arch/arm/include/asm/memory.h 		__pv_add_carry_stub(x, t);
t                 225 arch/arm/include/asm/memory.h 	return t;
t                 230 arch/arm/include/asm/memory.h 	unsigned long t;
t                 238 arch/arm/include/asm/memory.h 	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
t                 239 arch/arm/include/asm/memory.h 	return t;
t                 181 arch/arm/include/uapi/asm/setup.h #define tag_next(t)	((struct tag *)((__u32 *)(t) + (t)->hdr.size))
t                 184 arch/arm/include/uapi/asm/setup.h #define for_each_tag(t,base)		\
t                 185 arch/arm/include/uapi/asm/setup.h 	for (t = base; t->hdr.size; t = tag_next(t))
t                  30 arch/arm/include/uapi/asm/swab.h 	__u32 t;
t                  39 arch/arm/include/uapi/asm/swab.h 		asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
t                  42 arch/arm/include/uapi/asm/swab.h 		t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */
t                  45 arch/arm/include/uapi/asm/swab.h 	t &= ~0x00FF0000;			/* bic r1,r1,#0x00FF0000 */
t                  46 arch/arm/include/uapi/asm/swab.h 	x ^= (t >> 8);				/* eor r0,r0,r1,lsr #8   */
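
The arch/arm/include/uapi/asm/swab.h entries above implement the classic three-step ARM byte swap (eor with a 16-bit rotate, bic, eor). The same sequence rendered in portable C, with a quick self-check (swab32_sketch is an illustrative name):

#include <assert.h>
#include <stdint.h>

static uint32_t swab32_sketch(uint32_t x)
{
	uint32_t t = x ^ ((x << 16) | (x >> 16));	/* eor r1,r0,r0,ror #16 */

	x = (x << 24) | (x >> 8);			/* mov r0,r0,ror #8      */
	t &= ~0x00FF0000u;				/* bic r1,r1,#0x00FF0000 */
	return x ^ (t >> 8);				/* eor r0,r0,r1,lsr #8   */
}

int main(void)
{
	assert(swab32_sketch(0x12345678u) == 0x78563412u);
	return 0;
}
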
t                 148 arch/arm/kernel/atags_parse.c 	struct tagtable *t;
t                 150 arch/arm/kernel/atags_parse.c 	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
t                 151 arch/arm/kernel/atags_parse.c 		if (tag->hdr.tag == t->tag) {
t                 152 arch/arm/kernel/atags_parse.c 			t->parse(tag);
t                 156 arch/arm/kernel/atags_parse.c 	return t < &__tagtable_end;
t                 163 arch/arm/kernel/atags_parse.c static void __init parse_tags(const struct tag *t)
t                 165 arch/arm/kernel/atags_parse.c 	for (; t->hdr.size; t = tag_next(t))
t                 166 arch/arm/kernel/atags_parse.c 		if (!parse_tag(t))
t                 168 arch/arm/kernel/atags_parse.c 				t->hdr.tag);
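
The tag_next()/for_each_tag() macros and the parse_tags() loop above walk the boot-time ATAG list by advancing a cursor hdr.size 32-bit words at a time until a zero-sized tag terminates the list. A trimmed-down sketch of that walk (struct tag here is a stand-in for the real uapi definition):

#include <stdio.h>

struct tag_header { unsigned int size, tag; };	/* size is in 32-bit words */
struct tag { struct tag_header hdr; /* per-tag payload union follows */ };

#define tag_next(t)	((struct tag *)((unsigned int *)(t) + (t)->hdr.size))

static void dump_tags(struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		printf("tag 0x%08x, %u words\n", t->hdr.tag, t->hdr.size);
}
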
t                  47 arch/arm/kernel/io.c 	unsigned char *t = to;
t                  50 arch/arm/kernel/io.c 		*t = readb(from);
t                  51 arch/arm/kernel/io.c 		t++;
t                  19 arch/arm/kernel/pj4-cp0.c static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
t                  21 arch/arm/kernel/pj4-cp0.c 	struct thread_info *thread = t;
t                 277 arch/arm/kernel/process.c int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
t                 279 arch/arm/kernel/process.c 	elf_core_copy_regs(elfregs, task_pt_regs(t));
t                 411 arch/arm/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
t                 414 arch/arm/kernel/ptrace.c 		if (t->debug.hbp[i]) {
t                 415 arch/arm/kernel/ptrace.c 			unregister_hw_breakpoint(t->debug.hbp[i]);
t                 416 arch/arm/kernel/ptrace.c 			t->debug.hbp[i] = NULL;
t                  30 arch/arm/kernel/thumbee.c static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t)
t                  32 arch/arm/kernel/thumbee.c 	struct thread_info *thread = t;
t                  33 arch/arm/kernel/xscale-cp0.c static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
t                  35 arch/arm/kernel/xscale-cp0.c 	struct thread_info *thread = t;
t                  58 arch/arm/kernel/xscale-cp0.c static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
t                  60 arch/arm/kernel/xscale-cp0.c 	struct thread_info *thread = t;
t                 103 arch/arm/mach-davinci/time.c #define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
t                 112 arch/arm/mach-davinci/time.c static int timer32_config(struct timer_s *t)
t                 117 arch/arm/mach-davinci/time.c 	if (USING_COMPARE(t)) {
t                 128 arch/arm/mach-davinci/time.c 		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
t                 129 arch/arm/mach-davinci/time.c 			t->base + dtip[event_timer].cmp_off);
t                 131 arch/arm/mach-davinci/time.c 		tcr = __raw_readl(t->base + TCR);
t                 134 arch/arm/mach-davinci/time.c 		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
t                 135 arch/arm/mach-davinci/time.c 		__raw_writel(tcr, t->base + TCR);
t                 138 arch/arm/mach-davinci/time.c 		__raw_writel(0, t->base + t->tim_off);
t                 139 arch/arm/mach-davinci/time.c 		__raw_writel(t->period, t->base + t->prd_off);
t                 142 arch/arm/mach-davinci/time.c 		if (t->opts & TIMER_OPTS_ONESHOT)
t                 143 arch/arm/mach-davinci/time.c 			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
t                 144 arch/arm/mach-davinci/time.c 		else if (t->opts & TIMER_OPTS_PERIODIC)
t                 145 arch/arm/mach-davinci/time.c 			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
t                 147 arch/arm/mach-davinci/time.c 		__raw_writel(tcr, t->base + TCR);
t                 152 arch/arm/mach-davinci/time.c static inline u32 timer32_read(struct timer_s *t)
t                 154 arch/arm/mach-davinci/time.c 	return __raw_readl(t->base + t->tim_off);
t                 229 arch/arm/mach-davinci/time.c 		struct timer_s *t = &timers[i];
t                 230 arch/arm/mach-davinci/time.c 		int timer = ID_TO_TIMER(t->id);
t                 233 arch/arm/mach-davinci/time.c 		t->base = base[timer];
t                 234 arch/arm/mach-davinci/time.c 		if (!t->base)
t                 237 arch/arm/mach-davinci/time.c 		if (IS_TIMER_BOT(t->id)) {
t                 238 arch/arm/mach-davinci/time.c 			t->enamode_shift = 6;
t                 239 arch/arm/mach-davinci/time.c 			t->tim_off = TIM12;
t                 240 arch/arm/mach-davinci/time.c 			t->prd_off = PRD12;
t                 243 arch/arm/mach-davinci/time.c 			t->enamode_shift = 22;
t                 244 arch/arm/mach-davinci/time.c 			t->tim_off = TIM34;
t                 245 arch/arm/mach-davinci/time.c 			t->prd_off = PRD34;
t                 250 arch/arm/mach-davinci/time.c 		t->irqaction.name = t->name;
t                 251 arch/arm/mach-davinci/time.c 		t->irqaction.dev_id = (void *)t;
t                 253 arch/arm/mach-davinci/time.c 		if (t->irqaction.handler != NULL) {
t                 254 arch/arm/mach-davinci/time.c 			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
t                 255 arch/arm/mach-davinci/time.c 			setup_irq(irq, &t->irqaction);
t                 265 arch/arm/mach-davinci/time.c 	struct timer_s *t = &timers[TID_CLOCKSOURCE];
t                 267 arch/arm/mach-davinci/time.c 	return (cycles_t)timer32_read(t);
t                 291 arch/arm/mach-davinci/time.c 	struct timer_s *t = &timers[TID_CLOCKEVENT];
t                 293 arch/arm/mach-davinci/time.c 	t->period = cycles;
t                 294 arch/arm/mach-davinci/time.c 	timer32_config(t);
t                 300 arch/arm/mach-davinci/time.c 	struct timer_s *t = &timers[TID_CLOCKEVENT];
t                 302 arch/arm/mach-davinci/time.c 	t->opts &= ~TIMER_OPTS_STATE_MASK;
t                 303 arch/arm/mach-davinci/time.c 	t->opts |= TIMER_OPTS_DISABLED;
t                 309 arch/arm/mach-davinci/time.c 	struct timer_s *t = &timers[TID_CLOCKEVENT];
t                 311 arch/arm/mach-davinci/time.c 	t->opts &= ~TIMER_OPTS_STATE_MASK;
t                 312 arch/arm/mach-davinci/time.c 	t->opts |= TIMER_OPTS_ONESHOT;
t                 318 arch/arm/mach-davinci/time.c 	struct timer_s *t = &timers[TID_CLOCKEVENT];
t                 320 arch/arm/mach-davinci/time.c 	t->period = davinci_clock_tick_rate / (HZ);
t                 321 arch/arm/mach-davinci/time.c 	t->opts &= ~TIMER_OPTS_STATE_MASK;
t                 322 arch/arm/mach-davinci/time.c 	t->opts |= TIMER_OPTS_PERIODIC;
t                 323 arch/arm/mach-davinci/time.c 	timer32_config(t);
t                  36 arch/arm/mach-ep93xx/crunch.c static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
t                  38 arch/arm/mach-ep93xx/crunch.c 	struct thread_info *thread = (struct thread_info *)t;
t                  56 arch/arm/mach-omap2/pm-debug.c 	s64 t;
t                  62 arch/arm/mach-omap2/pm-debug.c 	t = sched_clock();
t                  64 arch/arm/mach-omap2/pm-debug.c 	pwrdm->state_timer[prev] += t - pwrdm->timer;
t                  66 arch/arm/mach-omap2/pm-debug.c 	pwrdm->timer = t;
t                 179 arch/arm/mach-omap2/pm-debug.c 	s64 t;
t                 182 arch/arm/mach-omap2/pm-debug.c 	t = sched_clock();
t                 187 arch/arm/mach-omap2/pm-debug.c 	pwrdm->timer = t;
t                  49 arch/arm/mach-omap2/usb-tusb6010.c 	struct gpmc_timings	t;
t                  67 arch/arm/mach-omap2/usb-tusb6010.c 	gpmc_calc_timings(&t, &tusb_async, &dev_t);
t                  69 arch/arm/mach-omap2/usb-tusb6010.c 	return gpmc_cs_set_timings(async_cs, &t, &tusb_async);
t                  75 arch/arm/mach-omap2/usb-tusb6010.c 	struct gpmc_timings	t;
t                  94 arch/arm/mach-omap2/usb-tusb6010.c 	gpmc_calc_timings(&t, &tusb_sync, &dev_t);
t                  96 arch/arm/mach-omap2/usb-tusb6010.c 	return gpmc_cs_set_timings(sync_cs, &t, &tusb_sync);
t                 380 arch/arm/mach-orion5x/common.c void __init tag_fixup_mem32(struct tag *t, char **from)
t                 382 arch/arm/mach-orion5x/common.c 	for (; t->hdr.size; t = tag_next(t))
t                 383 arch/arm/mach-orion5x/common.c 		if (t->hdr.tag == ATAG_MEM &&
t                 384 arch/arm/mach-orion5x/common.c 		    (!t->u.mem.size || t->u.mem.size & ~PAGE_MASK ||
t                 385 arch/arm/mach-orion5x/common.c 		     t->u.mem.start & ~PAGE_MASK)) {
t                 388 arch/arm/mach-orion5x/common.c 			       t->u.mem.size / 1024, t->u.mem.start);
t                 389 arch/arm/mach-orion5x/common.c 			t->hdr.tag = 0;
t                 115 arch/arm/mach-rpc/include/mach/uncompress.h 	struct tag *t = (struct tag *)params;
t                 118 arch/arm/mach-rpc/include/mach/uncompress.h 	if (t->hdr.tag == ATAG_CORE) {
t                 119 arch/arm/mach-rpc/include/mach/uncompress.h 		for (; t->hdr.size; t = tag_next(t)) {
t                 120 arch/arm/mach-rpc/include/mach/uncompress.h 			if (t->hdr.tag == ATAG_VIDEOTEXT) {
t                 121 arch/arm/mach-rpc/include/mach/uncompress.h 				video_num_rows = t->u.videotext.video_lines;
t                 122 arch/arm/mach-rpc/include/mach/uncompress.h 				video_num_cols = t->u.videotext.video_cols;
t                 123 arch/arm/mach-rpc/include/mach/uncompress.h 				video_x = t->u.videotext.x;
t                 124 arch/arm/mach-rpc/include/mach/uncompress.h 				video_y = t->u.videotext.y;
t                 125 arch/arm/mach-rpc/include/mach/uncompress.h 			} else if (t->hdr.tag == ATAG_VIDEOLFB) {
t                 126 arch/arm/mach-rpc/include/mach/uncompress.h 				bytes_per_char_h = t->u.videolfb.lfb_depth;
t                 128 arch/arm/mach-rpc/include/mach/uncompress.h 			} else if (t->hdr.tag == ATAG_MEM) {
t                 130 arch/arm/mach-rpc/include/mach/uncompress.h 				nr_pages += (t->u.mem.size / PAGE_SIZE);
t                 101 arch/arm/mach-s3c64xx/mach-hmt.c 		int t = (brightness*4 + 16*1024 + 58)/116;
t                 102 arch/arm/mach-s3c64xx/mach-hmt.c 		brightness = 25 * ((t * t * t + 0x100000/2) / 0x100000);
t                 103 arch/arm/mm/context.c 			       void *t)
t                 107 arch/arm/mm/context.c 	struct thread_info *thread = t;
t                 630 arch/arm/mm/init.c 	struct task_struct *t, *s;
t                 632 arch/arm/mm/init.c 	for_each_process(t) {
t                 633 arch/arm/mm/init.c 		if (t->flags & PF_KTHREAD)
t                 635 arch/arm/mm/init.c 		for_each_thread(t, s)
t                 696 arch/arm/mm/mmu.c 		struct mem_type *t = &mem_types[i];
t                 697 arch/arm/mm/mmu.c 		if (t->prot_l1)
t                 698 arch/arm/mm/mmu.c 			t->prot_l1 |= PMD_DOMAIN(t->domain);
t                 699 arch/arm/mm/mmu.c 		if (t->prot_sect)
t                 700 arch/arm/mm/mmu.c 			t->prot_sect |= PMD_DOMAIN(t->domain);
t                 177 arch/arm/plat-samsung/include/plat/cpu-freq-core.h 					  struct cpufreq_frequency_table *t,
t                 505 arch/arm/probes/kprobes/test-core.c 	unsigned n, i, t, t0;
t                 511 arch/arm/probes/kprobes/test-core.c 		t = sched_clock() - t0;
t                 512 arch/arm/probes/kprobes/test-core.c 		if (t >= 250000000)
t                 515 arch/arm/probes/kprobes/test-core.c 	return t / n; /* Time for one iteration in nanoseconds */
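
The kprobes test-core entries above time a growing batch of iterations until at least 0.25 s has elapsed and then report t / n. A user-space analogue of the same microbenchmark pattern, assuming CLOCK_MONOTONIC in place of sched_clock() and a doubling batch size (bench_ns is a hypothetical helper, not the kprobes harness):

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t bench_ns(void (*fn)(void))
{
	uint64_t n, i, t, t0;

	for (n = 1000; ; n *= 2) {
		t0 = now_ns();
		for (i = 0; i < n; ++i)
			fn();
		t = now_ns() - t0;
		if (t >= 250000000ull)	/* stop once a round takes >= 0.25 s */
			break;
	}
	return t / n;	/* mean nanoseconds per iteration */
}
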
t                 162 arch/arm/probes/uprobes/core.c bool arch_uprobe_xol_was_trapped(struct task_struct *t)
t                 164 arch/arm/probes/uprobes/core.c 	if (t->thread.trap_no != UPROBE_TRAP_NR)
t                 744 arch/arm/vfp/vfpdouble.c 		struct vfp_double *t = vdn;
t                 746 arch/arm/vfp/vfpdouble.c 		vdm = t;
t                 802 arch/arm/vfp/vfpdouble.c 		struct vfp_double *t = vdn;
t                 804 arch/arm/vfp/vfpdouble.c 		vdm = t;
t                 789 arch/arm/vfp/vfpsingle.c 		struct vfp_single *t = vsn;
t                 791 arch/arm/vfp/vfpsingle.c 		vsm = t;
t                 846 arch/arm/vfp/vfpsingle.c 		struct vfp_single *t = vsn;
t                 848 arch/arm/vfp/vfpsingle.c 		vsm = t;
t                 138 arch/arm64/include/asm/pgtable-hwdef.h #define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
t                 170 arch/arm64/include/asm/pgtable-hwdef.h #define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
t                 191 arch/arm64/include/asm/pgtable-hwdef.h #define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
t                 165 arch/arm64/include/asm/processor.h #define task_user_tls(t)						\
t                 168 arch/arm64/include/asm/processor.h 	if (is_compat_thread(task_thread_info(t)))			\
t                 169 arch/arm64/include/asm/processor.h 		__tls = &(t)->thread.uw.tp2_value;			\
t                 171 arch/arm64/include/asm/processor.h 		__tls = &(t)->thread.uw.tp_value;			\
t                 175 arch/arm64/include/asm/processor.h #define task_user_tls(t)	(&(t)->thread.uw.tp_value)
t                 751 arch/arm64/include/asm/sysreg.h 	 __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
t                 755 arch/arm64/include/asm/sysreg.h 	__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
t                1194 arch/arm64/kernel/fpsimd.c void fpsimd_flush_task_state(struct task_struct *t)
t                1196 arch/arm64/kernel/fpsimd.c 	t->thread.fpsimd_cpu = NR_CPUS;
t                1205 arch/arm64/kernel/fpsimd.c 	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
t                  90 arch/arm64/kernel/probes/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *t)
t                  97 arch/arm64/kernel/probes/uprobes.c 	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
t                 212 arch/arm64/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
t                 215 arch/arm64/kernel/ptrace.c 		if (t->debug.hbp_break[i]) {
t                 216 arch/arm64/kernel/ptrace.c 			unregister_hw_breakpoint(t->debug.hbp_break[i]);
t                 217 arch/arm64/kernel/ptrace.c 			t->debug.hbp_break[i] = NULL;
t                 222 arch/arm64/kernel/ptrace.c 		if (t->debug.hbp_watch[i]) {
t                 223 arch/arm64/kernel/ptrace.c 			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
t                 224 arch/arm64/kernel/ptrace.c 			t->debug.hbp_watch[i] = NULL;
t                   5 arch/h8300/lib/muldi3.c #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
t                   6 arch/h8300/lib/muldi3.c #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
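
The __ll_lowpart()/__ll_highpart() entries above (and the m68k and microblaze variants later in this listing) split a machine word into halves so muldi3 can build a double-width product out of single-width multiplies. A sketch of the idea for 32-bit operands, with illustrative names rather than the libgcc macros; the real code recombines the partial products with explicit carry handling instead of a 64-bit intermediate:

#include <stdint.h>

#define W_TYPE_SIZE	32
#define LL_B		(1u << (W_TYPE_SIZE / 2))
#define LL_LOWPART(t)	((uint32_t)(t) & (LL_B - 1))
#define LL_HIGHPART(t)	((uint32_t)(t) >> (W_TYPE_SIZE / 2))

static uint64_t umul32_sketch(uint32_t u, uint32_t v)
{
	uint32_t ul = LL_LOWPART(u), uh = LL_HIGHPART(u);
	uint32_t vl = LL_LOWPART(v), vh = LL_HIGHPART(v);

	uint64_t low  = (uint64_t)ul * vl;
	uint64_t mid1 = (uint64_t)ul * vh;
	uint64_t mid2 = (uint64_t)uh * vl;
	uint64_t high = (uint64_t)uh * vh;

	/* (uh*2^16 + ul) * (vh*2^16 + vl), recombined piecewise */
	return low + ((mid1 + mid2) << (W_TYPE_SIZE / 2)) + (high << W_TYPE_SIZE);
}
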
t                 879 arch/ia64/hp/common/sba_iommu.c 		u32 t = get_iovp_order(byte_cnt) + iovp_shift;
t                 881 arch/ia64/hp/common/sba_iommu.c 		iovp |= t;
t                 882 arch/ia64/hp/common/sba_iommu.c 		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */
t                 391 arch/ia64/include/asm/bitops.h static inline int fls(unsigned int t)
t                 393 arch/ia64/include/asm/bitops.h 	unsigned long x = t & 0xffffffffu;
t                 400 arch/ia64/include/asm/processor.h #define ia64_is_local_fpu_owner(t)								\
t                 402 arch/ia64/include/asm/processor.h 	struct task_struct *__ia64_islfo_task = (t);						\
t                 411 arch/ia64/include/asm/processor.h #define ia64_set_local_fpu_owner(t) do {						\
t                 412 arch/ia64/include/asm/processor.h 	struct task_struct *__ia64_slfo_task = (t);					\
t                 418 arch/ia64/include/asm/processor.h #define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
t                  89 arch/ia64/include/asm/ptrace.h # define task_pt_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
t                  41 arch/ia64/include/asm/switch_to.h #define IA64_HAS_EXTRA_STATE(t)							\
t                  42 arch/ia64/include/asm/switch_to.h 	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
t                 116 arch/ia64/include/asm/unwind.h extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
t                 118 arch/ia64/include/asm/unwind.h extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
t                  48 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 276 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 314 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 352 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 390 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 630 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 668 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                 706 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	t        :  1;  /* RO */
t                1600 arch/ia64/kernel/mca.c 	struct task_struct *g, *t;
t                1619 arch/ia64/kernel/mca.c 		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
t                1620 arch/ia64/kernel/mca.c 		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
t                1631 arch/ia64/kernel/mca.c 		do_each_thread (g, t) {
t                1632 arch/ia64/kernel/mca.c 			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
t                1633 arch/ia64/kernel/mca.c 			show_stack(t, NULL);
t                1634 arch/ia64/kernel/mca.c 		} while_each_thread (g, t);
t                 208 arch/ia64/kernel/perfmon.c #define SET_ACTIVATION(t) 	do {} while(0)
t                 209 arch/ia64/kernel/perfmon.c #define GET_ACTIVATION(t) 	do {} while(0)
t                 210 arch/ia64/kernel/perfmon.c #define INC_ACTIVATION(t) 	do {} while(0)
t                 213 arch/ia64/kernel/perfmon.c #define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
t                 349 arch/ia64/kernel/perfmon.c #define PFM_GET_CTX(t)	 	((pfm_context_t *)(t)->thread.pfm_context)
t                 370 arch/ia64/kernel/perfmon.c #define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0);
t                 371 arch/ia64/kernel/perfmon.c #define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
t                4085 arch/ia64/kernel/perfmon.c 	struct task_struct *g, *t;
t                4090 arch/ia64/kernel/perfmon.c 	do_each_thread (g, t) {
t                4091 arch/ia64/kernel/perfmon.c 		if (t->thread.pfm_context == ctx) {
t                4095 arch/ia64/kernel/perfmon.c 	} while_each_thread (g, t);
t                 286 arch/ia64/kernel/smpboot.c 	} t[NUM_ROUNDS];
t                 326 arch/ia64/kernel/smpboot.c 			t[i].rt = rt;
t                 327 arch/ia64/kernel/smpboot.c 			t[i].master = master_time_stamp;
t                 328 arch/ia64/kernel/smpboot.c 			t[i].diff = delta;
t                 329 arch/ia64/kernel/smpboot.c 			t[i].lat = adjust_latency/4;
t                 338 arch/ia64/kernel/smpboot.c 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
t                 473 arch/ia64/kernel/unwind.c 		struct task_struct *t = info->task;
t                 476 arch/ia64/kernel/unwind.c 			ia64_sync_fph(t);
t                 478 arch/ia64/kernel/unwind.c 			ia64_flush_fph(t);
t                 479 arch/ia64/kernel/unwind.c 		addr = t->thread.fph + (regnum - 32);
t                 731 arch/ia64/kernel/unwind.c spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
t                 737 arch/ia64/kernel/unwind.c 			reg->when = t;
t                 772 arch/ia64/kernel/unwind.c 		int t;
t                 782 arch/ia64/kernel/unwind.c 		for (t = 0; t < sr->region_len; ++t) {
t                 783 arch/ia64/kernel/unwind.c 			if ((t & 3) == 0)
t                 785 arch/ia64/kernel/unwind.c 			kind = (mask >> 2*(3-(t & 3))) & 3;
t                 788 arch/ia64/kernel/unwind.c 						sr->region_start + t);
t                 961 arch/ia64/kernel/unwind.c desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
t                 964 arch/ia64/kernel/unwind.c 		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
t                 968 arch/ia64/kernel/unwind.c desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
t                 970 arch/ia64/kernel/unwind.c 	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
t                1000 arch/ia64/kernel/unwind.c desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
t                1006 arch/ia64/kernel/unwind.c 	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
t                1026 arch/ia64/kernel/unwind.c desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
t                1028 arch/ia64/kernel/unwind.c 	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
t                1072 arch/ia64/kernel/unwind.c desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
t                1074 arch/ia64/kernel/unwind.c 	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
t                1085 arch/ia64/kernel/unwind.c desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
t                1089 arch/ia64/kernel/unwind.c 	if (!desc_is_active(qp, t, sr))
t                1099 arch/ia64/kernel/unwind.c desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
t                1105 arch/ia64/kernel/unwind.c 	if (!desc_is_active(qp, t, sr))
t                1115 arch/ia64/kernel/unwind.c 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
t                1120 arch/ia64/kernel/unwind.c desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
t                1125 arch/ia64/kernel/unwind.c 	if (!desc_is_active(qp, t, sr))
t                1130 arch/ia64/kernel/unwind.c 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
t                1135 arch/ia64/kernel/unwind.c desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
t                1140 arch/ia64/kernel/unwind.c 	if (!desc_is_active(qp, t, sr))
t                1145 arch/ia64/kernel/unwind.c 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
t                1167 arch/ia64/kernel/unwind.c #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
t                1168 arch/ia64/kernel/unwind.c #define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
t                1172 arch/ia64/kernel/unwind.c #define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
t                1173 arch/ia64/kernel/unwind.c #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
t                1174 arch/ia64/kernel/unwind.c #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
t                1184 arch/ia64/kernel/unwind.c #define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
t                1190 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
t                1191 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
t                1192 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
t                1193 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
t                1194 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
t                1195 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
t                1196 arch/ia64/kernel/unwind.c #define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
t                1197 arch/ia64/kernel/unwind.c #define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
t                2001 arch/ia64/kernel/unwind.c init_frame_info (struct unw_frame_info *info, struct task_struct *t,
t                2019 arch/ia64/kernel/unwind.c 	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
t                2020 arch/ia64/kernel/unwind.c 	stklimit = (unsigned long) t + IA64_STK_OFFSET;
t                2035 arch/ia64/kernel/unwind.c 	info->task = t;
t                2046 arch/ia64/kernel/unwind.c 		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
t                2052 arch/ia64/kernel/unwind.c unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
t                2056 arch/ia64/kernel/unwind.c 	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
t                2072 arch/ia64/kernel/unwind.c unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
t                2074 arch/ia64/kernel/unwind.c 	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
t                2077 arch/ia64/kernel/unwind.c 	unw_init_frame_info(info, t, sw);
t                  89 arch/ia64/kernel/unwind_decoder.c   unw_word t, off;
t                  92 arch/ia64/kernel/unwind_decoder.c   t = unw_decode_uleb128 (&dp);
t                  96 arch/ia64/kernel/unwind_decoder.c 	  UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
t                  98 arch/ia64/kernel/unwind_decoder.c 	  UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
t                 106 arch/ia64/kernel/unwind_decoder.c   unw_word t;
t                 109 arch/ia64/kernel/unwind_decoder.c   t = unw_decode_uleb128 (&dp);
t                 114 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_RESTORE(X2, t, abreg, arg);
t                 116 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
t                 124 arch/ia64/kernel/unwind_decoder.c   unw_word t, off;
t                 127 arch/ia64/kernel/unwind_decoder.c   t = unw_decode_uleb128 (&dp);
t                 134 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
t                 136 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
t                 144 arch/ia64/kernel/unwind_decoder.c   unw_word t;
t                 147 arch/ia64/kernel/unwind_decoder.c   t = unw_decode_uleb128 (&dp);
t                 155 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
t                 157 arch/ia64/kernel/unwind_decoder.c     UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
t                 272 arch/ia64/kernel/unwind_decoder.c   unw_word t, size;
t                 277 arch/ia64/kernel/unwind_decoder.c       t = unw_decode_uleb128 (&dp);
t                 282 arch/ia64/kernel/unwind_decoder.c 	  UNW_DEC_MEM_STACK_F(P7, t, size, arg);
t                 285 arch/ia64/kernel/unwind_decoder.c 	case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
t                 286 arch/ia64/kernel/unwind_decoder.c 	case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
t                 287 arch/ia64/kernel/unwind_decoder.c 	case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
t                 288 arch/ia64/kernel/unwind_decoder.c 	case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
t                 289 arch/ia64/kernel/unwind_decoder.c 	case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
t                 290 arch/ia64/kernel/unwind_decoder.c 	case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
t                 291 arch/ia64/kernel/unwind_decoder.c 	case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
t                 292 arch/ia64/kernel/unwind_decoder.c 	case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
t                 293 arch/ia64/kernel/unwind_decoder.c 	case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
t                 294 arch/ia64/kernel/unwind_decoder.c 	case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
t                 295 arch/ia64/kernel/unwind_decoder.c 	case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
t                 296 arch/ia64/kernel/unwind_decoder.c 	case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
t                 297 arch/ia64/kernel/unwind_decoder.c 	case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
t                 298 arch/ia64/kernel/unwind_decoder.c 	case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
t                 299 arch/ia64/kernel/unwind_decoder.c 	case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
t                 310 arch/ia64/kernel/unwind_decoder.c 	    t = unw_decode_uleb128 (&dp);
t                 313 arch/ia64/kernel/unwind_decoder.c 	      case  1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
t                 314 arch/ia64/kernel/unwind_decoder.c 	      case  2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
t                 315 arch/ia64/kernel/unwind_decoder.c 	      case  3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
t                 316 arch/ia64/kernel/unwind_decoder.c 	      case  4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
t                 317 arch/ia64/kernel/unwind_decoder.c 	      case  5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
t                 318 arch/ia64/kernel/unwind_decoder.c 	      case  6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
t                 319 arch/ia64/kernel/unwind_decoder.c 	      case  7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
t                 320 arch/ia64/kernel/unwind_decoder.c 	      case  8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
t                 321 arch/ia64/kernel/unwind_decoder.c 	      case  9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
t                 322 arch/ia64/kernel/unwind_decoder.c 	      case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
t                 323 arch/ia64/kernel/unwind_decoder.c 	      case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
t                 324 arch/ia64/kernel/unwind_decoder.c 	      case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
t                 325 arch/ia64/kernel/unwind_decoder.c 	      case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
t                 326 arch/ia64/kernel/unwind_decoder.c 	      case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
t                 327 arch/ia64/kernel/unwind_decoder.c 	      case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
t                 328 arch/ia64/kernel/unwind_decoder.c 	      case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
t                 329 arch/ia64/kernel/unwind_decoder.c 	      case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
t                 330 arch/ia64/kernel/unwind_decoder.c 	      case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
t                 331 arch/ia64/kernel/unwind_decoder.c 	      case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
t                 382 arch/ia64/kernel/unwind_decoder.c   unw_word t;
t                 384 arch/ia64/kernel/unwind_decoder.c   t = unw_decode_uleb128 (&dp);
t                 385 arch/ia64/kernel/unwind_decoder.c   UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
t                 392 arch/ia64/kernel/unwind_decoder.c   unw_word t, ecount, label;
t                 396 arch/ia64/kernel/unwind_decoder.c       t = unw_decode_uleb128 (&dp);
t                 398 arch/ia64/kernel/unwind_decoder.c       UNW_DEC_EPILOGUE(B3, t, ecount, arg);
t                 125 arch/ia64/mm/tlb.c 	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
t                 127 arch/ia64/mm/tlb.c 	if (time_before(t, ss->serve))
t                 134 arch/ia64/mm/tlb.c 		if (time_before(t, serve))
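
The ia64 tlb.c entries above implement a ticket discipline with ia64_fetchadd(): take the next ticket, then wait until the "serve" counter catches up. A sketch of the same discipline as a plain ticket spinlock in C11 atomics (ticket_lock_t and its functions are illustrative names, not the kernel's spinaphore):

#include <stdatomic.h>

typedef struct {
	atomic_ulong ticket;	/* next ticket to hand out */
	atomic_ulong serve;	/* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	unsigned long t = atomic_fetch_add(&l->ticket, 1);

	while (atomic_load(&l->serve) != t)
		;	/* spin; a real lock would relax the CPU here */
}

static void ticket_unlock(ticket_lock_t *l)
{
	atomic_fetch_add(&l->serve, 1);
}
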
t                  30 arch/m68k/68000/m68328.c int m68328_hwclk(int set, struct rtc_time *t);
t                  29 arch/m68k/68000/m68EZ328.c int m68328_hwclk(int set, struct rtc_time *t);
t                  37 arch/m68k/68000/m68VZ328.c int m68328_hwclk(int set, struct rtc_time *t);
t                 124 arch/m68k/68000/timers.c int m68328_hwclk(int set, struct rtc_time *t)
t                 128 arch/m68k/68000/timers.c 		t->tm_year = 1;
t                 129 arch/m68k/68000/timers.c 		t->tm_mon = 0;
t                 130 arch/m68k/68000/timers.c 		t->tm_mday = 1;
t                 131 arch/m68k/68000/timers.c 		t->tm_hour = (now >> 24) % 24;
t                 132 arch/m68k/68000/timers.c 		t->tm_min = (now >> 16) % 60;
t                 133 arch/m68k/68000/timers.c 		t->tm_sec = now % 60;
t                 206 arch/m68k/apollo/config.c int dn_dummy_hwclk(int op, struct rtc_time *t) {
t                 210 arch/m68k/apollo/config.c     t->tm_sec=rtc->second;
t                 211 arch/m68k/apollo/config.c     t->tm_min=rtc->minute;
t                 212 arch/m68k/apollo/config.c     t->tm_hour=rtc->hours;
t                 213 arch/m68k/apollo/config.c     t->tm_mday=rtc->day_of_month;
t                 214 arch/m68k/apollo/config.c     t->tm_wday=rtc->day_of_week;
t                 215 arch/m68k/apollo/config.c     t->tm_mon = rtc->month - 1;
t                 216 arch/m68k/apollo/config.c     t->tm_year=rtc->year;
t                 217 arch/m68k/apollo/config.c     if (t->tm_year < 70)
t                 218 arch/m68k/apollo/config.c 	t->tm_year += 100;
t                 220 arch/m68k/apollo/config.c     rtc->second=t->tm_sec;
t                 221 arch/m68k/apollo/config.c     rtc->minute=t->tm_min;
t                 222 arch/m68k/apollo/config.c     rtc->hours=t->tm_hour;
t                 223 arch/m68k/apollo/config.c     rtc->day_of_month=t->tm_mday;
t                 224 arch/m68k/apollo/config.c     if(t->tm_wday!=-1)
t                 225 arch/m68k/apollo/config.c       rtc->day_of_week=t->tm_wday;
t                 226 arch/m68k/apollo/config.c     rtc->month = t->tm_mon + 1;
t                 227 arch/m68k/apollo/config.c     rtc->year = t->tm_year % 100;
t                 139 arch/m68k/atari/time.c int atari_mste_hwclk( int op, struct rtc_time *t )
t                 152 arch/m68k/atari/time.c         val.sec_ones = t->tm_sec % 10;
t                 153 arch/m68k/atari/time.c         val.sec_tens = t->tm_sec / 10;
t                 154 arch/m68k/atari/time.c         val.min_ones = t->tm_min % 10;
t                 155 arch/m68k/atari/time.c         val.min_tens = t->tm_min / 10;
t                 156 arch/m68k/atari/time.c         hour = t->tm_hour;
t                 165 arch/m68k/atari/time.c         val.day_ones = t->tm_mday % 10;
t                 166 arch/m68k/atari/time.c         val.day_tens = t->tm_mday / 10;
t                 167 arch/m68k/atari/time.c         val.mon_ones = (t->tm_mon+1) % 10;
t                 168 arch/m68k/atari/time.c         val.mon_tens = (t->tm_mon+1) / 10;
t                 169 arch/m68k/atari/time.c         year = t->tm_year - 80;
t                 172 arch/m68k/atari/time.c         val.weekday = t->tm_wday;
t                 180 arch/m68k/atari/time.c         t->tm_sec = val.sec_ones + val.sec_tens * 10;
t                 181 arch/m68k/atari/time.c         t->tm_min = val.min_ones + val.min_tens * 10;
t                 189 arch/m68k/atari/time.c 	t->tm_hour = hour;
t                 190 arch/m68k/atari/time.c 	t->tm_mday = val.day_ones + val.day_tens * 10;
t                 191 arch/m68k/atari/time.c         t->tm_mon  = val.mon_ones + val.mon_tens * 10 - 1;
t                 192 arch/m68k/atari/time.c         t->tm_year = val.year_ones + val.year_tens * 10 + 80;
t                 193 arch/m68k/atari/time.c         t->tm_wday = val.weekday;
t                 198 arch/m68k/atari/time.c int atari_tt_hwclk( int op, struct rtc_time *t )
t                 211 arch/m68k/atari/time.c         sec  = t->tm_sec;
t                 212 arch/m68k/atari/time.c         min  = t->tm_min;
t                 213 arch/m68k/atari/time.c         hour = t->tm_hour;
t                 214 arch/m68k/atari/time.c         day  = t->tm_mday;
t                 215 arch/m68k/atari/time.c         mon  = t->tm_mon + 1;
t                 216 arch/m68k/atari/time.c         year = t->tm_year - atari_rtc_year_offset;
t                 217 arch/m68k/atari/time.c         wday = t->tm_wday + (t->tm_wday >= 0);
t                 308 arch/m68k/atari/time.c         t->tm_sec  = sec;
t                 309 arch/m68k/atari/time.c         t->tm_min  = min;
t                 310 arch/m68k/atari/time.c         t->tm_hour = hour;
t                 311 arch/m68k/atari/time.c         t->tm_mday = day;
t                 312 arch/m68k/atari/time.c         t->tm_mon  = mon - 1;
t                 313 arch/m68k/atari/time.c         t->tm_year = year + atari_rtc_year_offset;
t                 314 arch/m68k/atari/time.c         t->tm_wday = wday - 1;
t                 289 arch/m68k/bvme6000/config.c int bvme6000_hwclk(int op, struct rtc_time *t)
t                 298 arch/m68k/bvme6000/config.c 		rtc->t0cr_rtmr = t->tm_year%4;
t                 300 arch/m68k/bvme6000/config.c 		rtc->bcd_sec = bin2bcd(t->tm_sec);
t                 301 arch/m68k/bvme6000/config.c 		rtc->bcd_min = bin2bcd(t->tm_min);
t                 302 arch/m68k/bvme6000/config.c 		rtc->bcd_hr  = bin2bcd(t->tm_hour);
t                 303 arch/m68k/bvme6000/config.c 		rtc->bcd_dom = bin2bcd(t->tm_mday);
t                 304 arch/m68k/bvme6000/config.c 		rtc->bcd_mth = bin2bcd(t->tm_mon + 1);
t                 305 arch/m68k/bvme6000/config.c 		rtc->bcd_year = bin2bcd(t->tm_year%100);
t                 306 arch/m68k/bvme6000/config.c 		if (t->tm_wday >= 0)
t                 307 arch/m68k/bvme6000/config.c 			rtc->bcd_dow = bin2bcd(t->tm_wday+1);
t                 308 arch/m68k/bvme6000/config.c 		rtc->t0cr_rtmr = t->tm_year%4 | 0x08;
t                 313 arch/m68k/bvme6000/config.c 			t->tm_sec  = bcd2bin(rtc->bcd_sec);
t                 314 arch/m68k/bvme6000/config.c 			t->tm_min  = bcd2bin(rtc->bcd_min);
t                 315 arch/m68k/bvme6000/config.c 			t->tm_hour = bcd2bin(rtc->bcd_hr);
t                 316 arch/m68k/bvme6000/config.c 			t->tm_mday = bcd2bin(rtc->bcd_dom);
t                 317 arch/m68k/bvme6000/config.c 			t->tm_mon  = bcd2bin(rtc->bcd_mth)-1;
t                 318 arch/m68k/bvme6000/config.c 			t->tm_year = bcd2bin(rtc->bcd_year);
t                 319 arch/m68k/bvme6000/config.c 			if (t->tm_year < 70)
t                 320 arch/m68k/bvme6000/config.c 				t->tm_year += 100;
t                 321 arch/m68k/bvme6000/config.c 			t->tm_wday = bcd2bin(rtc->bcd_dow)-1;
t                 322 arch/m68k/bvme6000/config.c 		} while (t->tm_sec != bcd2bin(rtc->bcd_sec));
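
The m68k hwclk() routines in this listing (bvme6000 above, hp300, q40 and sun3x below) shuttle rtc_time fields through bin2bcd()/bcd2bin() because the RTC registers hold packed BCD, one decimal digit per nibble. A self-contained sketch of those conversions (the sketch_ prefix marks them as illustrative, not the kernel's inline helpers):

#include <assert.h>

static unsigned char sketch_bin2bcd(unsigned int val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

static unsigned int sketch_bcd2bin(unsigned char val)
{
	return (val >> 4) * 10 + (val & 0x0f);
}

int main(void)
{
	assert(sketch_bin2bcd(59) == 0x59);
	assert(sketch_bcd2bin(0x59) == 59);
	return 0;
}
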
t                 203 arch/m68k/hp300/config.c static int hp300_hwclk(int op, struct rtc_time *t)
t                 206 arch/m68k/hp300/config.c 		t->tm_sec  = hp300_rtc_read(RTC_REG_SEC1) * 10 +
t                 208 arch/m68k/hp300/config.c 		t->tm_min  = hp300_rtc_read(RTC_REG_MIN1) * 10 +
t                 210 arch/m68k/hp300/config.c 		t->tm_hour = (hp300_rtc_read(RTC_REG_HOUR1) & 3) * 10 +
t                 212 arch/m68k/hp300/config.c 		t->tm_wday = -1;
t                 213 arch/m68k/hp300/config.c 		t->tm_mday = hp300_rtc_read(RTC_REG_DAY1) * 10 +
t                 215 arch/m68k/hp300/config.c 		t->tm_mon  = hp300_rtc_read(RTC_REG_MON1) * 10 +
t                 217 arch/m68k/hp300/config.c 		t->tm_year = hp300_rtc_read(RTC_REG_YEAR1) * 10 +
t                 219 arch/m68k/hp300/config.c 		if (t->tm_year <= 69)
t                 220 arch/m68k/hp300/config.c 			t->tm_year += 100;
t                 222 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_SEC1, t->tm_sec / 10);
t                 223 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_SEC2, t->tm_sec % 10);
t                 224 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_MIN1, t->tm_min / 10);
t                 225 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_MIN2, t->tm_min % 10);
t                 227 arch/m68k/hp300/config.c 				((t->tm_hour / 10) & 3) | RTC_HOUR1_24HMODE);
t                 228 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_HOUR2, t->tm_hour % 10);
t                 229 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_DAY1, t->tm_mday / 10);
t                 230 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_DAY2, t->tm_mday % 10);
t                 231 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_MON1, (t->tm_mon + 1) / 10);
t                 232 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_MON2, (t->tm_mon + 1) % 10);
t                 233 arch/m68k/hp300/config.c 		if (t->tm_year >= 100)
t                 234 arch/m68k/hp300/config.c 			t->tm_year -= 100;
t                 235 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_YEAR1, t->tm_year / 10);
t                 236 arch/m68k/hp300/config.c 		hp300_rtc_write(RTC_REG_YEAR2, t->tm_year % 10);
t                24765 arch/m68k/ifpsp060/src/fpsp.S # if it's a fmove out instruction, we don't have to fix a7
t                14725 arch/m68k/ifpsp060/src/pfpsp.S # if it's a fmove out instruction, we don't have to fix a7
t                  45 arch/m68k/include/asm/atomic.h 	int t, tmp;							\
t                  52 arch/m68k/include/asm/atomic.h 			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
t                  54 arch/m68k/include/asm/atomic.h 	return t;							\
t                  60 arch/m68k/include/asm/atomic.h 	int t, tmp;							\
t                  67 arch/m68k/include/asm/atomic.h 			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
t                  78 arch/m68k/include/asm/atomic.h 	int t;								\
t                  81 arch/m68k/include/asm/atomic.h 	t = (v->counter c_op i);					\
t                  84 arch/m68k/include/asm/atomic.h 	return t;							\
t                  91 arch/m68k/include/asm/atomic.h 	int t;								\
t                  94 arch/m68k/include/asm/atomic.h 	t = v->counter;							\
t                  98 arch/m68k/include/asm/atomic.h 	return t;							\
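
The m68k atomic.h entries above generate the fetch-op family either as a CAS retry loop (68020 and later) or inside a disabled-interrupt window. A sketch of the CAS-loop form using C11 atomics in place of the m68k cas instruction (atomic_fetch_add_sketch is an illustrative name):

#include <stdatomic.h>

static int atomic_fetch_add_sketch(atomic_int *v, int i)
{
	int t = atomic_load(v);

	/* retry until no other CPU changed *v between the load and the CAS;
	 * on failure t is reloaded with the current value automatically */
	while (!atomic_compare_exchange_weak(v, &t, t + i))
		;
	return t;	/* value before the add, matching fetch_add semantics */
}
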
t                 148 arch/m68k/include/asm/uaccess_mm.h 			__typeof__(*(ptr)) t;				\
t                 171 arch/m68k/include/asm/uaccess_mm.h 		(x) = __gu_val.t;					\
t                  25 arch/m68k/lib/muldi3.c #define __ll_lowpart(t) ((USItype) (t) % __ll_B)
t                  26 arch/m68k/lib/muldi3.c #define __ll_highpart(t) ((USItype) (t) / __ll_B)
t                 610 arch/m68k/mac/misc.c int mac_hwclk(int op, struct rtc_time *t)
t                 636 arch/m68k/mac/misc.c 		t->tm_wday = 0;
t                 638 arch/m68k/mac/misc.c 			 &t->tm_year, &t->tm_mon, &t->tm_mday,
t                 639 arch/m68k/mac/misc.c 			 &t->tm_hour, &t->tm_min, &t->tm_sec);
t                 640 arch/m68k/mac/misc.c 		pr_debug("%s: read %ptR\n", __func__, t);
t                 642 arch/m68k/mac/misc.c 		pr_debug("%s: tried to write %ptR\n", __func__, t);
t                 648 arch/m68k/mac/misc.c 			via_set_rtc_time(t);
t                 653 arch/m68k/mac/misc.c 			cuda_set_rtc_time(t);
t                 658 arch/m68k/mac/misc.c 			pmu_set_rtc_time(t);
t                 173 arch/m68k/mvme147/config.c int mvme147_hwclk(int op, struct rtc_time *t)
t                 178 arch/m68k/mvme147/config.c 		t->tm_year = bcd2int (m147_rtc->bcd_year);
t                 179 arch/m68k/mvme147/config.c 		t->tm_mon  = bcd2int(m147_rtc->bcd_mth) - 1;
t                 180 arch/m68k/mvme147/config.c 		t->tm_mday = bcd2int (m147_rtc->bcd_dom);
t                 181 arch/m68k/mvme147/config.c 		t->tm_hour = bcd2int (m147_rtc->bcd_hr);
t                 182 arch/m68k/mvme147/config.c 		t->tm_min  = bcd2int (m147_rtc->bcd_min);
t                 183 arch/m68k/mvme147/config.c 		t->tm_sec  = bcd2int (m147_rtc->bcd_sec);
t                 185 arch/m68k/mvme147/config.c 		if (t->tm_year < 70)
t                 186 arch/m68k/mvme147/config.c 			t->tm_year += 100;
t                 438 arch/m68k/mvme16x/config.c int mvme16x_hwclk(int op, struct rtc_time *t)
t                 443 arch/m68k/mvme16x/config.c 		t->tm_year = bcd2int (rtc->bcd_year);
t                 444 arch/m68k/mvme16x/config.c 		t->tm_mon  = bcd2int(rtc->bcd_mth) - 1;
t                 445 arch/m68k/mvme16x/config.c 		t->tm_mday = bcd2int (rtc->bcd_dom);
t                 446 arch/m68k/mvme16x/config.c 		t->tm_hour = bcd2int (rtc->bcd_hr);
t                 447 arch/m68k/mvme16x/config.c 		t->tm_min  = bcd2int (rtc->bcd_min);
t                 448 arch/m68k/mvme16x/config.c 		t->tm_sec  = bcd2int (rtc->bcd_sec);
t                 450 arch/m68k/mvme16x/config.c 		if (t->tm_year < 70)
t                 451 arch/m68k/mvme16x/config.c 			t->tm_year += 100;
t                 217 arch/m68k/q40/config.c static int q40_hwclk(int op, struct rtc_time *t)
t                 223 arch/m68k/q40/config.c 		Q40_RTC_SECS = bin2bcd(t->tm_sec);
t                 224 arch/m68k/q40/config.c 		Q40_RTC_MINS = bin2bcd(t->tm_min);
t                 225 arch/m68k/q40/config.c 		Q40_RTC_HOUR = bin2bcd(t->tm_hour);
t                 226 arch/m68k/q40/config.c 		Q40_RTC_DATE = bin2bcd(t->tm_mday);
t                 227 arch/m68k/q40/config.c 		Q40_RTC_MNTH = bin2bcd(t->tm_mon + 1);
t                 228 arch/m68k/q40/config.c 		Q40_RTC_YEAR = bin2bcd(t->tm_year%100);
t                 229 arch/m68k/q40/config.c 		if (t->tm_wday >= 0)
t                 230 arch/m68k/q40/config.c 			Q40_RTC_DOW = bin2bcd(t->tm_wday+1);
t                 237 arch/m68k/q40/config.c 		t->tm_year = bcd2bin (Q40_RTC_YEAR);
t                 238 arch/m68k/q40/config.c 		t->tm_mon  = bcd2bin (Q40_RTC_MNTH)-1;
t                 239 arch/m68k/q40/config.c 		t->tm_mday = bcd2bin (Q40_RTC_DATE);
t                 240 arch/m68k/q40/config.c 		t->tm_hour = bcd2bin (Q40_RTC_HOUR);
t                 241 arch/m68k/q40/config.c 		t->tm_min  = bcd2bin (Q40_RTC_MINS);
t                 242 arch/m68k/q40/config.c 		t->tm_sec  = bcd2bin (Q40_RTC_SECS);
t                 246 arch/m68k/q40/config.c 		if (t->tm_year < 70)
t                 247 arch/m68k/q40/config.c 			t->tm_year += 100;
t                 248 arch/m68k/q40/config.c 		t->tm_wday = bcd2bin(Q40_RTC_DOW)-1;
t                  42 arch/m68k/sun3/config.c extern int sun3_hwclk(int set, struct rtc_time *t);
t                  27 arch/m68k/sun3/intersil.c int sun3_hwclk(int set, struct rtc_time *t)
t                  41 arch/m68k/sun3/intersil.c 		todintersil->hour = t->tm_hour;
t                  42 arch/m68k/sun3/intersil.c 		todintersil->minute = t->tm_min;
t                  43 arch/m68k/sun3/intersil.c 		todintersil->second = t->tm_sec;
t                  44 arch/m68k/sun3/intersil.c 		todintersil->month = t->tm_mon + 1;
t                  45 arch/m68k/sun3/intersil.c 		todintersil->day = t->tm_mday;
t                  46 arch/m68k/sun3/intersil.c 		todintersil->year = (t->tm_year - 68) % 100;
t                  47 arch/m68k/sun3/intersil.c 		todintersil->weekday = t->tm_wday;
t                  50 arch/m68k/sun3/intersil.c 		t->tm_sec = todintersil->csec;
t                  51 arch/m68k/sun3/intersil.c 		t->tm_hour = todintersil->hour;
t                  52 arch/m68k/sun3/intersil.c 		t->tm_min = todintersil->minute;
t                  53 arch/m68k/sun3/intersil.c 		t->tm_sec = todintersil->second;
t                  54 arch/m68k/sun3/intersil.c 		t->tm_mon = todintersil->month - 1;
t                  55 arch/m68k/sun3/intersil.c 		t->tm_mday = todintersil->day;
t                  56 arch/m68k/sun3/intersil.c 		t->tm_year = todintersil->year + 68;
t                  57 arch/m68k/sun3/intersil.c 		t->tm_wday = todintersil->weekday;
t                  58 arch/m68k/sun3/intersil.c 		if (t->tm_year < 70)
t                  59 arch/m68k/sun3/intersil.c 			t->tm_year += 100;
t                  40 arch/m68k/sun3x/time.c int sun3x_hwclk(int set, struct rtc_time *t)
t                  50 arch/m68k/sun3x/time.c 		h->sec = bin2bcd(t->tm_sec);
t                  51 arch/m68k/sun3x/time.c 		h->min = bin2bcd(t->tm_min);
t                  52 arch/m68k/sun3x/time.c 		h->hour = bin2bcd(t->tm_hour);
t                  53 arch/m68k/sun3x/time.c 		h->wday = bin2bcd(t->tm_wday);
t                  54 arch/m68k/sun3x/time.c 		h->mday = bin2bcd(t->tm_mday);
t                  55 arch/m68k/sun3x/time.c 		h->month = bin2bcd(t->tm_mon + 1);
t                  56 arch/m68k/sun3x/time.c 		h->year = bin2bcd(t->tm_year % 100);
t                  60 arch/m68k/sun3x/time.c 		t->tm_sec = bcd2bin(h->sec);
t                  61 arch/m68k/sun3x/time.c 		t->tm_min = bcd2bin(h->min);
t                  62 arch/m68k/sun3x/time.c 		t->tm_hour = bcd2bin(h->hour);
t                  63 arch/m68k/sun3x/time.c 		t->tm_wday = bcd2bin(h->wday);
t                  64 arch/m68k/sun3x/time.c 		t->tm_mday = bcd2bin(h->mday);
t                  65 arch/m68k/sun3x/time.c 		t->tm_mon = bcd2bin(h->month) - 1;
t                  66 arch/m68k/sun3x/time.c 		t->tm_year = bcd2bin(h->year);
t                  68 arch/m68k/sun3x/time.c 		if (t->tm_year < 70)
t                  69 arch/m68k/sun3x/time.c 			t->tm_year += 100;
t                   5 arch/m68k/sun3x/time.h extern int sun3x_hwclk(int set, struct rtc_time *t);
t                  49 arch/microblaze/include/asm/mmu.h 	unsigned long    t:1;	/* Normal or I/O  type */
t                  48 arch/microblaze/kernel/ptrace.c 					struct task_struct *t)
t                  73 arch/microblaze/kernel/ptrace.c 	regs = task_pt_regs(t);
t                   9 arch/microblaze/lib/muldi3.c #define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
t                  10 arch/microblaze/lib/muldi3.c #define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
t                 121 arch/mips/alchemy/common/clock.c 	unsigned long t;
t                 129 arch/mips/alchemy/common/clock.c 		t = 396000000;
t                 131 arch/mips/alchemy/common/clock.c 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
t                 133 arch/mips/alchemy/common/clock.c 			t &= 0x3f;
t                 134 arch/mips/alchemy/common/clock.c 		t *= parent_rate;
t                 137 arch/mips/alchemy/common/clock.c 	return t;
t                 328 arch/mips/alchemy/common/clock.c static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
t                 340 arch/mips/alchemy/common/clock.c 	switch (t) {
t                 692 arch/mips/alchemy/common/clock.c 	unsigned long v, t;
t                 695 arch/mips/alchemy/common/clock.c 	t = parent_rate / (((v >> sh) & 0xff) + 1);
t                 697 arch/mips/alchemy/common/clock.c 		t /= 2;
t                 699 arch/mips/alchemy/common/clock.c 	return t;
t                1042 arch/mips/alchemy/common/clock.c 	struct clk_aliastable *t = alchemy_clk_aliases;
t                1098 arch/mips/alchemy/common/clock.c 	while (t->base) {
t                1099 arch/mips/alchemy/common/clock.c 		if (t->cputype == ctype)
t                1100 arch/mips/alchemy/common/clock.c 			clk_add_alias(t->alias, NULL, t->base, NULL);
t                1101 arch/mips/alchemy/common/clock.c 		t++;
t                  85 arch/mips/alchemy/common/time.c 	unsigned long t;
t                 102 arch/mips/alchemy/common/time.c 	t = 0xffffff;
t                 103 arch/mips/alchemy/common/time.c 	while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T1S) && --t)
t                 105 arch/mips/alchemy/common/time.c 	if (!t)
t                 110 arch/mips/alchemy/common/time.c 	t = 0xffffff;
t                 111 arch/mips/alchemy/common/time.c 	while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
t                 113 arch/mips/alchemy/common/time.c 	if (!t)
t                 117 arch/mips/alchemy/common/time.c 	t = 0xffffff;
t                 118 arch/mips/alchemy/common/time.c 	while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
t                 120 arch/mips/alchemy/common/time.c 	if (!t)
t                 154 arch/mips/alchemy/common/time.c 	int t;
t                 156 arch/mips/alchemy/common/time.c 	t = alchemy_get_cputype();
t                 157 arch/mips/alchemy/common/time.c 	if (t == ALCHEMY_CPU_UNKNOWN ||
t                 158 arch/mips/alchemy/common/time.c 	    alchemy_time_init(alchemy_m2inttab[t]))
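
The alchemy/common/time.c fragments poll a status bit with a counting-down guard (t = 0xffffff; loop while busy and --t; give up if t reached zero) so a stuck bit cannot hang boot forever. A hedged, generic sketch of that bounded-poll pattern; read_status() and BUSY_BIT are placeholders invented for the example, not kernel symbols, and the fake hardware clears the bit after a few polls just so the program terminates:

#include <stdbool.h>
#include <stdio.h>

#define BUSY_BIT 0x1u			/* hypothetical status bit */

static unsigned int fake_status = BUSY_BIT;

static unsigned int read_status(void)	/* stands in for the SYS register read */
{
	static int polls;
	if (++polls > 3)		/* pretend the hardware finishes */
		fake_status &= ~BUSY_BIT;
	return fake_status;
}

static bool wait_not_busy(void)
{
	unsigned long t = 0xffffff;	/* bounded retry budget, as above */

	while ((read_status() & BUSY_BIT) && --t)
		;			/* spin */
	return t != 0;			/* false means the poll timed out */
}

int main(void)
{
	printf("ready: %d\n", wait_not_busy());
	return 0;
}
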
t                  92 arch/mips/alchemy/devboards/db1200.c 		unsigned short t = bcsr_read(BCSR_HEXLEDS);
t                  93 arch/mips/alchemy/devboards/db1200.c 		bcsr_write(BCSR_HEXLEDS, ~t);
t                  94 arch/mips/alchemy/devboards/db1200.c 		if (bcsr_read(BCSR_HEXLEDS) != t) {
t                  95 arch/mips/alchemy/devboards/db1200.c 			bcsr_write(BCSR_HEXLEDS, t);
t                 106 arch/mips/alchemy/devboards/db1200.c 		unsigned short t = bcsr_read(BCSR_HEXLEDS);
t                 107 arch/mips/alchemy/devboards/db1200.c 		bcsr_write(BCSR_HEXLEDS, ~t);
t                 108 arch/mips/alchemy/devboards/db1200.c 		if (bcsr_read(BCSR_HEXLEDS) != t) {
t                 109 arch/mips/alchemy/devboards/db1200.c 			bcsr_write(BCSR_HEXLEDS, t);
t                 155 arch/mips/ath79/clock.c 	u32 t;
t                 157 arch/mips/ath79/clock.c 	t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
t                 158 arch/mips/ath79/clock.c 	if (t & AR933X_BOOTSTRAP_REF_CLK_40)
t                 176 arch/mips/ath79/clock.c 		u32 t;
t                 180 arch/mips/ath79/clock.c 		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
t                 182 arch/mips/ath79/clock.c 		ref_div = t;
t                 187 arch/mips/ath79/clock.c 		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
t                 189 arch/mips/ath79/clock.c 		if (t == 0)
t                 190 arch/mips/ath79/clock.c 			t = 1;
t                 192 arch/mips/ath79/clock.c 		out_div = (1 << t);
t                 215 arch/mips/ath79/clock.c 	u64 t;
t                 218 arch/mips/ath79/clock.c 	t = ref;
t                 219 arch/mips/ath79/clock.c 	t *= nint;
t                 220 arch/mips/ath79/clock.c 	do_div(t, ref_div);
t                 221 arch/mips/ath79/clock.c 	ret = t;
t                 223 arch/mips/ath79/clock.c 	t = ref;
t                 224 arch/mips/ath79/clock.c 	t *= nfrac;
t                 225 arch/mips/ath79/clock.c 	do_div(t, ref_div * frac);
t                 226 arch/mips/ath79/clock.c 	ret += t;
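
The ath79/clock.c fragments compute a PLL output as an integer part plus a fractional part, keeping the intermediate products in a u64 and using do_div(), the kernel helper that divides a 64-bit value by a 32-bit divisor in place on 32-bit targets. A sketch of the same arithmetic with ordinary C division; the parameter names follow the fragment, the numeric values in main() are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* rate = ref * (nint + nfrac/frac) / ref_div, computed in 64 bits so the
 * intermediate products cannot overflow 32-bit arithmetic */
static uint64_t pll_rate(uint32_t ref, uint32_t nint, uint32_t nfrac,
			 uint32_t frac, uint32_t ref_div)
{
	uint64_t t, ret;

	t = (uint64_t)ref * nint;
	ret = t / ref_div;

	t = (uint64_t)ref * nfrac;
	ret += t / ((uint64_t)ref_div * frac);

	return ret;
}

int main(void)
{
	/* made-up example: 40 MHz reference, N = 28 + 256/1024, ref_div = 2 */
	printf("%llu Hz\n",
	       (unsigned long long)pll_rate(40000000, 28, 256, 1024, 2));
	return 0;
}
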
t                  91 arch/mips/ath79/common.c 	u32 t;
t                 113 arch/mips/ath79/common.c 	t = ath79_reset_rr(reg);
t                 114 arch/mips/ath79/common.c 	ath79_reset_wr(reg, t | mask);
t                 123 arch/mips/ath79/common.c 	u32 t;
t                 145 arch/mips/ath79/common.c 	t = ath79_reset_rr(reg);
t                 146 arch/mips/ath79/common.c 	ath79_reset_wr(reg, t & ~mask);
t                  23 arch/mips/ath79/early_printk.c 	u32 t;
t                  26 arch/mips/ath79/early_printk.c 		t = __raw_readl(reg);
t                  27 arch/mips/ath79/early_printk.c 		if ((t & mask) == val)
t                  64 arch/mips/ath79/early_printk.c 	u32 t;
t                  95 arch/mips/ath79/early_printk.c 	t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
t                  96 arch/mips/ath79/early_printk.c 	t |= uart_en;
t                  97 arch/mips/ath79/early_printk.c 	__raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
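
The ath79/common.c and early_printk.c fragments share one shape: read a register into t, OR a mask in (or AND it out), write the result back. A small sketch of that read-modify-write pattern against a plain word; the kernel uses __raw_readl()/__raw_writel() on a mapped I/O address and normally holds a lock around the sequence, since read-modify-write is not atomic, while the sketch below just uses a volatile pointer so it runs anywhere:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;		/* stands in for a device register */

static inline uint32_t reg_read(volatile uint32_t *r)          { return *r; }
static inline void reg_write(volatile uint32_t *r, uint32_t v) { *r = v; }

static void reg_set_bits(volatile uint32_t *r, uint32_t mask)
{
	uint32_t t = reg_read(r);	/* read            */
	reg_write(r, t | mask);		/* modify and write */
}

static void reg_clear_bits(volatile uint32_t *r, uint32_t mask)
{
	uint32_t t = reg_read(r);
	reg_write(r, t & ~mask);
}

int main(void)
{
	reg_set_bits(&fake_reg, 1u << 3);
	reg_clear_bits(&fake_reg, 1u << 3);
	printf("0x%08x\n", fake_reg);	/* back to 0 */
	return 0;
}
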
t                 667 arch/mips/cavium-octeon/octeon-irq.c static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
t                 669 arch/mips/cavium-octeon/octeon-irq.c 	irqd_set_trigger_type(data, t);
t                 671 arch/mips/cavium-octeon/octeon-irq.c 	if (t & IRQ_TYPE_EDGE_BOTH)
t                 683 arch/mips/cavium-octeon/octeon-irq.c 	u32 t = irqd_get_trigger_type(data);
t                 689 arch/mips/cavium-octeon/octeon-irq.c 	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
t                 690 arch/mips/cavium-octeon/octeon-irq.c 	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
t                 711 arch/mips/cavium-octeon/octeon-irq.c static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
t                 713 arch/mips/cavium-octeon/octeon-irq.c 	irqd_set_trigger_type(data, t);
t                 716 arch/mips/cavium-octeon/octeon-irq.c 	if (t & IRQ_TYPE_EDGE_BOTH)
t                2145 arch/mips/cavium-octeon/octeon-irq.c static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
t                2147 arch/mips/cavium-octeon/octeon-irq.c 	irqd_set_trigger_type(data, t);
t                 665 arch/mips/cavium-octeon/setup.c 	u64 t;
t                 732 arch/mips/cavium-octeon/setup.c 	t = read_c0_cvmctl();
t                 733 arch/mips/cavium-octeon/setup.c 	if ((t & (1ull << 27)) == 0) {
t                 234 arch/mips/include/asm/asmmacro.h 	 .word	0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
t                 238 arch/mips/include/asm/asmmacro.h 	 .word	0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
t                  12 arch/mips/include/asm/cmp.h extern void cmp_boot_secondary(int cpu, struct task_struct *t);
t                 134 arch/mips/include/asm/msa.h static inline void save_msa(struct task_struct *t)
t                 137 arch/mips/include/asm/msa.h 		_save_msa(t);
t                 140 arch/mips/include/asm/msa.h static inline void restore_msa(struct task_struct *t)
t                 143 arch/mips/include/asm/msa.h 		_restore_msa(t);
t                 237 arch/mips/include/asm/octeon/cvmx-fau.h 		cvmx_fau_tagwait64_t t;
t                 241 arch/mips/include/asm/octeon/cvmx-fau.h 	return result.t;
t                 261 arch/mips/include/asm/octeon/cvmx-fau.h 		cvmx_fau_tagwait32_t t;
t                 266 arch/mips/include/asm/octeon/cvmx-fau.h 	return result.t;
t                 285 arch/mips/include/asm/octeon/cvmx-fau.h 		cvmx_fau_tagwait16_t t;
t                 290 arch/mips/include/asm/octeon/cvmx-fau.h 	return result.t;
t                 308 arch/mips/include/asm/octeon/cvmx-fau.h 		cvmx_fau_tagwait8_t t;
t                 312 arch/mips/include/asm/octeon/cvmx-fau.h 	return result.t;
t                 343 arch/mips/include/asm/uaccess.h 		__typeof__(*(addr))	t;				\
t                 364 arch/mips/include/asm/uaccess.h 	(val) = __gu_tmp.t;						\
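
The cvmx-fau fragments load a raw 64-bit word into one union member and return another (result.t), and the MIPS __get_user temporary does the same with __gu_tmp.t, so raw bits can be handed back as the caller's type without shifting or copying. A compact sketch of that union reinterpretation; the bit layout below is invented purely for illustration, and the exact field values you see depend on endianness and the compiler's bit-field layout:

#include <stdint.h>
#include <stdio.h>

/* hypothetical response word: 48 bits of data, 16 status bits on top */
typedef struct {
	uint64_t data   : 48;
	uint64_t status : 16;
} tagwait_t;

static tagwait_t decode(uint64_t raw)
{
	union {
		uint64_t  raw;	/* what the bus/hardware hands us */
		tagwait_t t;	/* how the caller wants to see it */
	} result;

	result.raw = raw;
	return result.t;	/* reinterpret, no shifting or masking */
}

int main(void)
{
	tagwait_t r = decode(0xbeef000000001234ull);

	printf("data=0x%llx status=0x%llx\n",
	       (unsigned long long)r.data, (unsigned long long)r.status);
	return 0;
}
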
t                  15 arch/mips/include/asm/watch.h void mips_install_watch_registers(struct task_struct *t);
t                 310 arch/mips/kernel/elf.c 	struct task_struct *t = current;
t                 312 arch/mips/kernel/elf.c 	t->thread.fpu.fcr31 = c->fpu_csr31;
t                 318 arch/mips/kernel/elf.c 			t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
t                 320 arch/mips/kernel/elf.c 			t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
t                 495 arch/mips/kernel/process.c 	struct thread_struct *t = &tsk->thread;
t                 498 arch/mips/kernel/process.c 	if (t->reg31 == (unsigned long) ret_from_fork)
t                 499 arch/mips/kernel/process.c 		return t->reg31;
t                 502 arch/mips/kernel/process.c 	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
t                 749 arch/mips/kernel/process.c 	struct task_struct *t;
t                 785 arch/mips/kernel/process.c 	for_each_thread(task, t) {
t                 788 arch/mips/kernel/process.c 			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
t                 790 arch/mips/kernel/process.c 			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
t                 791 arch/mips/kernel/process.c 			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
t                 796 arch/mips/kernel/process.c 			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
t                 798 arch/mips/kernel/process.c 			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
t                 812 arch/mips/kernel/process.c 	for_each_thread(task, t)
t                 813 arch/mips/kernel/process.c 		cpumask_set_cpu(task_cpu(t), &process_cpus);
t                  93 arch/mips/kernel/vpe-cmp.c 	struct tc *t;
t                 130 arch/mips/kernel/vpe-cmp.c 	t = alloc_tc(aprp_cpu_index());
t                 131 arch/mips/kernel/vpe-cmp.c 	if (!t) {
t                 141 arch/mips/kernel/vpe-cmp.c 		kfree(t);
t                 149 arch/mips/kernel/vpe-cmp.c 	list_add(&t->tc, &v->tc);
t                 152 arch/mips/kernel/vpe-cmp.c 	t->pvpe = v;	/* set the parent vpe */
t                  31 arch/mips/kernel/vpe-mt.c 	struct tc *t;
t                  57 arch/mips/kernel/vpe-mt.c 	t = list_first_entry(&v->tc, struct tc, tc);
t                  62 arch/mips/kernel/vpe-mt.c 	settc(t->index);
t                  72 arch/mips/kernel/vpe-mt.c 			t->index);
t                 115 arch/mips/kernel/vpe-mt.c 			      | (t->index << VPECONF0_XTC_SHIFT));
t                 209 arch/mips/kernel/vpe-mt.c 	struct tc *t;
t                 214 arch/mips/kernel/vpe-mt.c 	t = list_entry(v->tc.next, struct tc, tc);
t                 215 arch/mips/kernel/vpe-mt.c 	if (t != NULL) {
t                 216 arch/mips/kernel/vpe-mt.c 		settc(t->index);
t                 230 arch/mips/kernel/vpe-mt.c 	struct tc *t;
t                 233 arch/mips/kernel/vpe-mt.c 	t = list_entry(v->tc.next, struct tc, tc);
t                 234 arch/mips/kernel/vpe-mt.c 	if (t == NULL)
t                 242 arch/mips/kernel/vpe-mt.c 	settc(t->index);
t                 333 arch/mips/kernel/vpe-mt.c 	struct tc *t;
t                 398 arch/mips/kernel/vpe-mt.c 		t = alloc_tc(tc);
t                 399 arch/mips/kernel/vpe-mt.c 		if (!t) {
t                 422 arch/mips/kernel/vpe-mt.c 			list_add(&t->tc, &v->tc);
t                 449 arch/mips/kernel/vpe-mt.c 		t->pvpe = v;	/* set the parent vpe */
t                 470 arch/mips/kernel/vpe-mt.c 				t->pvpe = get_vpe(0);	/* set the parent vpe */
t                  76 arch/mips/kernel/vpe.c 	struct tc *res, *t;
t                  80 arch/mips/kernel/vpe.c 	list_for_each_entry(t, &vpecontrol.tc_list, list) {
t                  81 arch/mips/kernel/vpe.c 		if (t->index == index) {
t                  82 arch/mips/kernel/vpe.c 			res = t;
t                  18 arch/mips/kernel/watch.c void mips_install_watch_registers(struct task_struct *t)
t                  20 arch/mips/kernel/watch.c 	struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
t                 120 arch/mips/kernel/watch.c 	unsigned int t;
t                 130 arch/mips/kernel/watch.c 	t = read_c0_watchlo0();
t                 132 arch/mips/kernel/watch.c 	c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
t                 138 arch/mips/kernel/watch.c 	t = read_c0_watchhi0();
t                 139 arch/mips/kernel/watch.c 	write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
t                 141 arch/mips/kernel/watch.c 	t = read_c0_watchhi0();
t                 142 arch/mips/kernel/watch.c 	c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
t                 143 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 148 arch/mips/kernel/watch.c 	t = read_c0_watchlo1();
t                 150 arch/mips/kernel/watch.c 	c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
t                 154 arch/mips/kernel/watch.c 	t = read_c0_watchhi1();
t                 155 arch/mips/kernel/watch.c 	write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
t                 157 arch/mips/kernel/watch.c 	t = read_c0_watchhi1();
t                 158 arch/mips/kernel/watch.c 	c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
t                 159 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 164 arch/mips/kernel/watch.c 	t = read_c0_watchlo2();
t                 166 arch/mips/kernel/watch.c 	c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
t                 170 arch/mips/kernel/watch.c 	t = read_c0_watchhi2();
t                 171 arch/mips/kernel/watch.c 	write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
t                 173 arch/mips/kernel/watch.c 	t = read_c0_watchhi2();
t                 174 arch/mips/kernel/watch.c 	c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
t                 175 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 180 arch/mips/kernel/watch.c 	t = read_c0_watchlo3();
t                 182 arch/mips/kernel/watch.c 	c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
t                 186 arch/mips/kernel/watch.c 	t = read_c0_watchhi3();
t                 187 arch/mips/kernel/watch.c 	write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
t                 189 arch/mips/kernel/watch.c 	t = read_c0_watchhi3();
t                 190 arch/mips/kernel/watch.c 	c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
t                 191 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 196 arch/mips/kernel/watch.c 	t = read_c0_watchhi4();
t                 197 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 201 arch/mips/kernel/watch.c 	t = read_c0_watchhi5();
t                 202 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
t                 206 arch/mips/kernel/watch.c 	t = read_c0_watchhi6();
t                 207 arch/mips/kernel/watch.c 	if ((t & MIPS_WATCHHI_M) == 0)
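
The kernel/watch.c probe writes every candidate mask bit into each watchhi register and reads it back; whatever bits survive are the ones the CPU actually implements, and the M bit says whether another register pair follows. The same write-ones/read-back idea appears elsewhere (PCI BAR sizing, for example). A tiny sketch against a simulated register that only wires up some of the bits; all names here are placeholders:

#include <stdint.h>
#include <stdio.h>

#define CANDIDATE_MASK 0x0fffu		/* bits we would like to have */

/* pretend hardware: only the low 8 mask bits are implemented */
static uint32_t hw_reg;
static void hw_write(uint32_t v) { hw_reg = v & 0x00ffu; }
static uint32_t hw_read(void)    { return hw_reg; }

int main(void)
{
	hw_write(CANDIDATE_MASK);	/* write every bit we care about */
	uint32_t implemented = hw_read() & CANDIDATE_MASK;

	printf("implemented mask bits: 0x%03x\n", implemented);	/* 0x0ff */
	return 0;
}
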
t                1420 arch/mips/math-emu/cp1emu.c 	union ieee754##p s, union ieee754##p t)				\
t                1423 arch/mips/math-emu/cp1emu.c 	s = f1(s, t);							\
t                  53 arch/mips/math-emu/dp_maddf.c 	u64 t;
t                 210 arch/mips/math-emu/dp_maddf.c 	t = DPXMULT(lxm, hym);
t                 212 arch/mips/math-emu/dp_maddf.c 	at = lrm + (t << 32);
t                 216 arch/mips/math-emu/dp_maddf.c 	hrm = hrm + (t >> 32);
t                 218 arch/mips/math-emu/dp_maddf.c 	t = DPXMULT(hxm, lym);
t                 220 arch/mips/math-emu/dp_maddf.c 	at = lrm + (t << 32);
t                 224 arch/mips/math-emu/dp_maddf.c 	hrm = hrm + (t >> 32);
t                 310 arch/mips/math-emu/dp_maddf.c 		t = 0;
t                 311 arch/mips/math-emu/dp_maddf.c 		while ((hzm >> (62 - t)) == 0)
t                 312 arch/mips/math-emu/dp_maddf.c 			t++;
t                 314 arch/mips/math-emu/dp_maddf.c 		assert(t <= 62);
t                 315 arch/mips/math-emu/dp_maddf.c 		if (t) {
t                 316 arch/mips/math-emu/dp_maddf.c 			hzm = hzm << t | lzm >> (64 - t);
t                 317 arch/mips/math-emu/dp_maddf.c 			lzm = lzm << t;
t                 318 arch/mips/math-emu/dp_maddf.c 			ze -= t;
t                  23 arch/mips/math-emu/dp_mul.c 	u64 t;
t                 128 arch/mips/math-emu/dp_mul.c 	t = DPXMULT(lxm, hym);
t                 130 arch/mips/math-emu/dp_mul.c 	at = lrm + (t << 32);
t                 134 arch/mips/math-emu/dp_mul.c 	hrm = hrm + (t >> 32);
t                 136 arch/mips/math-emu/dp_mul.c 	t = DPXMULT(hxm, lym);
t                 138 arch/mips/math-emu/dp_mul.c 	at = lrm + (t << 32);
t                 142 arch/mips/math-emu/dp_mul.c 	hrm = hrm + (t >> 32);
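
The DPXMULT-based code in dp_maddf.c and dp_mul.c builds a 128-bit mantissa product out of four 32x32-bit partial products, propagating the carry out of the low half by hand (the "at = lrm + (t << 32); if (at < lrm) hrm++;" test detects wraparound of the unsigned add). A standalone sketch of the same schoolbook decomposition, where XMULT plays the role of DPXMULT, i.e. a plain 32x32 -> 64 multiply:

#include <stdint.h>
#include <stdio.h>

#define XMULT(a, b) ((uint64_t)(a) * (uint64_t)(b))	/* 32x32 -> 64 */

/* full 64x64 -> 128-bit multiply: result is hi:lo */
static void mul64x64(uint64_t x, uint64_t y, uint64_t *hi, uint64_t *lo)
{
	uint32_t hx = x >> 32, lx = (uint32_t)x;
	uint32_t hy = y >> 32, ly = (uint32_t)y;
	uint64_t hrm, lrm, t, at;

	lrm = XMULT(lx, ly);		/* low  x low  */
	hrm = XMULT(hx, hy);		/* high x high */

	t = XMULT(lx, hy);		/* cross product: low 32 bits go into
					 * the top of lrm, high 32 bits into hrm */
	at = lrm + (t << 32);
	if (at < lrm)			/* carry out of the low 64 bits */
		hrm++;
	lrm = at;
	hrm += t >> 32;

	t = XMULT(hx, ly);		/* second cross product, same treatment */
	at = lrm + (t << 32);
	if (at < lrm)
		hrm++;
	lrm = at;
	hrm += t >> 32;

	*hi = hrm;
	*lo = lrm;
}

int main(void)
{
	uint64_t hi, lo;

	mul64x64(0xffffffffffffffffull, 0xffffffffffffffffull, &hi, &lo);
	printf("0x%016llx%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	/* prints 0xfffffffffffffffe0000000000000001 */
	return 0;
}
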
t                  23 arch/mips/math-emu/dp_sqrt.c 	union ieee754dp y, z, t;
t                  93 arch/mips/math-emu/dp_sqrt.c 	t = ieee754dp_div(x, y);
t                  94 arch/mips/math-emu/dp_sqrt.c 	y = ieee754dp_add(y, t);
t                 100 arch/mips/math-emu/dp_sqrt.c 	t = ieee754dp_mul(y, y);
t                 101 arch/mips/math-emu/dp_sqrt.c 	z = t;
t                 102 arch/mips/math-emu/dp_sqrt.c 	t.bexp += 0x001;
t                 103 arch/mips/math-emu/dp_sqrt.c 	t = ieee754dp_add(t, z);
t                 107 arch/mips/math-emu/dp_sqrt.c 	t = ieee754dp_div(z, ieee754dp_add(t, x));
t                 108 arch/mips/math-emu/dp_sqrt.c 	t.bexp += 0x001;
t                 109 arch/mips/math-emu/dp_sqrt.c 	y = ieee754dp_add(y, t);
t                 118 arch/mips/math-emu/dp_sqrt.c 	t = ieee754dp_div(x, y);
t                 120 arch/mips/math-emu/dp_sqrt.c 	if (ieee754_csr.sx & IEEE754_INEXACT || t.bits != y.bits) {
t                 124 arch/mips/math-emu/dp_sqrt.c 			t.bits -= 1;
t                 135 arch/mips/math-emu/dp_sqrt.c 			t.bits += 1;
t                 140 arch/mips/math-emu/dp_sqrt.c 		y = ieee754dp_add(y, t);
t                  23 arch/mips/math-emu/sp_mul.c 	unsigned int t;
t                 127 arch/mips/math-emu/sp_mul.c 	t = lxm * hym; /* 16 * 16 => 32 */
t                 128 arch/mips/math-emu/sp_mul.c 	at = lrm + (t << 16);
t                 131 arch/mips/math-emu/sp_mul.c 	hrm = hrm + (t >> 16);
t                 133 arch/mips/math-emu/sp_mul.c 	t = hxm * lym; /* 16 * 16 => 32 */
t                 134 arch/mips/math-emu/sp_mul.c 	at = lrm + (t << 16);
t                 137 arch/mips/math-emu/sp_mul.c 	hrm = hrm + (t >> 16);
t                  14 arch/mips/math-emu/sp_sqrt.c 	int ix, s, q, m, t, i;
t                  78 arch/mips/math-emu/sp_sqrt.c 		t = s + r;
t                  79 arch/mips/math-emu/sp_sqrt.c 		if (t <= ix) {
t                  80 arch/mips/math-emu/sp_sqrt.c 			s = t + r;
t                  81 arch/mips/math-emu/sp_sqrt.c 			ix -= t;
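
The sp_sqrt fragment is the inner step of a restoring, bit-at-a-time square root: form a trial value (t = s + r), and if it still fits in the remaining radicand, accept the bit and subtract. A self-contained integer version from the same family of algorithm, not the exact sp_sqrt code (which runs on the shifted mantissa):

#include <stdio.h>

/* floor(sqrt(x)), one result bit per iteration, restoring style */
static unsigned int isqrt32(unsigned int x)
{
	unsigned int q = 0;		/* square root built up so far        */
	unsigned int r = 1u << 30;	/* current bit squared, a power of 4  */

	while (r > x)
		r >>= 2;

	while (r != 0) {
		unsigned int t = q + r;	/* trial, like t = s + r above */
		if (x >= t) {		/* the bit fits: accept it     */
			x -= t;
			q = (q >> 1) + r;
		} else {
			q >>= 1;
		}
		r >>= 2;
	}
	return q;
}

int main(void)
{
	printf("%u %u %u\n", isqrt32(0), isqrt32(10), isqrt32(1u << 31));
	/* prints: 0 3 46340 */
	return 0;
}
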
t                 425 arch/mips/mm/cerr-sb1.c 	uint64_t t;
t                 434 arch/mips/mm/cerr-sb1.c 		t = dword & mask_72_64[i];
t                 435 arch/mips/mm/cerr-sb1.c 		w = (uint32_t)(t >> 32);
t                 438 arch/mips/mm/cerr-sb1.c 		w = (uint32_t)(t & 0xFFFFFFFF);
t                1760 arch/mips/mm/tlbex.c 	int t = scratch >= 0 ? scratch : pte;
t                1769 arch/mips/mm/tlbex.c 				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
t                1770 arch/mips/mm/tlbex.c 				cur = t;
t                1772 arch/mips/mm/tlbex.c 			uasm_i_andi(p, t, cur, 1);
t                1773 arch/mips/mm/tlbex.c 			uasm_il_beqz(p, r, t, lid);
t                1774 arch/mips/mm/tlbex.c 			if (pte == t)
t                1780 arch/mips/mm/tlbex.c 			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
t                1781 arch/mips/mm/tlbex.c 			cur = t;
t                1783 arch/mips/mm/tlbex.c 		uasm_i_andi(p, t, cur,
t                1785 arch/mips/mm/tlbex.c 		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
t                1786 arch/mips/mm/tlbex.c 		uasm_il_bnez(p, r, t, lid);
t                1787 arch/mips/mm/tlbex.c 		if (pte == t)
t                1812 arch/mips/mm/tlbex.c 	int t = scratch >= 0 ? scratch : pte;
t                1816 arch/mips/mm/tlbex.c 		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
t                1817 arch/mips/mm/tlbex.c 		cur = t;
t                1819 arch/mips/mm/tlbex.c 	uasm_i_andi(p, t, cur,
t                1821 arch/mips/mm/tlbex.c 	uasm_i_xori(p, t, t,
t                1823 arch/mips/mm/tlbex.c 	uasm_il_bnez(p, r, t, lid);
t                1824 arch/mips/mm/tlbex.c 	if (pte == t)
t                1857 arch/mips/mm/tlbex.c 		int t = scratch >= 0 ? scratch : pte;
t                1858 arch/mips/mm/tlbex.c 		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
t                1859 arch/mips/mm/tlbex.c 		uasm_i_andi(p, t, t, 1);
t                1860 arch/mips/mm/tlbex.c 		uasm_il_beqz(p, r, t, lid);
t                1861 arch/mips/mm/tlbex.c 		if (pte == t)
t                 406 arch/mips/pci/msi-xlp.c 	int t, msixvec, lirq, xirq, ret;
t                 421 arch/mips/pci/msi-xlp.c 	t = fls(md->msix_alloc_mask);
t                 422 arch/mips/pci/msi-xlp.c 	if (t == XLP_MSIXVEC_PER_LINK) {
t                 426 arch/mips/pci/msi-xlp.c 	md->msix_alloc_mask |= (1u << t);
t                 429 arch/mips/pci/msi-xlp.c 	xirq += t;
t                  71 arch/mips/pci/pci-ar71xx.c 	u32 t;
t                  73 arch/mips/pci/pci-ar71xx.c 	t = ar71xx_pci_ble_table[size & 3][where & 3];
t                  74 arch/mips/pci/pci-ar71xx.c 	BUG_ON(t == 0xf);
t                  75 arch/mips/pci/pci-ar71xx.c 	t <<= (local) ? 20 : 4;
t                  77 arch/mips/pci/pci-ar71xx.c 	return t;
t                 258 arch/mips/pci/pci-ar71xx.c 	u32 t;
t                 263 arch/mips/pci/pci-ar71xx.c 	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
t                 264 arch/mips/pci/pci-ar71xx.c 	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
t                 275 arch/mips/pci/pci-ar71xx.c 	u32 t;
t                 280 arch/mips/pci/pci-ar71xx.c 	t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
t                 281 arch/mips/pci/pci-ar71xx.c 	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
t                 332 arch/mips/pci/pci-ar71xx.c 	u32 t;
t                 371 arch/mips/pci/pci-ar71xx.c 	t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
t                 373 arch/mips/pci/pci-ar71xx.c 	ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
t                 253 arch/mips/pci/pci-ar724x.c 	u32 t;
t                 261 arch/mips/pci/pci-ar724x.c 		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
t                 262 arch/mips/pci/pci-ar724x.c 		__raw_writel(t | AR724X_PCI_INT_DEV0,
t                 274 arch/mips/pci/pci-ar724x.c 	u32 t;
t                 282 arch/mips/pci/pci-ar724x.c 		t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
t                 283 arch/mips/pci/pci-ar724x.c 		__raw_writel(t & ~AR724X_PCI_INT_DEV0,
t                 289 arch/mips/pci/pci-ar724x.c 		t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
t                 290 arch/mips/pci/pci-ar724x.c 		__raw_writel(t | AR724X_PCI_INT_DEV0,
t                 156 arch/mips/pci/pci-rt3883.c 	u32 t;
t                 160 arch/mips/pci/pci-rt3883.c 	t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
t                 161 arch/mips/pci/pci-rt3883.c 	rt3883_pci_w32(rpc, t | BIT(d->hwirq), RT3883_PCI_REG_PCIENA);
t                 169 arch/mips/pci/pci-rt3883.c 	u32 t;
t                 173 arch/mips/pci/pci-rt3883.c 	t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
t                 174 arch/mips/pci/pci-rt3883.c 	rt3883_pci_w32(rpc, t & ~BIT(d->hwirq), RT3883_PCI_REG_PCIENA);
t                 310 arch/mips/pci/pci-rt3883.c 	u32 t;
t                 325 arch/mips/pci/pci-rt3883.c 		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 326 arch/mips/pci/pci-rt3883.c 		t &= ~BIT(31);
t                 327 arch/mips/pci/pci-rt3883.c 		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 329 arch/mips/pci/pci-rt3883.c 		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
t                 330 arch/mips/pci/pci-rt3883.c 		t &= 0x80ffffff;
t                 331 arch/mips/pci/pci-rt3883.c 		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);
t                 333 arch/mips/pci/pci-rt3883.c 		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
t                 334 arch/mips/pci/pci-rt3883.c 		t |= 0xa << 24;
t                 335 arch/mips/pci/pci-rt3883.c 		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);
t                 337 arch/mips/pci/pci-rt3883.c 		t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 338 arch/mips/pci/pci-rt3883.c 		t |= BIT(31);
t                 339 arch/mips/pci/pci-rt3883.c 		rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 371 arch/mips/pci/pci-rt3883.c 	t = (RT3883_P2P_BR_DEVNUM << RT3883_PCICFG_P2P_BR_DEVNUM_S);
t                 372 arch/mips/pci/pci-rt3883.c 	rt3883_pci_w32(rpc, t, RT3883_PCI_REG_PCICFG);
t                 381 arch/mips/pci/pci-rt3883.c 		t = rt3883_pci_r32(rpc, RT3883_PCI_REG_STATUS(1));
t                 383 arch/mips/pci/pci-rt3883.c 		rpc->pcie_ready = t & BIT(0);
t                 387 arch/mips/pci/pci-rt3883.c 			t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL);
t                 388 arch/mips/pci/pci-rt3883.c 			t |= RT3883_RSTCTRL_PCIE;
t                 389 arch/mips/pci/pci-rt3883.c 			rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
t                 390 arch/mips/pci/pci-rt3883.c 			t &= ~RT3883_RSTCTRL_PCIE;
t                 391 arch/mips/pci/pci-rt3883.c 			rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
t                 394 arch/mips/pci/pci-rt3883.c 			t = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1);
t                 395 arch/mips/pci/pci-rt3883.c 			t &= ~RT3883_CLKCFG1_PCIE_CLK_EN;
t                 396 arch/mips/pci/pci-rt3883.c 			rt_sysc_w32(t, RT3883_SYSC_REG_CLKCFG1);
t                 398 arch/mips/pci/pci-rt3883.c 			t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 399 arch/mips/pci/pci-rt3883.c 			t &= ~0xf000c080;
t                 400 arch/mips/pci/pci-rt3883.c 			rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
t                 370 arch/mips/ralink/mt7620.c 	u64 t;
t                 372 arch/mips/ralink/mt7620.c 	t = ref_rate;
t                 373 arch/mips/ralink/mt7620.c 	t *= mul;
t                 374 arch/mips/ralink/mt7620.c 	do_div(t, div);
t                 376 arch/mips/ralink/mt7620.c 	return t;
t                  43 arch/mips/ralink/rt288x.c 	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
t                  44 arch/mips/ralink/rt288x.c 	t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
t                  46 arch/mips/ralink/rt288x.c 	switch (t) {
t                 100 arch/mips/ralink/rt305x.c 	u32 t;
t                 102 arch/mips/ralink/rt305x.c 	t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
t                 103 arch/mips/ralink/rt305x.c 	t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
t                 106 arch/mips/ralink/rt305x.c 	switch (t) {
t                 123 arch/mips/ralink/rt305x.c 		panic("rt5350: invalid DRAM size: %u", t);
t                 135 arch/mips/ralink/rt305x.c 	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
t                 138 arch/mips/ralink/rt305x.c 		t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) &
t                 140 arch/mips/ralink/rt305x.c 		switch (t) {
t                 150 arch/mips/ralink/rt305x.c 		t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) &
t                 152 arch/mips/ralink/rt305x.c 		switch (t) {
t                 163 arch/mips/ralink/rt305x.c 		t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) &
t                 165 arch/mips/ralink/rt305x.c 		switch (t) {
t                  65 arch/mips/ralink/timer.c 		u32 t = TMR0CTL_MODE_PERIODIC | TMR0CTL_PRESCALE_VAL;
t                  66 arch/mips/ralink/timer.c 		rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
t                  85 arch/mips/ralink/timer.c 	u32 t;
t                  89 arch/mips/ralink/timer.c 	t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
t                  90 arch/mips/ralink/timer.c 	t |= TMR0CTL_ENABLE;
t                  91 arch/mips/ralink/timer.c 	rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
t                 139 arch/mips/sibyte/swarm/rtc_m41t81.c int m41t81_set_time(time64_t t)
t                 145 arch/mips/sibyte/swarm/rtc_m41t81.c 	rtc_time64_to_tm(t, &tm);
t                 108 arch/mips/sibyte/swarm/rtc_xicor1241.c int xicor_set_time(time64_t t)
t                 114 arch/mips/sibyte/swarm/rtc_xicor1241.c 	rtc_time64_to_tm(t, &tm);
t                  12 arch/nds32/include/asm/sfp-machine.h #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
t                  13 arch/nds32/include/asm/sfp-machine.h #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
t                  63 arch/nds32/math-emu/fpuemu.c 	void (*t)(void *ft, void *fa, void *fb);
t                  79 arch/nds32/math-emu/fpuemu.c 				func.t = fadds;
t                  83 arch/nds32/math-emu/fpuemu.c 				func.t = fsubs;
t                  87 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fmadds;
t                  91 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fmsubs;
t                  95 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fnmadds;
t                  99 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fnmsubs;
t                 103 arch/nds32/math-emu/fpuemu.c 				func.t = fmuls;
t                 107 arch/nds32/math-emu/fpuemu.c 				func.t = fdivs;
t                 172 arch/nds32/math-emu/fpuemu.c 				func.t = faddd;
t                 176 arch/nds32/math-emu/fpuemu.c 				func.t = fsubd;
t                 180 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fmaddd;
t                 184 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fmsubd;
t                 188 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fnmaddd;
t                 192 arch/nds32/math-emu/fpuemu.c 				func.t = fpemu_fnmsubd;
t                 196 arch/nds32/math-emu/fpuemu.c 				func.t = fmuld;
t                 200 arch/nds32/math-emu/fpuemu.c 				func.t = fdivd;
t                 284 arch/nds32/math-emu/fpuemu.c 			func.t(ft, fa, fb);
t                 323 arch/nds32/math-emu/fpuemu.c 			func.t(ft, fa, fb);
t                 176 arch/nds32/mm/alignment.c 	unsigned char *s, *t;
t                 182 arch/nds32/mm/alignment.c 	t = (void *)&ret;
t                 185 arch/nds32/mm/alignment.c 		*t++ = *s++;
t                 187 arch/nds32/mm/alignment.c 	if (((*(t - 1)) & 0x80) && (i < 4)) {
t                 190 arch/nds32/mm/alignment.c 			*t++ = 0xff;
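
The nds32 alignment handler copies an unaligned operand into ret one byte at a time and then, if the most significant loaded byte has bit 7 set and fewer than four bytes were loaded, pads the rest with 0xff to sign-extend. A little-endian user-space sketch of the same idea, with the fault-tolerant user access left out; the little-endian assumption is stated in the comment:

#include <stdint.h>
#include <stdio.h>

/* load an n-byte (1..4) little-endian signed value byte by byte,
 * then sign-extend it to 32 bits by padding with 0xff, as above */
static int32_t load_signed_le(const unsigned char *s, int n)
{
	uint32_t ret = 0;
	unsigned char *t = (unsigned char *)&ret;	/* assumes little endian */
	int i;

	for (i = 0; i < n; i++)
		*t++ = *s++;

	if ((*(t - 1) & 0x80) && (i < 4))	/* negative and not full width */
		while (i++ < 4)
			*t++ = 0xff;

	return (int32_t)ret;
}

int main(void)
{
	unsigned char buf[2] = { 0xfe, 0xff };	/* -2 as a 16-bit value */

	printf("%d\n", load_signed_le(buf, 2));	/* prints -2 on little endian */
	return 0;
}
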
t                 119 arch/parisc/include/asm/assembly.h 	.macro shlw r, sa, t
t                 124 arch/parisc/include/asm/assembly.h 	.macro shld r, sa, t
t                 129 arch/parisc/include/asm/assembly.h 	.macro shr r, sa, t
t                 134 arch/parisc/include/asm/assembly.h 	.macro shrd r, sa, t
t                 199 arch/parisc/include/asm/compat.h static inline int __is_compat_task(struct task_struct *t)
t                 201 arch/parisc/include/asm/compat.h 	return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
t                  73 arch/parisc/include/asm/psw.h 	unsigned int t:1;
t                 123 arch/parisc/include/asm/uaccess.h 		__typeof__(*(ptr))	t;		\
t                 135 arch/parisc/include/asm/uaccess.h 	(val) = __gu_tmp.t;				\
t                  59 arch/parisc/include/asm/unwind.h 	struct task_struct *t;
t                  74 arch/parisc/include/asm/unwind.h void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 
t                  77 arch/parisc/include/asm/unwind.h 			struct task_struct *t);
t                 160 arch/parisc/kernel/module.c 	int s, t;
t                 163 arch/parisc/kernel/module.c 	t = (as16 << 1) & 0xffff;
t                 165 arch/parisc/kernel/module.c 	return (t ^ s ^ (s >> 1)) | (s >> 15);
t                 559 arch/parisc/kernel/module.c #define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
t                 704 arch/parisc/kernel/module.c #define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
t                  59 arch/parisc/kernel/ptrace.c 	pa_psw(task)->t = 0;
t                 107 arch/parisc/kernel/ptrace.c 	pa_psw(task)->t = 0;
t                 119 arch/parisc/kernel/ptrace.c 	pa_psw(task)->t = 1;
t                 201 arch/parisc/kernel/traps.c void show_stack(struct task_struct *t, unsigned long *sp)
t                 203 arch/parisc/kernel/traps.c 	parisc_show_stack(t, NULL);
t                 298 arch/parisc/kernel/unwind.c 			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
t                 299 arch/parisc/kernel/unwind.c 			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
t                 379 arch/parisc/kernel/unwind.c void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 
t                 383 arch/parisc/kernel/unwind.c 	info->t = t;
t                 390 arch/parisc/kernel/unwind.c 	    t ? (int)t->pid : -1, info->sp, info->ip);
t                 393 arch/parisc/kernel/unwind.c void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
t                 395 arch/parisc/kernel/unwind.c 	struct pt_regs *r = &t->thread.regs;
t                 404 arch/parisc/kernel/unwind.c 	unwind_frame_init(info, t, r2);
t                 449 arch/parisc/kernel/unwind.c 	    next_frame->t ? (int)next_frame->t->pid : -1, 
t                 277 arch/parisc/math-emu/fpudispatch.c 	u_int r1,r2,t;		/* operand register offsets */ 
t                 293 arch/parisc/math-emu/fpudispatch.c 	t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
t                 294 arch/parisc/math-emu/fpudispatch.c 	if (t == 0 && class != 2)	/* don't allow fr0 as a dest */
t                 309 arch/parisc/math-emu/fpudispatch.c 					t &= ~3;  /* force to even reg #s */
t                 311 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+3] = fpregs[r1+3];
t                 312 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+2] = fpregs[r1+2];
t                 314 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 316 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1];
t                 324 arch/parisc/math-emu/fpudispatch.c 					t &= ~3;  /* force to even reg #s */
t                 326 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+3] = fpregs[r1+3];
t                 327 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+2] = fpregs[r1+2];
t                 329 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 332 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] & 0x7fffffff;
t                 340 arch/parisc/math-emu/fpudispatch.c 					t &= ~3;  /* force to even reg #s */
t                 342 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+3] = fpregs[r1+3];
t                 343 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+2] = fpregs[r1+2];
t                 345 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 348 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] ^ 0x80000000;
t                 356 arch/parisc/math-emu/fpudispatch.c 					t &= ~3;  /* force to even reg #s */
t                 358 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+3] = fpregs[r1+3];
t                 359 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+2] = fpregs[r1+2];
t                 361 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 364 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] | 0x80000000;
t                 371 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 374 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 383 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 386 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 415 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 418 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 426 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 429 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 432 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 435 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 441 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 444 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 447 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 450 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 456 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 459 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 462 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 465 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 471 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 474 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 477 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 480 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 486 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 489 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 492 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 495 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 501 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 504 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 507 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 510 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 628 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 631 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 640 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 643 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 652 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 655 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 664 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 667 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 676 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 679 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 696 arch/parisc/math-emu/fpudispatch.c 	u_int r1,r2,t;		/* operand register offsets */
t                 708 arch/parisc/math-emu/fpudispatch.c 	t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
t                 709 arch/parisc/math-emu/fpudispatch.c 	if (t == 0 && class != 2)
t                 723 arch/parisc/math-emu/fpudispatch.c 			t &= ~1;
t                 738 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 740 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1];
t                 749 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 751 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] & 0x7fffffff;
t                 760 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 762 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] ^ 0x80000000;
t                 771 arch/parisc/math-emu/fpudispatch.c 					fpregs[t+1] = fpregs[r1+1];
t                 773 arch/parisc/math-emu/fpudispatch.c 					fpregs[t] = fpregs[r1] | 0x80000000;
t                 780 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t], status));
t                 783 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t], status));
t                 792 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t], status));
t                 795 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t], status));
t                 809 arch/parisc/math-emu/fpudispatch.c 			t &= ~1;
t                 822 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 825 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 833 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 836 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 839 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 842 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 848 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 851 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 854 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 857 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 863 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 866 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 869 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 872 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 878 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 881 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 884 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 887 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 893 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 896 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 899 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 902 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 908 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 911 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 914 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                 917 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1024 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1027 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1033 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1036 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1052 arch/parisc/math-emu/fpudispatch.c 					    if (t & 1)
t                1068 arch/parisc/math-emu/fpudispatch.c 					       &fpregs[r2],&fpregs[t],status));
t                1071 arch/parisc/math-emu/fpudispatch.c 					       &fpregs[r2],&fpregs[t],status));
t                1078 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1081 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1087 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1090 arch/parisc/math-emu/fpudispatch.c 						&fpregs[t],status));
t                1339 arch/parisc/math-emu/fpudispatch.c 	u_int rm1, rm2, ra, t; /* operands */
t                1354 arch/parisc/math-emu/fpudispatch.c 		t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
t                1355 arch/parisc/math-emu/fpudispatch.c 		if (t == 0)
t                1360 arch/parisc/math-emu/fpudispatch.c 					&fpregs[ra], &fpregs[0], &fpregs[t]));
t                1363 arch/parisc/math-emu/fpudispatch.c 					&fpregs[ra], &fpregs[0], &fpregs[t]));
t                1376 arch/parisc/math-emu/fpudispatch.c 		t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
t                1377 arch/parisc/math-emu/fpudispatch.c 		if (t == 0)
t                1382 arch/parisc/math-emu/fpudispatch.c 					&fpregs[ra], &fpregs[0], &fpregs[t]));
t                1385 arch/parisc/math-emu/fpudispatch.c 					&fpregs[ra], &fpregs[0], &fpregs[t]));
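
The fpudispatch copy/abs/negate cases never touch the FPU: since IEEE 754 keeps the sign in the top bit of the most significant word, FABS is "& 0x7fffffff", FNEG is "^ 0x80000000" and FNEGABS is "| 0x80000000" on that word, with the remaining words copied unchanged. A small sketch of the same masks applied to single-precision floats, type-punned through memcpy to stay within strict aliasing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* reinterpret a float's bits as u32 and back */
static uint32_t f2u(float x) { uint32_t u; memcpy(&u, &x, sizeof(u)); return u; }
static float u2f(uint32_t u) { float x; memcpy(&x, &u, sizeof(x)); return x; }

static float my_fabsf(float x)    { return u2f(f2u(x) & 0x7fffffffu); }	/* clear sign */
static float my_fnegf(float x)    { return u2f(f2u(x) ^ 0x80000000u); }	/* flip sign  */
static float my_fnegabsf(float x) { return u2f(f2u(x) | 0x80000000u); }	/* force sign */

int main(void)
{
	printf("%g %g %g\n", my_fabsf(-2.5f), my_fnegf(2.5f), my_fnegabsf(2.5f));
	/* prints: 2.5 -2.5 -2.5 */
	return 0;
}
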
t                 221 arch/parisc/mm/fault.c 	const char *t = NULL;
t                 224 arch/parisc/mm/fault.c 		t = trap_description[code];
t                 226 arch/parisc/mm/fault.c 	return t ? t : "Unknown trap";
t                  20 arch/powerpc/include/asm/asm-compat.h #define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
t                  53 arch/powerpc/include/asm/asm-compat.h #define PPC_LLARX(t, a, b, eh)	PPC_LWARX(t, a, b, eh)
t                  30 arch/powerpc/include/asm/atomic.h 	int t;
t                  32 arch/powerpc/include/asm/atomic.h 	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
t                  34 arch/powerpc/include/asm/atomic.h 	return t;
t                  45 arch/powerpc/include/asm/atomic.h 	int t;								\
t                  53 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)					\
t                  61 arch/powerpc/include/asm/atomic.h 	int t;								\
t                  69 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)					\
t                  73 arch/powerpc/include/asm/atomic.h 	return t;							\
t                  79 arch/powerpc/include/asm/atomic.h 	int res, t;							\
t                  87 arch/powerpc/include/asm/atomic.h 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
t                 128 arch/powerpc/include/asm/atomic.h 	int t;
t                 136 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 144 arch/powerpc/include/asm/atomic.h 	int t;
t                 152 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 156 arch/powerpc/include/asm/atomic.h 	return t;
t                 161 arch/powerpc/include/asm/atomic.h 	int t;
t                 169 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 177 arch/powerpc/include/asm/atomic.h 	int t;
t                 185 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 189 arch/powerpc/include/asm/atomic.h 	return t;
t                 215 arch/powerpc/include/asm/atomic.h 	int t;
t                 229 arch/powerpc/include/asm/atomic.h 	: "=&r" (t)
t                 233 arch/powerpc/include/asm/atomic.h 	return t;
t                 275 arch/powerpc/include/asm/atomic.h 	int t;
t                 288 arch/powerpc/include/asm/atomic.h 2:"	: "=&b" (t)
t                 292 arch/powerpc/include/asm/atomic.h 	return t;
t                 302 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 304 arch/powerpc/include/asm/atomic.h 	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
t                 306 arch/powerpc/include/asm/atomic.h 	return t;
t                 317 arch/powerpc/include/asm/atomic.h 	s64 t;								\
t                 324 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)					\
t                 333 arch/powerpc/include/asm/atomic.h 	s64 t;								\
t                 340 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)					\
t                 344 arch/powerpc/include/asm/atomic.h 	return t;							\
t                 351 arch/powerpc/include/asm/atomic.h 	s64 res, t;							\
t                 358 arch/powerpc/include/asm/atomic.h 	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
t                 399 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 406 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 414 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 421 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 425 arch/powerpc/include/asm/atomic.h 	return t;
t                 430 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 437 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 445 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 452 arch/powerpc/include/asm/atomic.h 	: "=&r" (t), "+m" (v->counter)
t                 456 arch/powerpc/include/asm/atomic.h 	return t;
t                 468 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 479 arch/powerpc/include/asm/atomic.h 2:"	: "=&r" (t)
t                 483 arch/powerpc/include/asm/atomic.h 	return t;
t                 507 arch/powerpc/include/asm/atomic.h 	s64 t;
t                 520 arch/powerpc/include/asm/atomic.h 	: "=&r" (t)
t                 524 arch/powerpc/include/asm/atomic.h 	return t;
t                 116 arch/powerpc/include/asm/bitops.h 	unsigned long old, t;				\
t                 126 arch/powerpc/include/asm/bitops.h 	: "=&r" (old), "=&r" (t)			\
t                 170 arch/powerpc/include/asm/bitops.h 	unsigned long old, t;
t                 181 arch/powerpc/include/asm/bitops.h 	: "=&r" (old), "=&r" (t)
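
The powerpc atomic.h and bitops.h fragments are all instances of one pattern: load-and-reserve (lwarx/ldarx), compute the new value, store-conditional (stwcx./stdcx.), and branch back if the reservation was lost. A minimal sketch of that retry loop for a 32-bit add, written as GCC inline asm; it is only a sketch of the pattern, builds only when targeting PowerPC, and leaves out the memory barriers the kernel adds around the value-returning variants:

/* compiles only with GCC/clang targeting PowerPC */
static inline void ll_sc_add(int a, int *p)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3\n"	/* load word and reserve               */
"	add	%0,%2,%0\n"	/* t = a + *p                          */
"	stwcx.	%0,0,%3\n"	/* store only if reservation still held */
"	bne-	1b"		/* lost the reservation: retry          */
	: "=&r" (t), "+m" (*p)
	: "r" (a), "r" (p)
	: "cc");
}

On other targets the portable way to get the same effect is the compiler builtin __atomic_fetch_add(p, a, __ATOMIC_RELAXED).
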
t                 487 arch/powerpc/include/asm/cpm1.h #define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
t                  43 arch/powerpc/include/asm/local.h 	long t;								\
t                  47 arch/powerpc/include/asm/local.h 	t = (l->v c_op a);						\
t                  50 arch/powerpc/include/asm/local.h 	return t;							\
t                  81 arch/powerpc/include/asm/local.h 	long t;
t                  85 arch/powerpc/include/asm/local.h 	t = l->v;
t                  86 arch/powerpc/include/asm/local.h 	if (t == o)
t                  90 arch/powerpc/include/asm/local.h 	return t;
t                  95 arch/powerpc/include/asm/local.h 	long t;
t                  99 arch/powerpc/include/asm/local.h 	t = l->v;
t                 103 arch/powerpc/include/asm/local.h 	return t;
t                 385 arch/powerpc/include/asm/ppc-opcode.h #define ___PPC_RT(t)	___PPC_RS(t)
t                 393 arch/powerpc/include/asm/ppc-opcode.h #define __PPC_RT(t)	___PPC_RT(__REG_##t)
t                 398 arch/powerpc/include/asm/ppc-opcode.h #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
t                 408 arch/powerpc/include/asm/ppc-opcode.h #define __PPC_CT(t)	(((t) & 0x0f) << 21)
t                 435 arch/powerpc/include/asm/ppc-opcode.h #define PPC_DARN(t, l)		stringify_in_c(.long PPC_INST_DARN |  \
t                 436 arch/powerpc/include/asm/ppc-opcode.h 						___PPC_RT(t)	   |  \
t                 442 arch/powerpc/include/asm/ppc-opcode.h #define PPC_LQARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LQARX | \
t                 443 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a) | \
t                 445 arch/powerpc/include/asm/ppc-opcode.h #define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
t                 446 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a) | \
t                 448 arch/powerpc/include/asm/ppc-opcode.h #define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
t                 449 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a) | \
t                 451 arch/powerpc/include/asm/ppc-opcode.h #define PPC_STQCX(t, a, b)	stringify_in_c(.long PPC_INST_STQCX | \
t                 452 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a) | \
t                 454 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDHD(t, a, b, c)	stringify_in_c(.long PPC_INST_MADDHD | \
t                 455 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a)  | \
t                 457 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDHDU(t, a, b, c)	stringify_in_c(.long PPC_INST_MADDHDU | \
t                 458 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a)   | \
t                 460 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDLD(t, a, b, c)	stringify_in_c(.long PPC_INST_MADDLD | \
t                 461 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RT(t) | ___PPC_RA(a)  | \
t                 483 arch/powerpc/include/asm/ppc-opcode.h #define PPC_TLBILX(t, a, b)	stringify_in_c(.long PPC_INST_TLBILX | \
t                 484 arch/powerpc/include/asm/ppc-opcode.h 					__PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
t                 511 arch/powerpc/include/asm/ppc-opcode.h #define PPC_ERATILX(t, a, b)	stringify_in_c(.long PPC_INST_ERATILX | \
t                 512 arch/powerpc/include/asm/ppc-opcode.h 					__PPC_T_TLB(t) | __PPC_RA0(a) | \
t                 516 arch/powerpc/include/asm/ppc-opcode.h #define PPC_ERATSX(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX | \
t                 517 arch/powerpc/include/asm/ppc-opcode.h 					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
t                 518 arch/powerpc/include/asm/ppc-opcode.h #define PPC_ERATSX_DOT(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
t                 519 arch/powerpc/include/asm/ppc-opcode.h 					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
t                 520 arch/powerpc/include/asm/ppc-opcode.h #define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
t                 521 arch/powerpc/include/asm/ppc-opcode.h 					__PPC_RT(t) | __PPC_RB(b))
t                 522 arch/powerpc/include/asm/ppc-opcode.h #define __PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE |	\
t                 523 arch/powerpc/include/asm/ppc-opcode.h 					       ___PPC_RT(t) | ___PPC_RB(b))
t                 527 arch/powerpc/include/asm/ppc-opcode.h #define LBZCIX(t,a,b)		stringify_in_c(.long PPC_INST_LBZCIX | \
t                 528 arch/powerpc/include/asm/ppc-opcode.h 				       __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
t                 537 arch/powerpc/include/asm/ppc-opcode.h #define VSX_XX3(t, a, b)	(__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
t                 542 arch/powerpc/include/asm/ppc-opcode.h #define MFVRD(a, t)		stringify_in_c(.long PPC_INST_MFVSRD | \
t                 543 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX1((t)+32, a, R0))
t                 544 arch/powerpc/include/asm/ppc-opcode.h #define MTVRD(t, a)		stringify_in_c(.long PPC_INST_MTVSRD | \
t                 545 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX1((t)+32, a, R0))
t                 546 arch/powerpc/include/asm/ppc-opcode.h #define VPMSUMW(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMW | \
t                 547 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX3((t), a, b))
t                 548 arch/powerpc/include/asm/ppc-opcode.h #define VPMSUMD(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMD | \
t                 549 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX3((t), a, b))
t                 550 arch/powerpc/include/asm/ppc-opcode.h #define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \
t                 551 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX3((t), a, b))
t                 552 arch/powerpc/include/asm/ppc-opcode.h #define XXSWAPD(t, a)		stringify_in_c(.long PPC_INST_XXSWAPD | \
t                 553 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX3((t), a, a))
t                 554 arch/powerpc/include/asm/ppc-opcode.h #define XVCPSGNDP(t, a, b)	stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
t                 555 arch/powerpc/include/asm/ppc-opcode.h 					       VSX_XX3((t), (a), (b))))
t                  37 arch/powerpc/include/asm/sfp-machine.h #define __ll_lowpart(t)		((UWtype) (t) & (__ll_B - 1))
t                  38 arch/powerpc/include/asm/sfp-machine.h #define __ll_highpart(t)	((UWtype) (t) >> (W_TYPE_SIZE / 2))
t                  96 arch/powerpc/include/asm/sstep.h #define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
t                  98 arch/powerpc/include/asm/sstep.h #define MKOP(t, f, s)	((t) | (f) | SIZE(s))
t                  38 arch/powerpc/include/asm/switch_to.h static inline void save_fpu(struct task_struct *t) { }
t                  39 arch/powerpc/include/asm/switch_to.h static inline void flush_fp_to_thread(struct task_struct *t) { }
t                  52 arch/powerpc/include/asm/switch_to.h static inline void save_altivec(struct task_struct *t) { }
t                  53 arch/powerpc/include/asm/switch_to.h static inline void __giveup_altivec(struct task_struct *t) { }
t                  75 arch/powerpc/include/asm/switch_to.h static inline void __giveup_spe(struct task_struct *t) { }
t                  78 arch/powerpc/include/asm/switch_to.h static inline void clear_task_ebb(struct task_struct *t)
t                  82 arch/powerpc/include/asm/switch_to.h     t->thread.ebbrr = 0;
t                  83 arch/powerpc/include/asm/switch_to.h     t->thread.ebbhr = 0;
t                  84 arch/powerpc/include/asm/switch_to.h     t->thread.bescr = 0;
t                  85 arch/powerpc/include/asm/switch_to.h     t->thread.mmcr2 = 0;
t                  86 arch/powerpc/include/asm/switch_to.h     t->thread.mmcr0 = 0;
t                  87 arch/powerpc/include/asm/switch_to.h     t->thread.siar = 0;
t                  88 arch/powerpc/include/asm/switch_to.h     t->thread.sdar = 0;
t                  89 arch/powerpc/include/asm/switch_to.h     t->thread.sier = 0;
t                  90 arch/powerpc/include/asm/switch_to.h     t->thread.used_ebb = 0;
t                  96 arch/powerpc/include/asm/switch_to.h extern int set_thread_tidr(struct task_struct *t);
t                2145 arch/powerpc/kernel/cputable.c 	struct cpu_spec *t = &the_cpu_spec;
t                2147 arch/powerpc/kernel/cputable.c 	t = PTRRELOC(t);
t                2152 arch/powerpc/kernel/cputable.c 	memcpy(t, s, sizeof(*t));
t                2160 arch/powerpc/kernel/cputable.c 	struct cpu_spec *t = &the_cpu_spec;
t                2163 arch/powerpc/kernel/cputable.c 	t = PTRRELOC(t);
t                2164 arch/powerpc/kernel/cputable.c 	old = *t;
t                2170 arch/powerpc/kernel/cputable.c 	memcpy(t, s, sizeof(*t));
t                2178 arch/powerpc/kernel/cputable.c 		t->num_pmcs = old.num_pmcs;
t                2179 arch/powerpc/kernel/cputable.c 		t->pmc_type = old.pmc_type;
t                2180 arch/powerpc/kernel/cputable.c 		t->oprofile_type = old.oprofile_type;
t                2181 arch/powerpc/kernel/cputable.c 		t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv;
t                2182 arch/powerpc/kernel/cputable.c 		t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr;
t                2183 arch/powerpc/kernel/cputable.c 		t->oprofile_mmcra_clear = old.oprofile_mmcra_clear;
t                2200 arch/powerpc/kernel/cputable.c 			t->oprofile_cpu_type = old.oprofile_cpu_type;
t                2201 arch/powerpc/kernel/cputable.c 			t->oprofile_type = old.oprofile_type;
t                2202 arch/powerpc/kernel/cputable.c 			t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
t                2213 arch/powerpc/kernel/cputable.c 		*PTRRELOC(&powerpc_base_platform) = t->platform;
t                2222 arch/powerpc/kernel/cputable.c 	if (t->cpu_setup) {
t                2223 arch/powerpc/kernel/cputable.c 		t->cpu_setup(offset, t);
t                2227 arch/powerpc/kernel/cputable.c 	return t;
t                2255 arch/powerpc/kernel/cputable.c 	struct cpu_spec *t = &the_cpu_spec;
t                2259 arch/powerpc/kernel/cputable.c 	t = PTRRELOC(t);
t                2263 arch/powerpc/kernel/cputable.c 			t->cpu_name = s->cpu_name;
t                  80 arch/powerpc/kernel/eeh_event.c 	struct task_struct *t;
t                  83 arch/powerpc/kernel/eeh_event.c 	t = kthread_run(eeh_event_handler, NULL, "eehd");
t                  84 arch/powerpc/kernel/eeh_event.c 	if (IS_ERR(t)) {
t                  85 arch/powerpc/kernel/eeh_event.c 		ret = PTR_ERR(t);
t                 382 arch/powerpc/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
t                 384 arch/powerpc/kernel/hw_breakpoint.c 	unregister_hw_breakpoint(t->ptrace_bps[0]);
t                 385 arch/powerpc/kernel/hw_breakpoint.c 	t->ptrace_bps[0] = NULL;
t                1051 arch/powerpc/kernel/process.c static inline void save_sprs(struct thread_struct *t)
t                1055 arch/powerpc/kernel/process.c 		t->vrsave = mfspr(SPRN_VRSAVE);
t                1059 arch/powerpc/kernel/process.c 		t->dscr = mfspr(SPRN_DSCR);
t                1062 arch/powerpc/kernel/process.c 		t->bescr = mfspr(SPRN_BESCR);
t                1063 arch/powerpc/kernel/process.c 		t->ebbhr = mfspr(SPRN_EBBHR);
t                1064 arch/powerpc/kernel/process.c 		t->ebbrr = mfspr(SPRN_EBBRR);
t                1066 arch/powerpc/kernel/process.c 		t->fscr = mfspr(SPRN_FSCR);
t                1074 arch/powerpc/kernel/process.c 		t->tar = mfspr(SPRN_TAR);
t                1078 arch/powerpc/kernel/process.c 	thread_pkey_regs_save(t);
t                1512 arch/powerpc/kernel/process.c int set_thread_tidr(struct task_struct *t)
t                1517 arch/powerpc/kernel/process.c 	if (t != current)
t                1520 arch/powerpc/kernel/process.c 	if (t->thread.tidr)
t                1523 arch/powerpc/kernel/process.c 	t->thread.tidr = (u16)task_pid_nr(t);
t                1524 arch/powerpc/kernel/process.c 	mtspr(SPRN_TIDR, t->thread.tidr);
t                1533 arch/powerpc/kernel/process.c release_thread(struct task_struct *t)
t                 878 arch/powerpc/kernel/traps.c 	unsigned int ra, rb, t, i, sel, instr, rc;
t                 915 arch/powerpc/kernel/traps.c 	t = (instr >> 21) & 0x1f;
t                 917 arch/powerpc/kernel/traps.c 		vdst = (u8 *)&current->thread.vr_state.vr[t];
t                 919 arch/powerpc/kernel/traps.c 		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
t                  84 arch/powerpc/kernel/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *t)
t                  86 arch/powerpc/kernel/uprobes.c 	if (t->thread.trap_nr != UPROBE_TRAP_NR)
t                 413 arch/powerpc/kvm/book3s_emulate.c 				ulong b, t;
t                 417 arch/powerpc/kvm/book3s_emulate.c 				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
t                 419 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
t                 430 arch/powerpc/kvm/book3s_emulate.c 				ulong t, rb_val;
t                 433 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
t                 434 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
t                 441 arch/powerpc/kvm/book3s_emulate.c 				ulong t, rb_val;
t                 444 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
t                 445 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
t                 288 arch/powerpc/kvm/book3s_hv_builtin.c 	int me, ee, i, t;
t                 341 arch/powerpc/kvm/book3s_hv_builtin.c 			for (t = 1; t < threads_per_core; ++t) {
t                 342 arch/powerpc/kvm/book3s_hv_builtin.c 				if (sip->napped[t])
t                 343 arch/powerpc/kvm/book3s_hv_builtin.c 					kvmhv_rm_send_ipi(cpu0 + t);
t                 592 arch/powerpc/kvm/book3s_paired_singles.c 				    void (*func)(u64 *t,
t                 786 arch/powerpc/kvm/book3s_pr.c 	struct thread_struct *t = &current->thread;
t                 809 arch/powerpc/kvm/book3s_pr.c 		if (t->regs->msr & MSR_FP)
t                 811 arch/powerpc/kvm/book3s_pr.c 		t->fp_save_area = NULL;
t                 818 arch/powerpc/kvm/book3s_pr.c 		t->vr_save_area = NULL;
t                 849 arch/powerpc/kvm/book3s_pr.c 	struct thread_struct *t = &current->thread;
t                 891 arch/powerpc/kvm/book3s_pr.c 		t->fp_save_area = &vcpu->arch.fp;
t                 901 arch/powerpc/kvm/book3s_pr.c 		t->vr_save_area = &vcpu->arch.vr;
t                 906 arch/powerpc/kvm/book3s_pr.c 	t->regs->msr |= msr;
t                 601 arch/powerpc/kvm/booke.c void kvmppc_watchdog_func(struct timer_list *t)
t                 603 arch/powerpc/kvm/booke.c 	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
t                  25 arch/powerpc/lib/test_emulate_step.c #define TEST_LWZX(t, a, b)	(PPC_INST_LWZX | ___PPC_RT(t) |		\
t                  29 arch/powerpc/lib/test_emulate_step.c #define TEST_LDARX(t, a, b, eh)	(PPC_INST_LDARX | ___PPC_RT(t) |	\
t                  34 arch/powerpc/lib/test_emulate_step.c #define TEST_LFSX(t, a, b)	(PPC_INST_LFSX | ___PPC_RT(t) |		\
t                  38 arch/powerpc/lib/test_emulate_step.c #define TEST_LFDX(t, a, b)	(PPC_INST_LFDX | ___PPC_RT(t) |		\
t                  42 arch/powerpc/lib/test_emulate_step.c #define TEST_LVX(t, a, b)	(PPC_INST_LVX | ___PPC_RT(t) |		\
t                  48 arch/powerpc/lib/test_emulate_step.c #define TEST_ADD(t, a, b)	(PPC_INST_ADD | ___PPC_RT(t) |		\
t                  50 arch/powerpc/lib/test_emulate_step.c #define TEST_ADD_DOT(t, a, b)	(PPC_INST_ADD | ___PPC_RT(t) |		\
t                  52 arch/powerpc/lib/test_emulate_step.c #define TEST_ADDC(t, a, b)	(PPC_INST_ADDC | ___PPC_RT(t) |		\
t                  54 arch/powerpc/lib/test_emulate_step.c #define TEST_ADDC_DOT(t, a, b)	(PPC_INST_ADDC | ___PPC_RT(t) |		\
t                  76 arch/powerpc/mm/book3s32/mmu_context.c int init_new_context(struct task_struct *t, struct mm_struct *mm)
t                 342 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_pid *t = info;
t                 344 arch/powerpc/mm/book3s64/radix_tlb.c 	if (t->ric == RIC_FLUSH_TLB)
t                 345 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_pid(t->pid, RIC_FLUSH_TLB);
t                 346 arch/powerpc/mm/book3s64/radix_tlb.c 	else if (t->ric == RIC_FLUSH_PWC)
t                 347 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_pid(t->pid, RIC_FLUSH_PWC);
t                 349 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_pid(t->pid, RIC_FLUSH_ALL);
t                 356 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_pid t = { .pid = pid, .ric = ric };
t                 358 arch/powerpc/mm/book3s64/radix_tlb.c 	on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1);
t                 480 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_va *t = info;
t                 482 arch/powerpc/mm/book3s64/radix_tlb.c 	if (t->ric == RIC_FLUSH_TLB)
t                 483 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB);
t                 484 arch/powerpc/mm/book3s64/radix_tlb.c 	else if (t->ric == RIC_FLUSH_PWC)
t                 485 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC);
t                 487 arch/powerpc/mm/book3s64/radix_tlb.c 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL);
t                 495 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };
t                 496 arch/powerpc/mm/book3s64/radix_tlb.c 	on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
t                 512 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_va_range *t = info;
t                 514 arch/powerpc/mm/book3s64/radix_tlb.c 	_tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
t                 515 arch/powerpc/mm/book3s64/radix_tlb.c 				    t->psize, t->also_pwc);
t                 546 arch/powerpc/mm/book3s64/radix_tlb.c 	struct tlbiel_va_range t = { .start = start, .end = end,
t                 550 arch/powerpc/mm/book3s64/radix_tlb.c 	on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
t                 367 arch/powerpc/mm/nohash/mmu_context.c int init_new_context(struct task_struct *t, struct mm_struct *mm)
t                  78 arch/powerpc/net/bpf_jit.h #define PPC_BPF_LDARX(t, a, b, eh) EMIT(PPC_INST_LDARX | ___PPC_RT(t) |	      \
t                  81 arch/powerpc/net/bpf_jit.h #define PPC_BPF_LWARX(t, a, b, eh) EMIT(PPC_INST_LWARX | ___PPC_RT(t) |	      \
t                   8 arch/powerpc/perf/req-gen/_begin.h #define CAT2_STR_(t, s) __stringify(t/s)
t                   9 arch/powerpc/perf/req-gen/_begin.h #define CAT2_STR(t, s) CAT2_STR_(t, s)
t                 100 arch/powerpc/platforms/chrp/setup.c 	unsigned int t;
t                 116 arch/powerpc/platforms/chrp/setup.c 			t = in_le32(gg2_pci_config_base+
t                 119 arch/powerpc/platforms/chrp/setup.c 			if (!(t & 1))
t                 121 arch/powerpc/platforms/chrp/setup.c 			switch ((t>>8) & 0x1f) {
t                 145 arch/powerpc/platforms/chrp/setup.c 				   gg2_memtypes[sdramen ? 1 : ((t>>1) & 3)]);
t                 148 arch/powerpc/platforms/chrp/setup.c 		t = in_le32(gg2_pci_config_base+GG2_PCI_CC_CTRL);
t                 150 arch/powerpc/platforms/chrp/setup.c 			   gg2_cachesizes[(t>>7) & 3],
t                 151 arch/powerpc/platforms/chrp/setup.c 			   gg2_cachetypes[(t>>2) & 3],
t                 152 arch/powerpc/platforms/chrp/setup.c 			   gg2_cachemodes[t & 3]);
t                 360 arch/powerpc/platforms/powermac/low_i2c.c static void kw_i2c_timeout(struct timer_list *t)
t                 362 arch/powerpc/platforms/powermac/low_i2c.c 	struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
t                  22 arch/powerpc/platforms/powermac/pfunc_core.c #define LOG_BLOB(t,b,c)
t                  77 arch/powerpc/platforms/powermac/udbg_adb.c 	int k, t, on;
t                  82 arch/powerpc/platforms/powermac/udbg_adb.c 		t = 0;
t                  86 arch/powerpc/platforms/powermac/udbg_adb.c 			if (--t < 0) {
t                  90 arch/powerpc/platforms/powermac/udbg_adb.c 				t = 200000;
t                 493 arch/powerpc/platforms/ps3/repository.c 		enum ps3_interrupt_type t;
t                 497 arch/powerpc/platforms/ps3/repository.c 			repo->dev_index, res_index, &t, &id);
t                 505 arch/powerpc/platforms/ps3/repository.c 		if (t == intr_type) {
t                 531 arch/powerpc/platforms/ps3/repository.c 		enum ps3_reg_type t;
t                 536 arch/powerpc/platforms/ps3/repository.c 			repo->dev_index, res_index, &t, &a, &l);
t                 544 arch/powerpc/platforms/ps3/repository.c 		if (t == reg_type) {
t                2147 arch/powerpc/xmon/xmon.c #define SWAP(a, b, t)	((t) = (a), (a) = (b), (b) = (t))
t                2152 arch/powerpc/xmon/xmon.c 	int t;
t                2156 arch/powerpc/xmon/xmon.c 		SWAP(val[0], val[1], t);
t                2159 arch/powerpc/xmon/xmon.c 		SWAP(val[0], val[3], t);
t                2160 arch/powerpc/xmon/xmon.c 		SWAP(val[1], val[2], t);
t                2163 arch/powerpc/xmon/xmon.c 		SWAP(val[0], val[7], t);
t                2164 arch/powerpc/xmon/xmon.c 		SWAP(val[1], val[6], t);
t                2165 arch/powerpc/xmon/xmon.c 		SWAP(val[2], val[5], t);
t                2166 arch/powerpc/xmon/xmon.c 		SWAP(val[3], val[4], t);
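Illustrative note: the xmon lines above byte-reverse 2-, 4- and 8-byte values in place with a temporary-variable SWAP() macro. A minimal user-space sketch of the same idea (not xmon itself):

#include <stdio.h>

#define SWAP(a, b, t)	((t) = (a), (a) = (b), (b) = (t))

/* Reverse the byte order of an n-byte value stored in val[], n in {2, 4, 8}. */
static void byterev(unsigned char *val, int n)
{
	int t;

	switch (n) {
	case 2:
		SWAP(val[0], val[1], t);
		break;
	case 4:
		SWAP(val[0], val[3], t);
		SWAP(val[1], val[2], t);
		break;
	case 8:
		SWAP(val[0], val[7], t);
		SWAP(val[1], val[6], t);
		SWAP(val[2], val[5], t);
		SWAP(val[3], val[4], t);
		break;
	}
}

int main(void)
{
	unsigned char v[4] = { 0x12, 0x34, 0x56, 0x78 };

	byterev(v, 4);
	printf("%02x %02x %02x %02x\n", v[0], v[1], v[2], v[3]);	/* 78 56 34 12 */
	return 0;
}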
t                  32 arch/riscv/include/asm/futex.h 	  [u] "+m" (*uaddr), [t] "=&r" (tmp)			\
t                 106 arch/riscv/include/asm/futex.h 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
t                1006 arch/s390/crypto/aes_s390.c 		u8 t[GHASH_DIGEST_SIZE];/* Tag */
t                1080 arch/s390/crypto/aes_s390.c 		if (crypto_memneq(tag, param.t, taglen))
t                1083 arch/s390/crypto/aes_s390.c 		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
t                  14 arch/s390/include/asm/compat.h #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
t                  15 arch/s390/include/asm/compat.h 				typeof(0?(__force t)0:0ULL), u64))
t                  17 arch/s390/include/asm/compat.h #define __SC_DELOUSE(t,v) ({ \
t                  18 arch/s390/include/asm/compat.h 	BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
t                  19 arch/s390/include/asm/compat.h 	(__force t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
t                 136 arch/s390/include/asm/cpu_mf.h 			unsigned int t:1;	/* 2 - Timestamp format	      */
t                 297 arch/s390/include/asm/cpu_mf.h 	if (te->t)
t                 164 arch/s390/include/asm/debug.h 	unsigned int t = tag;
t                 168 arch/s390/include/asm/debug.h 	return debug_event_common(id, level, &t, sizeof(unsigned int));
t                 186 arch/s390/include/asm/debug.h 	unsigned long t = tag;
t                 190 arch/s390/include/asm/debug.h 	return debug_event_common(id, level, &t, sizeof(unsigned long));
t                 289 arch/s390/include/asm/debug.h 	unsigned int t = tag;
t                 293 arch/s390/include/asm/debug.h 	return debug_exception_common(id, level, &t, sizeof(unsigned int));
t                 312 arch/s390/include/asm/debug.h 	unsigned long t = tag;
t                 316 arch/s390/include/asm/debug.h 	return debug_exception_common(id, level, &t, sizeof(unsigned long));
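Illustrative note: the debug.h helpers above copy a scalar tag into a local variable and pass its address and size to a common event routine. A hedged user-space analogue of that wrapper shape follows; log_event() is a made-up stand-in, not the s390 debug API.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical generic sink that records 'len' raw bytes at a given level. */
static void log_event(int level, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	printf("level %d, %zu bytes:", level, len);
	for (size_t i = 0; i < len; i++)
		printf(" %02x", p[i]);
	printf("\n");
}

/* Wrappers in the style of debug_int_event()/debug_long_event(). */
static void log_int_event(int level, unsigned int tag)
{
	unsigned int t = tag;

	log_event(level, &t, sizeof(unsigned int));
}

static void log_long_event(int level, unsigned long tag)
{
	unsigned long t = tag;

	log_event(level, &t, sizeof(unsigned long));
}

int main(void)
{
	log_int_event(2, 0xdeadbeef);
	log_long_event(2, 0x12345678UL);
	return 0;
}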
t                 149 arch/s390/include/asm/fcx.h #define TSB_FORMAT(t)			((t)->flags & 7)
t                  11 arch/s390/include/asm/syscall_wrapper.h #define __SC_COMPAT_TYPE(t, a) \
t                  12 arch/s390/include/asm/syscall_wrapper.h 	__typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
t                  14 arch/s390/include/asm/syscall_wrapper.h #define __SC_COMPAT_CAST(t, a)						\
t                  18 arch/s390/include/asm/syscall_wrapper.h 	BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) &&		\
t                  19 arch/s390/include/asm/syscall_wrapper.h 		     !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) &&		\
t                  20 arch/s390/include/asm/syscall_wrapper.h 		     !__TYPE_IS_LL(t));					\
t                  21 arch/s390/include/asm/syscall_wrapper.h 	if (__TYPE_IS_L(t))						\
t                  23 arch/s390/include/asm/syscall_wrapper.h 	if (__TYPE_IS_UL(t))						\
t                  25 arch/s390/include/asm/syscall_wrapper.h 	if (__TYPE_IS_PTR(t))						\
t                  27 arch/s390/include/asm/syscall_wrapper.h 	if (__TYPE_IS_LL(t))						\
t                  29 arch/s390/include/asm/syscall_wrapper.h 	(t)__ReS;							\
t                  20 arch/s390/include/asm/sysinfo.h 	unsigned char t:1;
t                  37 arch/s390/include/uapi/asm/guarded_storage.h 			__u8 t	: 1;
t                  31 arch/s390/include/uapi/asm/runtime_instr.h 	__u32 t			: 1;
t                 159 arch/s390/include/uapi/asm/vtoc.h 	__u16 t;	/* RTA of the first track of free extent */
t                 128 arch/s390/kernel/sysinfo.c 		seq_printf(m, "Capacity Transient:   %d\n", info->t);
t                 253 arch/s390/mm/page-states.c 	unsigned long flags, order, t;
t                 264 arch/s390/mm/page-states.c 		for_each_migratetype_order(order, t) {
t                 265 arch/s390/mm/page-states.c 			list_for_each(l, &zone->free_area[order].free_list[t]) {
t                  36 arch/sh/boards/mach-dreamcast/rtc.c 	time64_t t;
t                  47 arch/sh/boards/mach-dreamcast/rtc.c 	t = (u32)(val1 - TWENTY_YEARS);
t                  49 arch/sh/boards/mach-dreamcast/rtc.c 	rtc_time64_to_tm(t, tm);
t                 614 arch/sh/boards/mach-se/7724/setup.c 	int t = 10000;
t                 616 arch/sh/boards/mach-se/7724/setup.c 	while (t--) {
t                  59 arch/sh/drivers/heartbeat.c static void heartbeat_timer(struct timer_list *t)
t                  61 arch/sh/drivers/heartbeat.c 	struct heartbeat_data *hd = from_timer(hd, t, timer);
t                  88 arch/sh/drivers/pci/common.c static void pcibios_enable_err(struct timer_list *t)
t                  90 arch/sh/drivers/pci/common.c 	struct pci_channel *hose = from_timer(hose, t, err_timer);
t                  97 arch/sh/drivers/pci/common.c static void pcibios_enable_serr(struct timer_list *t)
t                  99 arch/sh/drivers/pci/common.c 	struct pci_channel *hose = from_timer(hose, t, serr_timer);
t                  26 arch/sh/drivers/push-switch.c static void switch_timer(struct timer_list *t)
t                  28 arch/sh/drivers/push-switch.c 	struct push_switch *psw = from_timer(psw, t, debounce);
t                 265 arch/sh/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
t                 268 arch/sh/kernel/hw_breakpoint.c 		unregister_hw_breakpoint(t->ptrace_bps[i]);
t                 269 arch/sh/kernel/hw_breakpoint.c 		t->ptrace_bps[i] = NULL;
t                  60 arch/sh/math-emu/math.c 	{u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];}
t                  62 arch/sh/math-emu/math.c 	{u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);}
t                  29 arch/sh/math-emu/sfp-util.h #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
t                  30 arch/sh/math-emu/sfp-util.h #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
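Illustrative note: __ll_lowpart()/__ll_highpart() above split a word into halves so a double-width product can be assembled from half-width multiplies. A stand-alone sketch of that technique, with UWtype, W_TYPE_SIZE and __ll_B redefined here for a 32-bit word purely for the example:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t UWtype;
#define W_TYPE_SIZE	32
#define __ll_B		((UWtype)1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t)	((UWtype)(t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype)(t) >> (W_TYPE_SIZE / 2))

/* 32x32 -> 64 multiply built only from 16x16 -> 32 partial products. */
static uint64_t mul_by_halves(UWtype u, UWtype v)
{
	UWtype ul = __ll_lowpart(u), uh = __ll_highpart(u);
	UWtype vl = __ll_lowpart(v), vh = __ll_highpart(v);
	uint64_t low = (uint64_t)ul * vl;
	uint64_t mid = (uint64_t)ul * vh + (uint64_t)uh * vl;
	uint64_t high = (uint64_t)uh * vh;

	return low + (mid << (W_TYPE_SIZE / 2)) + (high << W_TYPE_SIZE);
}

int main(void)
{
	UWtype a = 0x89abcdef, b = 0x12345678;

	printf("%llx == %llx\n",
	       (unsigned long long)mul_by_halves(a, b),
	       (unsigned long long)a * b);
	return 0;
}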
t                  24 arch/sparc/include/asm/bitext.h int bit_map_string_get(struct bit_map *t, int len, int align);
t                  25 arch/sparc/include/asm/bitext.h void bit_map_clear(struct bit_map *t, int offset, int len);
t                  26 arch/sparc/include/asm/bitext.h void bit_map_init(struct bit_map *t, unsigned long *map, int size);
t                  15 arch/sparc/include/asm/string.h #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
t                  19 arch/sparc/include/uapi/asm/signal.h #define    SUBSIG_BADTRAP(t)  (0x80 + (t))
t                 296 arch/sparc/kernel/cpumap.c static void increment_rover(struct cpuinfo_tree *t, int node_index,
t                 299 arch/sparc/kernel/cpumap.c 	struct cpuinfo_node *node = &t->nodes[node_index];
t                 302 arch/sparc/kernel/cpumap.c 	top_level = t->nodes[root_index].level;
t                 314 arch/sparc/kernel/cpumap.c 		node = &t->nodes[node->parent_index];
t                 318 arch/sparc/kernel/cpumap.c static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
t                 340 arch/sparc/kernel/cpumap.c 	for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
t                 342 arch/sparc/kernel/cpumap.c 		new_index = t->nodes[index].rover;
t                 344 arch/sparc/kernel/cpumap.c 			increment_rover(t, index, root_index, rover_inc_table);
t                  49 arch/sparc/kernel/kgdb_32.c 	struct thread_info *t = task_thread_info(p);
t                  55 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_G6] = (unsigned long) t;
t                  59 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_SP] = t->ksp;
t                  62 arch/sparc/kernel/kgdb_32.c 	win = (struct reg_window32 *) t->ksp;
t                  73 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_PSR] = t->kpsr;
t                  74 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_WIM] = t->kwim;
t                  76 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_PC] = t->kpc;
t                  77 arch/sparc/kernel/kgdb_32.c 	gdb_regs[GDB_NPC] = t->kpc + 4;
t                  47 arch/sparc/kernel/kgdb_64.c 	struct thread_info *t = task_thread_info(p);
t                  56 arch/sparc/kernel/kgdb_64.c 	gdb_regs[GDB_G6] = (unsigned long) t;
t                  60 arch/sparc/kernel/kgdb_64.c 	gdb_regs[GDB_SP] = t->ksp;
t                  63 arch/sparc/kernel/kgdb_64.c 	win = (struct reg_window *) (t->ksp + STACK_BIAS);
t                  72 arch/sparc/kernel/kgdb_64.c 	if (t->new_child)
t                  80 arch/sparc/kernel/kgdb_64.c 	cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP];
t                 224 arch/sparc/kernel/ldc.c 	unsigned long t;
t                 226 arch/sparc/kernel/ldc.c 	t = tx_advance(lp, lp->tx_tail);
t                 227 arch/sparc/kernel/ldc.c 	if (t == lp->tx_head)
t                 230 arch/sparc/kernel/ldc.c 	*new_tail = t;
t                 279 arch/sparc/kernel/ldc.c 	unsigned long h, t;
t                 282 arch/sparc/kernel/ldc.c 	t = tx_advance(lp, lp->tx_tail);
t                 283 arch/sparc/kernel/ldc.c 	if (t == h)
t                 286 arch/sparc/kernel/ldc.c 	*new_tail = t;
t                 979 arch/sparc/kernel/mdesc.c 			u64 t = mdesc_arc_target(hp, a);
t                 983 arch/sparc/kernel/mdesc.c 			name = mdesc_node_name(hp, t);
t                 987 arch/sparc/kernel/mdesc.c 			id = mdesc_get_property(hp, t, "id", NULL);
t                1017 arch/sparc/kernel/mdesc.c 		u64 t = mdesc_arc_target(hp, a);
t                1021 arch/sparc/kernel/mdesc.c 		name = mdesc_node_name(hp, t);
t                1025 arch/sparc/kernel/mdesc.c 		id = mdesc_get_property(hp, t, "id", NULL);
t                1204 arch/sparc/kernel/mdesc.c 		u64 j, t = mdesc_arc_target(hp, a);
t                1207 arch/sparc/kernel/mdesc.c 		t_name = mdesc_node_name(hp, t);
t                1209 arch/sparc/kernel/mdesc.c 			fill_in_one_cache(c, hp, t);
t                1213 arch/sparc/kernel/mdesc.c 		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
t                 413 arch/sparc/kernel/process_64.c 	struct thread_info *t = task_thread_info(tsk);
t                 415 arch/sparc/kernel/process_64.c 	if (t->utraps) {
t                 416 arch/sparc/kernel/process_64.c 		if (t->utraps[0] < 2)
t                 417 arch/sparc/kernel/process_64.c 			kfree (t->utraps);
t                 419 arch/sparc/kernel/process_64.c 			t->utraps[0]--;
t                 425 arch/sparc/kernel/process_64.c 	struct thread_info *t = current_thread_info();
t                 428 arch/sparc/kernel/process_64.c 	mm = t->task->mm;
t                 435 arch/sparc/kernel/process_64.c 	t->fpsaved[0] = 0;
t                 481 arch/sparc/kernel/process_64.c 				       struct thread_info *t)
t                 486 arch/sparc/kernel/process_64.c 		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
t                 487 arch/sparc/kernel/process_64.c 		memcpy(&t->reg_window[i], &t->reg_window[i+1],
t                 494 arch/sparc/kernel/process_64.c 	struct thread_info *t = current_thread_info();
t                 501 arch/sparc/kernel/process_64.c 			struct reg_window *rwin = &t->reg_window[window];
t                 505 arch/sparc/kernel/process_64.c 			sp = t->rwbuf_stkptrs[window];
t                 513 arch/sparc/kernel/process_64.c 				shift_window_buffer(window, get_thread_wsaved() - 1, t);
t                 532 arch/sparc/kernel/process_64.c 	struct thread_info *t = current_thread_info();
t                 541 arch/sparc/kernel/process_64.c 			struct reg_window *rwin = &t->reg_window[window];
t                 545 arch/sparc/kernel/process_64.c 			orig_sp = sp = t->rwbuf_stkptrs[window];
t                 618 arch/sparc/kernel/process_64.c 	struct thread_info *t = task_thread_info(p);
t                 629 arch/sparc/kernel/process_64.c 	t->new_child = 1;
t                 630 arch/sparc/kernel/process_64.c 	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t                 631 arch/sparc/kernel/process_64.c 	t->kregs = (struct pt_regs *) (child_trap_frame +
t                 633 arch/sparc/kernel/process_64.c 	t->fpsaved[0] = 0;
t                 637 arch/sparc/kernel/process_64.c 		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = 
t                 639 arch/sparc/kernel/process_64.c 		t->current_ds = ASI_P;
t                 640 arch/sparc/kernel/process_64.c 		t->kregs->u_regs[UREG_G1] = sp; /* function */
t                 641 arch/sparc/kernel/process_64.c 		t->kregs->u_regs[UREG_G2] = arg;
t                 647 arch/sparc/kernel/process_64.c 	if (t->flags & _TIF_32BIT) {
t                 651 arch/sparc/kernel/process_64.c 	t->kregs->u_regs[UREG_FP] = sp;
t                 652 arch/sparc/kernel/process_64.c 	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = 
t                 654 arch/sparc/kernel/process_64.c 	t->current_ds = ASI_AIUS;
t                 661 arch/sparc/kernel/process_64.c 		t->kregs->u_regs[UREG_FP] = csp;
t                 663 arch/sparc/kernel/process_64.c 	if (t->utraps)
t                 664 arch/sparc/kernel/process_64.c 		t->utraps[0]++;
t                 667 arch/sparc/kernel/process_64.c 	t->kregs->u_regs[UREG_I0] = current->pid;
t                 668 arch/sparc/kernel/process_64.c 	t->kregs->u_regs[UREG_I1] = 1;
t                 674 arch/sparc/kernel/process_64.c 		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
t                 817 arch/sparc/kernel/prom_irqtrans.c 			struct irq_trans *t = &pci_irq_trans_table[i];
t                 819 arch/sparc/kernel/prom_irqtrans.c 			if (!strcmp(model, t->name)) {
t                 820 arch/sparc/kernel/prom_irqtrans.c 				t->init(dp);
t                 102 arch/sparc/kernel/sigutil_32.c 	struct thread_info *t = current_thread_info();
t                 114 arch/sparc/kernel/sigutil_32.c 		err |= copy_from_user(&t->reg_window[i],
t                 117 arch/sparc/kernel/sigutil_32.c 		err |= __get_user(t->rwbuf_stkptrs[i],
t                 123 arch/sparc/kernel/sigutil_32.c 	t->w_saved = wsaved;
t                 125 arch/sparc/kernel/sigutil_32.c 	if (t->w_saved)
t                  76 arch/sparc/kernel/sigutil_64.c 	struct thread_info *t = current_thread_info();
t                  88 arch/sparc/kernel/sigutil_64.c 		err |= copy_from_user(&t->reg_window[i],
t                  91 arch/sparc/kernel/sigutil_64.c 		err |= __get_user(t->rwbuf_stkptrs[i],
t                 213 arch/sparc/kernel/smp_64.c 	} t[NUM_ROUNDS];
t                 238 arch/sparc/kernel/smp_64.c 			t[i].rt = rt;
t                 239 arch/sparc/kernel/smp_64.c 			t[i].master = master_time_stamp;
t                 240 arch/sparc/kernel/smp_64.c 			t[i].diff = delta;
t                 241 arch/sparc/kernel/smp_64.c 			t[i].lat = adjust_latency/4;
t                 250 arch/sparc/kernel/smp_64.c 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
t                1108 arch/sparc/kernel/smp_64.c 	struct tlb_pending_info *t = info;
t                1110 arch/sparc/kernel/smp_64.c 	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
t                  19 arch/sparc/kernel/stacktrace.c 	struct task_struct *t;
t                  32 arch/sparc/kernel/stacktrace.c 	t = tp->task;
t                  62 arch/sparc/kernel/stacktrace.c 				ret_stack = ftrace_graph_get_ret_stack(t,
t                2834 arch/sparc/kernel/traps_64.c void notrace init_cur_cpu_trap(struct thread_info *t)
t                2839 arch/sparc/kernel/traps_64.c 	p->thread = t;
t                 304 arch/sparc/kernel/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *t)
t                 807 arch/sparc/kernel/viohs.c static void vio_port_timer(struct timer_list *t)
t                 809 arch/sparc/kernel/viohs.c 	struct vio_driver_state *vio = from_timer(vio, t, timer);
t                  28 arch/sparc/lib/bitext.c int bit_map_string_get(struct bit_map *t, int len, int align)
t                  35 arch/sparc/lib/bitext.c 	if (t->num_colors) {
t                  38 arch/sparc/lib/bitext.c 		align = t->num_colors;
t                  47 arch/sparc/lib/bitext.c 	if (align < 0 || align >= t->size)
t                  49 arch/sparc/lib/bitext.c 	if (len <= 0 || len > t->size)
t                  53 arch/sparc/lib/bitext.c 	spin_lock(&t->lock);
t                  54 arch/sparc/lib/bitext.c 	if (len < t->last_size)
t                  55 arch/sparc/lib/bitext.c 		offset = t->first_free;
t                  57 arch/sparc/lib/bitext.c 		offset = t->last_off & ~align1;
t                  60 arch/sparc/lib/bitext.c 		off_new = find_next_zero_bit(t->map, t->size, offset);
t                  64 arch/sparc/lib/bitext.c 		if (offset >= t->size)
t                  66 arch/sparc/lib/bitext.c 		if (count + len > t->size) {
t                  67 arch/sparc/lib/bitext.c 			spin_unlock(&t->lock);
t                  70 arch/sparc/lib/bitext.c   t->size, t->used, offset, len, align, count);
t                  74 arch/sparc/lib/bitext.c 		if (offset + len > t->size) {
t                  75 arch/sparc/lib/bitext.c 			count += t->size - offset;
t                  81 arch/sparc/lib/bitext.c 		while (test_bit(offset + i, t->map) == 0) {
t                  84 arch/sparc/lib/bitext.c 				bitmap_set(t->map, offset, len);
t                  85 arch/sparc/lib/bitext.c 				if (offset == t->first_free)
t                  86 arch/sparc/lib/bitext.c 					t->first_free = find_next_zero_bit
t                  87 arch/sparc/lib/bitext.c 							(t->map, t->size,
t                  88 arch/sparc/lib/bitext.c 							 t->first_free + len);
t                  89 arch/sparc/lib/bitext.c 				if ((t->last_off = offset + len) >= t->size)
t                  90 arch/sparc/lib/bitext.c 					t->last_off = 0;
t                  91 arch/sparc/lib/bitext.c 				t->used += len;
t                  92 arch/sparc/lib/bitext.c 				t->last_size = len;
t                  93 arch/sparc/lib/bitext.c 				spin_unlock(&t->lock);
t                  98 arch/sparc/lib/bitext.c 		if ((offset += i + 1) >= t->size)
t                 103 arch/sparc/lib/bitext.c void bit_map_clear(struct bit_map *t, int offset, int len)
t                 107 arch/sparc/lib/bitext.c 	if (t->used < len)
t                 109 arch/sparc/lib/bitext.c 	spin_lock(&t->lock);
t                 111 arch/sparc/lib/bitext.c 		if (test_bit(offset + i, t->map) == 0)
t                 113 arch/sparc/lib/bitext.c 		__clear_bit(offset + i, t->map);
t                 115 arch/sparc/lib/bitext.c 	if (offset < t->first_free)
t                 116 arch/sparc/lib/bitext.c 		t->first_free = offset;
t                 117 arch/sparc/lib/bitext.c 	t->used -= len;
t                 118 arch/sparc/lib/bitext.c 	spin_unlock(&t->lock);
t                 121 arch/sparc/lib/bitext.c void bit_map_init(struct bit_map *t, unsigned long *map, int size)
t                 124 arch/sparc/lib/bitext.c 	memset(t, 0, sizeof *t);
t                 125 arch/sparc/lib/bitext.c 	spin_lock_init(&t->lock);
t                 126 arch/sparc/lib/bitext.c 	t->map = map;
t                 127 arch/sparc/lib/bitext.c 	t->size = size;
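Illustrative note: bit_map_string_get()/bit_map_clear() above implement a lock-protected first-fit bitmap allocator with alignment and coloring. The much-simplified user-space analogue below keeps only the first-fit search and is not the sparc code:

#include <stdio.h>
#include <string.h>

#define MAP_BITS 64

static unsigned char map[MAP_BITS];	/* one byte per bit, for simplicity */

static int test_bit(int i)		{ return map[i]; }
static void set_bits(int off, int len)	{ memset(map + off, 1, len); }
static void clear_bits(int off, int len) { memset(map + off, 0, len); }

/* Return the offset of the first run of 'len' clear bits, or -1 if none. */
static int bit_string_get(int len)
{
	for (int off = 0; off + len <= MAP_BITS; off++) {
		int i;

		for (i = 0; i < len && !test_bit(off + i); i++)
			;
		if (i == len) {
			set_bits(off, len);
			return off;
		}
	}
	return -1;
}

int main(void)
{
	int a = bit_string_get(8);
	int b = bit_string_get(4);

	printf("a=%d b=%d\n", a, b);		/* 0 and 8 */
	clear_bits(a, 8);
	printf("reused=%d\n", bit_string_get(8));	/* 0 again */
	return 0;
}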
t                 278 arch/um/drivers/net_kern.c static void uml_net_user_timer_expire(struct timer_list *t)
t                 281 arch/um/drivers/net_kern.c 	struct uml_net_private *lp = from_timer(lp, t, tl);
t                1446 arch/um/drivers/vector_kern.c static void vector_timer_expire(struct timer_list *t)
t                1448 arch/um/drivers/vector_kern.c 	struct vector_private *vp = from_timer(vp, t, tl);
t                  62 arch/um/include/shared/kern_util.h extern int singlestepping(void *t);
t                  65 arch/um/include/shared/os.h 	unsigned int t : 1;	/* O_TRUNC */
t                  72 arch/um/include/shared/os.h 					  .t = 0, .a = 0, .e = 0, .cl = 0 })
t                 112 arch/um/include/shared/os.h 	flags.t = 1;
t                 379 arch/um/kernel/process.c int singlestepping(void * t)
t                 381 arch/um/kernel/process.c 	struct task_struct *task = t ? t : current;
t                 443 arch/um/kernel/process.c int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
t                  26 arch/um/kernel/reboot.c 		struct task_struct *t;
t                  28 arch/um/kernel/reboot.c 		t = find_lock_task_mm(p);
t                  29 arch/um/kernel/reboot.c 		if (!t)
t                  31 arch/um/kernel/reboot.c 		pid = t->mm->context.id.u.pid;
t                  32 arch/um/kernel/reboot.c 		task_unlock(t);
t                 192 arch/um/os-Linux/file.c 	if (flags.t)
t                  45 arch/um/os-Linux/time.c 	timer_t *t = &event_high_res_timer;
t                  47 arch/um/os-Linux/time.c 	if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1)
t                  62 arch/unicore32/include/asm/elf.h int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
t                 252 arch/unicore32/kernel/process.c int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
t                 254 arch/unicore32/kernel/process.c 	elf_core_copy_regs(elfregs, task_pt_regs(t));
t                 775 arch/x86/crypto/camellia_glue.c 	u64 t = l;					\
t                 777 arch/x86/crypto/camellia_glue.c 	r = (r << bits) | (t >> (64 - bits));		\
t                  18 arch/x86/entry/vdso/vclock_gettime.c extern time_t __vdso_time(time_t *t);
t                  28 arch/x86/entry/vdso/vclock_gettime.c time_t __vdso_time(time_t *t)
t                  30 arch/x86/entry/vdso/vclock_gettime.c 	return __cvdso_time(t);
t                  33 arch/x86/entry/vdso/vclock_gettime.c time_t time(time_t *t)	__attribute__((weak, alias("__vdso_time")));
t                4236 arch/x86/events/intel/core.c EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
t                 598 arch/x86/events/intel/pt.c #define TOPA_ENTRY(t, i)				\
t                 600 arch/x86/events/intel/pt.c 		? &topa_to_page(t)->table[(t)->last]	\
t                 601 arch/x86/events/intel/pt.c 		: &topa_to_page(t)->table[(i)])
t                 602 arch/x86/events/intel/pt.c #define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
t                 603 arch/x86/events/intel/pt.c #define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
t                 867 arch/x86/events/intel/uncore_snb.c #define for_each_imc_pci_id(x, t) \
t                 868 arch/x86/events/intel/uncore_snb.c 	for (x = (t); (x)->pci_id; x++)
t                 123 arch/x86/include/asm/desc.h #define load_TLS(t, cpu)			native_load_tls(t, cpu)
t                 276 arch/x86/include/asm/desc.h static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
t                 282 arch/x86/include/asm/desc.h 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
t                 172 arch/x86/include/asm/elf.h static inline void elf_common_init(struct thread_struct *t,
t                 180 arch/x86/include/asm/elf.h 	t->fsbase = t->gsbase = 0;
t                 181 arch/x86/include/asm/elf.h 	t->fsindex = t->gsindex = 0;
t                 182 arch/x86/include/asm/elf.h 	t->ds = t->es = ds;
t                 332 arch/x86/include/asm/mce.h extern const char *smca_get_long_name(enum smca_bank_types t);
t                  73 arch/x86/include/asm/msr.h #define msr_tracepoint_active(t) static_key_false(&(t).key)
t                  78 arch/x86/include/asm/msr.h #define msr_tracepoint_active(t) false
t                  48 arch/x86/include/asm/nmi.h #define register_nmi_handler(t, fn, fg, n, init...)	\
t                  55 arch/x86/include/asm/nmi.h 	__register_nmi_handler((t), &fn##_na);		\
t                 269 arch/x86/include/asm/paravirt.h static inline void load_TLS(struct thread_struct *t, unsigned cpu)
t                 271 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.load_tls, t, cpu);
t                 128 arch/x86/include/asm/paravirt_types.h 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
t                  88 arch/x86/include/asm/perf_event_p4.h 		u32 t = p4_config_unpack_escr((v));	\
t                  89 arch/x86/include/asm/perf_event_p4.h 		t = t &  P4_ESCR_EVENTMASK_MASK;	\
t                  90 arch/x86/include/asm/perf_event_p4.h 		t = t >> P4_ESCR_EVENTMASK_SHIFT;	\
t                  91 arch/x86/include/asm/perf_event_p4.h 		t;					\
t                  96 arch/x86/include/asm/perf_event_p4.h 		u32 t = p4_config_unpack_escr((v));	\
t                  97 arch/x86/include/asm/perf_event_p4.h 		t = t &  P4_ESCR_EVENT_MASK;		\
t                  98 arch/x86/include/asm/perf_event_p4.h 		t = t >> P4_ESCR_EVENT_SHIFT;		\
t                  99 arch/x86/include/asm/perf_event_p4.h 		t;					\
t                 171 arch/x86/include/asm/string_32.h #define memcpy(t, f, n)				\
t                 173 arch/x86/include/asm/string_32.h 	 ? __constant_memcpy3d((t), (f), (n))	\
t                 174 arch/x86/include/asm/string_32.h 	 : __memcpy3d((t), (f), (n)))
t                 182 arch/x86/include/asm/string_32.h #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
t                 181 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                 782 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                 821 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                1293 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                1340 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                4209 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	t:1;				/* RO */
t                  54 arch/x86/include/asm/vm86.h #define free_vm86(t) do {				\
t                  55 arch/x86/include/asm/vm86.h 	struct thread_struct *__t = (t);		\
t                  88 arch/x86/include/asm/vm86.h #define free_vm86(t) do { } while(0)
t                 851 arch/x86/kernel/apm_32.c 		static unsigned long t;
t                 856 arch/x86/kernel/apm_32.c 		if (++t < 5) {
t                 858 arch/x86/kernel/apm_32.c 			t = jiffies;
t                 934 arch/x86/kernel/apm_32.c 			unsigned int t;
t                 936 arch/x86/kernel/apm_32.c 			t = jiffies;
t                 940 arch/x86/kernel/apm_32.c 				if (t != jiffies) {
t                1858 arch/x86/kernel/cpu/common.c 	struct tss_struct *t;
t                1866 arch/x86/kernel/cpu/common.c 	t = &per_cpu(cpu_tss_rw, cpu);
t                1904 arch/x86/kernel/cpu/common.c 	if (!t->x86_tss.ist[0]) {
t                1905 arch/x86/kernel/cpu/common.c 		t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
t                1906 arch/x86/kernel/cpu/common.c 		t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
t                1907 arch/x86/kernel/cpu/common.c 		t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
t                1908 arch/x86/kernel/cpu/common.c 		t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
t                1911 arch/x86/kernel/cpu/common.c 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
t                1918 arch/x86/kernel/cpu/common.c 		t->io_bitmap[i] = ~0UL;
t                1953 arch/x86/kernel/cpu/common.c 	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
t                1988 arch/x86/kernel/cpu/common.c 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
t                 102 arch/x86/kernel/cpu/mce/amd.c static const char *smca_get_name(enum smca_bank_types t)
t                 104 arch/x86/kernel/cpu/mce/amd.c 	if (t >= N_SMCA_BANK_TYPES)
t                 107 arch/x86/kernel/cpu/mce/amd.c 	return smca_names[t].name;
t                 110 arch/x86/kernel/cpu/mce/amd.c const char *smca_get_long_name(enum smca_bank_types t)
t                 112 arch/x86/kernel/cpu/mce/amd.c 	if (t >= N_SMCA_BANK_TYPES)
t                 115 arch/x86/kernel/cpu/mce/amd.c 	return smca_names[t].long_name;
t                 848 arch/x86/kernel/cpu/mce/core.c static int mce_timed_out(u64 *t, const char *msg)
t                 861 arch/x86/kernel/cpu/mce/core.c 	if ((s64)*t < SPINUNIT) {
t                 867 arch/x86/kernel/cpu/mce/core.c 	*t -= SPINUNIT;
t                1401 arch/x86/kernel/cpu/mce/core.c static void __start_timer(struct timer_list *t, unsigned long interval)
t                1408 arch/x86/kernel/cpu/mce/core.c 	if (!timer_pending(t) || time_before(when, t->expires))
t                1409 arch/x86/kernel/cpu/mce/core.c 		mod_timer(t, round_jiffies(when));
t                1414 arch/x86/kernel/cpu/mce/core.c static void mce_timer_fn(struct timer_list *t)
t                1419 arch/x86/kernel/cpu/mce/core.c 	WARN_ON(cpu_t != t);
t                1443 arch/x86/kernel/cpu/mce/core.c 	__start_timer(t, iv);
t                1451 arch/x86/kernel/cpu/mce/core.c 	struct timer_list *t = this_cpu_ptr(&mce_timer);
t                1454 arch/x86/kernel/cpu/mce/core.c 	__start_timer(t, interval);
t                1807 arch/x86/kernel/cpu/mce/core.c static void mce_start_timer(struct timer_list *t)
t                1815 arch/x86/kernel/cpu/mce/core.c 	__start_timer(t, iv);
t                1820 arch/x86/kernel/cpu/mce/core.c 	struct timer_list *t = this_cpu_ptr(&mce_timer);
t                1822 arch/x86/kernel/cpu/mce/core.c 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
t                1827 arch/x86/kernel/cpu/mce/core.c 	struct timer_list *t = this_cpu_ptr(&mce_timer);
t                1829 arch/x86/kernel/cpu/mce/core.c 	timer_setup(t, mce_timer_fn, TIMER_PINNED);
t                1830 arch/x86/kernel/cpu/mce/core.c 	mce_start_timer(t);
t                2381 arch/x86/kernel/cpu/mce/core.c 	struct timer_list *t = this_cpu_ptr(&mce_timer);
t                2392 arch/x86/kernel/cpu/mce/core.c 	mce_start_timer(t);
t                2398 arch/x86/kernel/cpu/mce/core.c 	struct timer_list *t = this_cpu_ptr(&mce_timer);
t                2401 arch/x86/kernel/cpu/mce/core.c 	del_timer_sync(t);
t                  32 arch/x86/kernel/cpu/mce/genpool.c static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l)
t                  37 arch/x86/kernel/cpu/mce/genpool.c 	m1 = &t->mce;
t                  59 arch/x86/kernel/cpu/mce/genpool.c 	struct mce_evt_llist *node, *t;
t                  66 arch/x86/kernel/cpu/mce/genpool.c 	llist_for_each_entry_safe(node, t, head, llnode) {
t                  67 arch/x86/kernel/cpu/mce/genpool.c 		if (!is_duplicate_mce_record(node, t))
t                 529 arch/x86/kernel/cpu/microcode/core.c static int __wait_for_cpus(atomic_t *t, long long timeout)
t                 533 arch/x86/kernel/cpu/microcode/core.c 	atomic_inc(t);
t                 535 arch/x86/kernel/cpu/microcode/core.c 	while (atomic_read(t) < all_cpus) {
t                 538 arch/x86/kernel/cpu/microcode/core.c 				all_cpus - atomic_read(t));
t                 599 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
t                 605 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
t                 606 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
t                 607 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
t                 701 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
t                 704 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
t                 705 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
t                 706 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
t                 707 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			seq_printf(s, "%d\n", t->pid);
t                2162 arch/x86/kernel/cpu/resctrl/rdtgroup.c static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
t                2165 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
t                2168 arch/x86/kernel/cpu/resctrl/rdtgroup.c static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
t                2171 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
t                2185 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
t                2188 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
t                2189 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		if (!from || is_closid_match(t, from) ||
t                2190 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		    is_rmid_match(t, from)) {
t                2191 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			t->closid = to->closid;
t                2192 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			t->rmid = to->mon.rmid;
t                2204 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			if (mask && t->on_cpu)
t                2205 arch/x86/kernel/cpu/resctrl/rdtgroup.c 				cpumask_set_cpu(task_cpu(t), mask);
t                  37 arch/x86/kernel/doublefault.c 			struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
t                  40 arch/x86/kernel/doublefault.c 			       t->ip, t->sp);
t                  43 arch/x86/kernel/doublefault.c 				t->ax, t->bx, t->cx, t->dx);
t                  45 arch/x86/kernel/doublefault.c 				t->si, t->di);
t                 400 arch/x86/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
t                 403 arch/x86/kernel/hw_breakpoint.c 		unregister_hw_breakpoint(t->ptrace_bps[i]);
t                 404 arch/x86/kernel/hw_breakpoint.c 		t->ptrace_bps[i] = NULL;
t                 407 arch/x86/kernel/hw_breakpoint.c 	t->debugreg6 = 0;
t                 408 arch/x86/kernel/hw_breakpoint.c 	t->ptrace_dr7 = 0;
t                 218 arch/x86/kernel/idt.c idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
t                 222 arch/x86/kernel/idt.c 	for (; size > 0; t++, size--) {
t                 223 arch/x86/kernel/idt.c 		idt_init_desc(&desc, t);
t                 224 arch/x86/kernel/idt.c 		write_idt_entry(idt, t->vector, &desc);
t                 226 arch/x86/kernel/idt.c 			set_bit(t->vector, system_vectors);
t                  29 arch/x86/kernel/ioport.c 	struct thread_struct *t = &current->thread;
t                  44 arch/x86/kernel/ioport.c 	if (!t->io_bitmap_ptr) {
t                  51 arch/x86/kernel/ioport.c 		t->io_bitmap_ptr = bitmap;
t                  75 arch/x86/kernel/ioport.c 		bitmap_clear(t->io_bitmap_ptr, from, num);
t                  77 arch/x86/kernel/ioport.c 		bitmap_set(t->io_bitmap_ptr, from, num);
t                  85 arch/x86/kernel/ioport.c 		if (t->io_bitmap_ptr[i] != ~0UL)
t                  89 arch/x86/kernel/ioport.c 	bytes_updated = max(bytes, t->io_bitmap_max);
t                  91 arch/x86/kernel/ioport.c 	t->io_bitmap_max = bytes;
t                  94 arch/x86/kernel/ioport.c 	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
t                 119 arch/x86/kernel/ioport.c 	struct thread_struct *t = &current->thread;
t                 125 arch/x86/kernel/ioport.c 	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
t                 137 arch/x86/kernel/ioport.c 	t->iopl = level << X86_EFLAGS_IOPL_BIT;
t                 138 arch/x86/kernel/ioport.c 	set_iopl_mask(t->iopl);
t                 874 arch/x86/kernel/pci-calgary_64.c static void calgary_watchdog(struct timer_list *t)
t                 876 arch/x86/kernel/pci-calgary_64.c 	struct iommu_table *tbl = from_timer(tbl, t, watchdog_timer);
t                 112 arch/x86/kernel/process.c 	struct thread_struct *t = &tsk->thread;
t                 113 arch/x86/kernel/process.c 	unsigned long *bp = t->io_bitmap_ptr;
t                 114 arch/x86/kernel/process.c 	struct fpu *fpu = &t->fpu;
t                 119 arch/x86/kernel/process.c 		t->io_bitmap_ptr = NULL;
t                 124 arch/x86/kernel/process.c 		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t                 125 arch/x86/kernel/process.c 		t->io_bitmap_max = 0;
t                 130 arch/x86/kernel/process.c 	free_vm86(t);
t                 637 arch/x86/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
t                 638 arch/x86/kernel/ptrace.c 	struct perf_event *bp = t->ptrace_bps[nr];
t                 659 arch/x86/kernel/ptrace.c 			t->ptrace_bps[nr] = bp;
t                  40 arch/x86/kernel/tce_64.c 	u64 t;
t                  43 arch/x86/kernel/tce_64.c 	t = (1 << TCE_READ_SHIFT);
t                  45 arch/x86/kernel/tce_64.c 		t |= (1 << TCE_WRITE_SHIFT);
t                  51 arch/x86/kernel/tce_64.c 		t &= ~TCE_RPN_MASK;
t                  52 arch/x86/kernel/tce_64.c 		t |= (rpn << TCE_RPN_SHIFT);
t                  54 arch/x86/kernel/tce_64.c 		*tp = cpu_to_be64(t);
t                  23 arch/x86/kernel/tls.c 	struct thread_struct *t = &current->thread;
t                  27 arch/x86/kernel/tls.c 		if (desc_empty(&t->tls_array[idx]))
t                  87 arch/x86/kernel/tls.c 	struct thread_struct *t = &p->thread;
t                  88 arch/x86/kernel/tls.c 	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
t                 105 arch/x86/kernel/tls.c 	if (t == &current->thread)
t                 106 arch/x86/kernel/tls.c 		load_TLS(t, cpu);
t                 251 arch/x86/kernel/tls.c 	struct thread_struct *t = &target->thread;
t                 253 arch/x86/kernel/tls.c 	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
t                 944 arch/x86/kernel/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *t)
t                 946 arch/x86/kernel/uprobes.c 	if (t->thread.trap_nr != UPROBE_TRAP_NR)
t                 535 arch/x86/kvm/cpuid.c 		int t, times = entry->eax & 0xff;
t                 538 arch/x86/kvm/cpuid.c 		for (t = 1; t < times; ++t) {
t                 542 arch/x86/kvm/cpuid.c 			do_host_cpuid(&entry[t], function, 0);
t                 684 arch/x86/kvm/cpuid.c 		int t, times = entry->eax;
t                 689 arch/x86/kvm/cpuid.c 		for (t = 1; t <= times; ++t) {
t                 692 arch/x86/kvm/cpuid.c 			do_host_cpuid(&entry[t], function, t);
t                 118 arch/x86/kvm/i8254.c 	s64 d, t;
t                 121 arch/x86/kvm/i8254.c 	t = kpit_elapsed(pit, c, channel);
t                 122 arch/x86/kvm/i8254.c 	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
t                 145 arch/x86/kvm/i8254.c 	s64 d, t;
t                 148 arch/x86/kvm/i8254.c 	t = kpit_elapsed(pit, c, channel);
t                 149 arch/x86/kvm/i8254.c 	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
t                  49 arch/x86/kvm/mtrr.c static bool valid_mtrr_type(unsigned t)
t                  51 arch/x86/kvm/mtrr.c 	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
t                  31 arch/x86/kvm/tss.h 	u16 t;
t                2117 arch/x86/kvm/x86.c static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
t                2131 arch/x86/kvm/x86.c 	*t = ns;
t                5819 arch/x86/kvm/x86.c #define CMPXCHG_TYPE(t, ptr, old, new) \
t                5820 arch/x86/kvm/x86.c 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
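Illustrative note: CMPXCHG_TYPE() above casts an emulator buffer to the operand type and compares-and-exchanges at that width. A hedged user-space sketch of the same type-parameterised shape, built on the GCC/Clang __atomic builtins rather than the kernel's cmpxchg(); unlike the kernel helper, the builtin also writes the observed value back into *old on failure.

#include <stdio.h>
#include <stdint.h>

/*
 * Returns nonzero only if *(t *)ptr still equals *(t *)old, in which case
 * *(t *)new is stored; mirrors the CMPXCHG_TYPE() shape, not its exact semantics.
 */
#define CMPXCHG_TYPE(t, ptr, old, new)					\
	__atomic_compare_exchange_n((t *)(ptr), (t *)(old), *(t *)(new),\
				    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)

int main(void)
{
	uint32_t mem = 5, expected = 5, desired = 9;

	if (CMPXCHG_TYPE(uint32_t, &mem, &expected, &desired))
		printf("swapped, mem=%u\n", mem);	/* mem is now 9 */

	expected = 5;	/* stale expectation: the exchange now fails */
	desired = 7;
	if (!CMPXCHG_TYPE(uint32_t, &mem, &expected, &desired))
		printf("failed, mem=%u, observed value written back: %u\n",
		       mem, expected);
	return 0;
}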
t                  17 arch/x86/lib/insn.c #define validate_next(t, insn, n)	\
t                  18 arch/x86/lib/insn.c 	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
t                  20 arch/x86/lib/insn.c #define __get_next(t, insn)	\
t                  21 arch/x86/lib/insn.c 	({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
t                  23 arch/x86/lib/insn.c #define __peek_nbyte_next(t, insn, n)	\
t                  24 arch/x86/lib/insn.c 	({ t r = *(t*)((insn)->next_byte + n); r; })
t                  26 arch/x86/lib/insn.c #define get_next(t, insn)	\
t                  27 arch/x86/lib/insn.c 	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
t                  29 arch/x86/lib/insn.c #define peek_nbyte_next(t, insn, n)	\
t                  30 arch/x86/lib/insn.c 	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
t                  32 arch/x86/lib/insn.c #define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
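Illustrative note: the insn.c macros above only read the next field of a given type after validate_next() shows it fits inside the buffer, bailing to err_out otherwise. A compact stand-alone sketch of that validate-then-read pattern (GNU C statement expressions, as in the original; memcpy() is used here to sidestep alignment):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct stream {
	const unsigned char *next_byte;
	const unsigned char *end;
};

#define validate_next(t, s, n)	((s)->next_byte + sizeof(t) + (n) <= (s)->end)

#define get_next(t, s)							\
	({								\
		t r;							\
		if (!validate_next(t, s, 0))				\
			goto err_out;					\
		memcpy(&r, (s)->next_byte, sizeof(t));			\
		(s)->next_byte += sizeof(t);				\
		r;							\
	})

int main(void)
{
	unsigned char buf[] = { 0x0f, 0x34, 0x12 };
	struct stream s = { buf, buf + sizeof(buf) };

	uint8_t opcode = get_next(uint8_t, &s);
	uint16_t imm = get_next(uint16_t, &s);

	printf("opcode=%#x imm=%#x\n", opcode, imm);
	/* A third read would overrun the 3-byte buffer and jump to err_out. */
	(void)get_next(uint32_t, &s);
	return 0;

err_out:
	printf("truncated stream\n");
	return 1;
}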
t                 133 arch/x86/math-emu/fpu_aux.c 	FPU_REG t;
t                 164 arch/x86/math-emu/fpu_aux.c 	reg_copy(st0_ptr, &t);
t                 166 arch/x86/math-emu/fpu_aux.c 	reg_copy(&t, sti_ptr);
t                 755 arch/x86/math-emu/reg_ld_str.c 	FPU_REG t;
t                 771 arch/x86/math-emu/reg_ld_str.c 	reg_copy(st0_ptr, &t);
t                 772 arch/x86/math-emu/reg_ld_str.c 	precision_loss = FPU_round_to_int(&t, st0_tag);
t                 773 arch/x86/math-emu/reg_ld_str.c 	((long *)&tll)[0] = t.sigl;
t                 774 arch/x86/math-emu/reg_ld_str.c 	((long *)&tll)[1] = t.sigh;
t                 776 arch/x86/math-emu/reg_ld_str.c 	    ((t.sigh & 0x80000000) &&
t                 777 arch/x86/math-emu/reg_ld_str.c 	     !((t.sigh == 0x80000000) && (t.sigl == 0) && signnegative(&t)))) {
t                 789 arch/x86/math-emu/reg_ld_str.c 		if (signnegative(&t))
t                 805 arch/x86/math-emu/reg_ld_str.c 	FPU_REG t;
t                 820 arch/x86/math-emu/reg_ld_str.c 	reg_copy(st0_ptr, &t);
t                 821 arch/x86/math-emu/reg_ld_str.c 	precision_loss = FPU_round_to_int(&t, st0_tag);
t                 822 arch/x86/math-emu/reg_ld_str.c 	if (t.sigh ||
t                 823 arch/x86/math-emu/reg_ld_str.c 	    ((t.sigl & 0x80000000) &&
t                 824 arch/x86/math-emu/reg_ld_str.c 	     !((t.sigl == 0x80000000) && signnegative(&t)))) {
t                 830 arch/x86/math-emu/reg_ld_str.c 			t.sigl = 0x80000000;
t                 836 arch/x86/math-emu/reg_ld_str.c 		if (signnegative(&t))
t                 837 arch/x86/math-emu/reg_ld_str.c 			t.sigl = -(long)t.sigl;
t                 842 arch/x86/math-emu/reg_ld_str.c 	FPU_put_user(t.sigl, (unsigned long __user *)d);
t                 851 arch/x86/math-emu/reg_ld_str.c 	FPU_REG t;
t                 866 arch/x86/math-emu/reg_ld_str.c 	reg_copy(st0_ptr, &t);
t                 867 arch/x86/math-emu/reg_ld_str.c 	precision_loss = FPU_round_to_int(&t, st0_tag);
t                 868 arch/x86/math-emu/reg_ld_str.c 	if (t.sigh ||
t                 869 arch/x86/math-emu/reg_ld_str.c 	    ((t.sigl & 0xffff8000) &&
t                 870 arch/x86/math-emu/reg_ld_str.c 	     !((t.sigl == 0x8000) && signnegative(&t)))) {
t                 876 arch/x86/math-emu/reg_ld_str.c 			t.sigl = 0x8000;
t                 882 arch/x86/math-emu/reg_ld_str.c 		if (signnegative(&t))
t                 883 arch/x86/math-emu/reg_ld_str.c 			t.sigl = -t.sigl;
t                 888 arch/x86/math-emu/reg_ld_str.c 	FPU_put_user((short)t.sigl, d);
t                 897 arch/x86/math-emu/reg_ld_str.c 	FPU_REG t;
t                 915 arch/x86/math-emu/reg_ld_str.c 	reg_copy(st0_ptr, &t);
t                 916 arch/x86/math-emu/reg_ld_str.c 	precision_loss = FPU_round_to_int(&t, st0_tag);
t                 917 arch/x86/math-emu/reg_ld_str.c 	ll = significand(&t);
t                 920 arch/x86/math-emu/reg_ld_str.c 	if ((t.sigh > 0x0de0b6b3) ||
t                 921 arch/x86/math-emu/reg_ld_str.c 	    ((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff))) {
t                 217 arch/x86/platform/uv/uv_time.c 	u64 *t = &head->cpu[bcpu].expires;
t                 224 arch/x86/platform/uv/uv_time.c 	*t = expires;
t                 231 arch/x86/platform/uv/uv_time.c 			*t = ULLONG_MAX;
t                 253 arch/x86/platform/uv/uv_time.c 	u64 *t = &head->cpu[bcpu].expires;
t                 259 arch/x86/platform/uv/uv_time.c 	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
t                 263 arch/x86/platform/uv/uv_time.c 		*t = ULLONG_MAX;
t                 204 arch/x86/um/asm/elf.h extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
t                 206 arch/x86/um/asm/elf.h #define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
t                  31 arch/x86/um/asm/processor.h #define task_pt_regs(t) (&(t)->thread.regs)
t                 104 arch/x86/um/signal.c 		unsigned long __user *t = (unsigned long __user *)to;
t                 107 arch/x86/um/signal.c 		if (__put_user(*f, t) ||
t                 108 arch/x86/um/signal.c 				__put_user(*(f + 1), t + 1) ||
t                 138 arch/x86/um/signal.c 		unsigned long *t = (unsigned long *)to;
t                 141 arch/x86/um/signal.c 		if (__get_user(*t, f) ||
t                 142 arch/x86/um/signal.c 		    __get_user(*(t + 1), f + 1) ||
t                  65 arch/x86/um/tls_32.c 	struct thread_struct *t = &task->thread;
t                  68 arch/x86/um/tls_32.c 	if (!t->arch.tls_array)
t                  72 arch/x86/um/tls_32.c 		if (!t->arch.tls_array[idx].present)
t                 206 arch/x86/um/tls_32.c 	struct thread_struct *t = &task->thread;
t                 211 arch/x86/um/tls_32.c 	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
t                 212 arch/x86/um/tls_32.c 	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
t                 213 arch/x86/um/tls_32.c 	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
t                 241 arch/x86/um/tls_32.c 	struct thread_struct *t = &task->thread;
t                 243 arch/x86/um/tls_32.c 	if (!t->arch.tls_array)
t                 249 arch/x86/um/tls_32.c 	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
t                 252 arch/x86/um/tls_32.c 	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
t                 260 arch/x86/um/tls_32.c 		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
t                   9 arch/x86/um/tls_64.c int arch_set_tls(struct task_struct *t, unsigned long tls)
t                  15 arch/x86/um/tls_64.c 	t->thread.arch.fs = tls;
t                  40 arch/x86/um/vdso/um_vdso.c time_t __vdso_time(time_t *t)
t                  46 arch/x86/um/vdso/um_vdso.c 		: "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
t                  50 arch/x86/um/vdso/um_vdso.c time_t time(time_t *t) __attribute__((weak, alias("__vdso_time")));
t                 516 arch/x86/xen/enlighten_pv.c static void load_TLS_descriptor(struct thread_struct *t,
t                 524 arch/x86/xen/enlighten_pv.c 	if (desc_equal(shadow, &t->tls_array[i]))
t                 527 arch/x86/xen/enlighten_pv.c 	*shadow = t->tls_array[i];
t                 533 arch/x86/xen/enlighten_pv.c 	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
t                 536 arch/x86/xen/enlighten_pv.c static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
t                 566 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 0);
t                 567 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 1);
t                 568 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 2);
t                 384 arch/x86/xen/time.c 	struct vcpu_register_time_memory_area t;
t                 392 arch/x86/xen/time.c 	t.addr.v = NULL;
t                 394 arch/x86/xen/time.c 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
t                 404 arch/x86/xen/time.c 	struct vcpu_register_time_memory_area t;
t                 410 arch/x86/xen/time.c 	t.addr.v = &xen_clock->pvti;
t                 412 arch/x86/xen/time.c 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
t                 434 arch/x86/xen/time.c 	struct vcpu_register_time_memory_area t;
t                 442 arch/x86/xen/time.c 	t.addr.v = &ti->pvti;
t                 444 arch/x86/xen/time.c 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
t                 457 arch/x86/xen/time.c 		t.addr.v = NULL;
t                 459 arch/x86/xen/time.c 					 0, &t);
t                  64 arch/xtensa/include/asm/asm-uaccess.h 	GET_CURRENT(\at,\sp)
t                  65 arch/xtensa/include/asm/asm-uaccess.h 	s32i	\av, \at, THREAD_CURRENT_DS
t                  92 arch/xtensa/include/asm/asm-uaccess.h 	get_fs	\at, \sp
t                  93 arch/xtensa/include/asm/asm-uaccess.h 	beqz	\at, \success
t                 120 arch/xtensa/include/asm/asm-uaccess.h 	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
t                 121 arch/xtensa/include/asm/asm-uaccess.h 	bgeu	\as, \at, \error
t                 122 arch/xtensa/include/asm/asm-uaccess.h 	sub	\at, \at, \as
t                 123 arch/xtensa/include/asm/asm-uaccess.h 	bgeu	\aa, \at, \error
t                 150 arch/xtensa/include/asm/asm-uaccess.h 	kernel_ok  \at, \sp, .Laccess_ok_\@
t                 151 arch/xtensa/include/asm/asm-uaccess.h 	user_ok    \aa, \as, \at, \error
t                  52 arch/xtensa/include/asm/asmmacro.h 		movi	\at, ((\size + \incr - 1) / (\incr))
t                  53 arch/xtensa/include/asm/asmmacro.h 		loop	\at, 99f
t                  55 arch/xtensa/include/asm/asmmacro.h 		addi	\at, \ar, \size
t                  69 arch/xtensa/include/asm/asmmacro.h 			addi	\at, \as, (1 << \incr_log2) - 1
t                  71 arch/xtensa/include/asm/asmmacro.h 				extui	\at, \at, \incr_log2, \mask_log2
t                  73 arch/xtensa/include/asm/asmmacro.h 				srli	\at, \at, \incr_log2
t                  76 arch/xtensa/include/asm/asmmacro.h 		loop\cond	\at, 99f
t                  79 arch/xtensa/include/asm/asmmacro.h 			extui	\at, \as, \incr_log2, \mask_log2
t                  82 arch/xtensa/include/asm/asmmacro.h 				srli	\at, \as, \incr_log2
t                  86 arch/xtensa/include/asm/asmmacro.h 			b\ncond	\at, 99f
t                  90 arch/xtensa/include/asm/asmmacro.h 			slli	\at, \at, \incr_log2
t                  91 arch/xtensa/include/asm/asmmacro.h 			add	\at, \ar, \at
t                  93 arch/xtensa/include/asm/asmmacro.h 			add	\at, \ar, \as
t                 107 arch/xtensa/include/asm/asmmacro.h 		sub	\at, \as, \ar
t                 109 arch/xtensa/include/asm/asmmacro.h 			addi	\at, \at, (1 << \incr_log2) - 1
t                 110 arch/xtensa/include/asm/asmmacro.h 			srli	\at, \at, \incr_log2
t                 112 arch/xtensa/include/asm/asmmacro.h 		loop	\at, 99f
t                  45 arch/xtensa/include/asm/cacheasm.h 	__loopi	\ar, \at, \size, (_reps << (\line_width))
t                  51 arch/xtensa/include/asm/cacheasm.h 	__endla	\ar, \at, _reps << (\line_width)
t                  59 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
t                  66 arch/xtensa/include/asm/cacheasm.h 	extui	\at, \ar, 0, \line_width
t                  67 arch/xtensa/include/asm/cacheasm.h 	add	\as, \as, \at
t                  69 arch/xtensa/include/asm/cacheasm.h 	__loops	\ar, \as, \at, \line_width
t                  71 arch/xtensa/include/asm/cacheasm.h 	__endla	\ar, \at, (1 << (\line_width))
t                  78 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
t                  86 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
t                  96 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
t                 106 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
t                 116 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
t                 126 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
t                 136 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
t                 147 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
t                 156 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
t                 165 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
t                 174 arch/xtensa/include/asm/cacheasm.h 	__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
t                 104 arch/xtensa/include/asm/coprocessor.h #define __REG0(cc,abi,t,name,s,a)	__REG0_ ## cc (abi,name)
t                 105 arch/xtensa/include/asm/coprocessor.h #define __REG1(cc,abi,t,name,s,a)	__REG1_ ## cc (name)
t                 236 arch/xtensa/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
t                 239 arch/xtensa/kernel/hw_breakpoint.c 		if (t->ptrace_bp[i]) {
t                 240 arch/xtensa/kernel/hw_breakpoint.c 			unregister_hw_breakpoint(t->ptrace_bp[i]);
t                 241 arch/xtensa/kernel/hw_breakpoint.c 			t->ptrace_bp[i] = NULL;
t                 245 arch/xtensa/kernel/hw_breakpoint.c 		if (t->ptrace_wp[i]) {
t                 246 arch/xtensa/kernel/hw_breakpoint.c 			unregister_hw_breakpoint(t->ptrace_wp[i]);
t                 247 arch/xtensa/kernel/hw_breakpoint.c 			t->ptrace_wp[i] = NULL;
t                 152 arch/xtensa/kernel/setup.c 	tagtable_t *t;
t                 166 arch/xtensa/kernel/setup.c 		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
t                 167 arch/xtensa/kernel/setup.c 			if (tag->id == t->tag) {
t                 168 arch/xtensa/kernel/setup.c 				t->parse(tag);
t                 172 arch/xtensa/kernel/setup.c 		if (t == &__tagtable_end)
t                 268 arch/xtensa/mm/cache.c 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
t                 269 arch/xtensa/mm/cache.c 		__flush_invalidate_dcache_page_alias(t, phys);
t                 282 arch/xtensa/mm/cache.c 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
t                 286 arch/xtensa/mm/cache.c 			__invalidate_icache_page_alias(t, phys);
t                 307 arch/xtensa/mm/cache.c 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
t                 308 arch/xtensa/mm/cache.c 		__flush_invalidate_dcache_page_alias(t, phys);
t                 349 arch/xtensa/platforms/iss/network.c static void iss_net_timer(struct timer_list *t)
t                 351 arch/xtensa/platforms/iss/network.c 	struct iss_net_private *lp = from_timer(lp, t, timer);
t                  81 arch/xtensa/platforms/xt2000/setup.c 	static int i=0, t = 0;
t                  83 arch/xtensa/platforms/xt2000/setup.c 	if (--t < 0)
t                  85 arch/xtensa/platforms/xt2000/setup.c 		t = 59;
t                 460 block/blk-core.c static void blk_rq_timed_out_timer(struct timer_list *t)
t                 462 block/blk-core.c 	struct request_queue *q = from_timer(q, t, timeout);
t                 656 block/blk-iolatency.c static void blkiolatency_timer_fn(struct timer_list *t)
t                 658 block/blk-iolatency.c 	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
t                3252 block/blk-mq.c 	struct elevator_type *t = NULL;
t                3256 block/blk-mq.c 			t = qe->type;
t                3260 block/blk-mq.c 	if (!t)
t                3267 block/blk-mq.c 	elevator_switch_mq(q, t);
t                 470 block/blk-settings.c void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
t                 472 block/blk-settings.c 	blk_stack_limits(&t->limits, &b->limits, 0);
t                 497 block/blk-settings.c int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t                 502 block/blk-settings.c 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t                 503 block/blk-settings.c 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t                 504 block/blk-settings.c 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t                 505 block/blk-settings.c 	t->max_write_same_sectors = min(t->max_write_same_sectors,
t                 507 block/blk-settings.c 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
t                 509 block/blk-settings.c 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t                 511 block/blk-settings.c 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
t                 513 block/blk-settings.c 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
t                 516 block/blk-settings.c 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t                 517 block/blk-settings.c 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
t                 519 block/blk-settings.c 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
t                 522 block/blk-settings.c 	t->max_segment_size = min_not_zero(t->max_segment_size,
t                 525 block/blk-settings.c 	t->misaligned |= b->misaligned;
t                 532 block/blk-settings.c 	if (t->alignment_offset != alignment) {
t                 534 block/blk-settings.c 		top = max(t->physical_block_size, t->io_min)
t                 535 block/blk-settings.c 			+ t->alignment_offset;
t                 540 block/blk-settings.c 			t->misaligned = 1;
t                 545 block/blk-settings.c 	t->logical_block_size = max(t->logical_block_size,
t                 548 block/blk-settings.c 	t->physical_block_size = max(t->physical_block_size,
t                 551 block/blk-settings.c 	t->io_min = max(t->io_min, b->io_min);
t                 552 block/blk-settings.c 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t                 555 block/blk-settings.c 	if (t->physical_block_size & (t->logical_block_size - 1)) {
t                 556 block/blk-settings.c 		t->physical_block_size = t->logical_block_size;
t                 557 block/blk-settings.c 		t->misaligned = 1;
t                 562 block/blk-settings.c 	if (t->io_min & (t->physical_block_size - 1)) {
t                 563 block/blk-settings.c 		t->io_min = t->physical_block_size;
t                 564 block/blk-settings.c 		t->misaligned = 1;
t                 569 block/blk-settings.c 	if (t->io_opt & (t->physical_block_size - 1)) {
t                 570 block/blk-settings.c 		t->io_opt = 0;
t                 571 block/blk-settings.c 		t->misaligned = 1;
t                 575 block/blk-settings.c 	t->raid_partial_stripes_expensive =
t                 576 block/blk-settings.c 		max(t->raid_partial_stripes_expensive,
t                 580 block/blk-settings.c 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
t                 581 block/blk-settings.c 		% max(t->physical_block_size, t->io_min);
t                 584 block/blk-settings.c 	if (t->alignment_offset & (t->logical_block_size - 1)) {
t                 585 block/blk-settings.c 		t->misaligned = 1;
t                 593 block/blk-settings.c 		if (t->discard_granularity != 0 &&
t                 594 block/blk-settings.c 		    t->discard_alignment != alignment) {
t                 595 block/blk-settings.c 			top = t->discard_granularity + t->discard_alignment;
t                 600 block/blk-settings.c 				t->discard_misaligned = 1;
t                 603 block/blk-settings.c 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
t                 605 block/blk-settings.c 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
t                 607 block/blk-settings.c 		t->discard_granularity = max(t->discard_granularity,
t                 609 block/blk-settings.c 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t                 610 block/blk-settings.c 			t->discard_granularity;
t                 614 block/blk-settings.c 		t->chunk_sectors = min_not_zero(t->chunk_sectors,
t                 632 block/blk-settings.c int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
t                 639 block/blk-settings.c 	return blk_stack_limits(t, &bq->limits, start);
t                 656 block/blk-settings.c 	struct request_queue *t = disk->queue;
t                 658 block/blk-settings.c 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
t                 668 block/blk-settings.c 	t->backing_dev_info->io_pages =
t                 669 block/blk-settings.c 		t->limits.max_sectors >> (PAGE_SHIFT - 9);
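Note: the blk_stack_limits() lines above combine the limits of a stacked (top) queue with those of a bottom device, treating 0 as "no limit" via min_not_zero(). A small standalone sketch of that idiom follows, with a trimmed-down stand-in for struct queue_limits; the kernel's min_not_zero() macro is also more careful about argument evaluation than this one.

	/* Sketch of the min_not_zero() stacking idiom used above. */
	#include <stdio.h>

	#define min_not_zero(a, b) \
		((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

	struct limits {
		unsigned int max_sectors;	/* 0 = unlimited */
		unsigned int max_segments;	/* 0 = unlimited */
	};

	static void stack_limits(struct limits *t, const struct limits *b)
	{
		/* the stacked device can never allow more than either component */
		t->max_sectors  = min_not_zero(t->max_sectors,  b->max_sectors);
		t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	}

	int main(void)
	{
		struct limits top = { .max_sectors = 0,    .max_segments = 128 };
		struct limits bot = { .max_sectors = 1024, .max_segments = 0   };

		stack_limits(&top, &bot);
		printf("max_sectors=%u max_segments=%u\n",
		       top.max_sectors, top.max_segments);	/* 1024 and 128 */
		return 0;
	}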
t                  79 block/blk-stat.c static void blk_stat_timer_fn(struct timer_list *t)
t                  81 block/blk-stat.c 	struct blk_stat_callback *cb = from_timer(cb, t, timer);
t                 225 block/blk-throttle.c static void throtl_pending_timer_fn(struct timer_list *t);
t                1235 block/blk-throttle.c static void throtl_pending_timer_fn(struct timer_list *t)
t                1237 block/blk-throttle.c 	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
t                2465 block/blk-throttle.c 	unsigned long t;
t                2471 block/blk-throttle.c 	t = msecs_to_jiffies(v);
t                2472 block/blk-throttle.c 	if (t == 0 || t > MAX_THROTL_SLICE)
t                2474 block/blk-throttle.c 	q->td->throtl_slice = t;
t                 276 block/kyber-iosched.c static void kyber_timer_fn(struct timer_list *t)
t                 278 block/kyber-iosched.c 	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
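Note: several callbacks above (iss_net_timer, blk_rq_timed_out_timer, blkiolatency_timer_fn, blk_stat_timer_fn, throtl_pending_timer_fn, kyber_timer_fn) share one shape: the timer core passes only the embedded struct timer_list, and from_timer() recovers the enclosing private structure via container_of(). A userspace sketch of that pattern follows; my_device and its fields are invented names, and typeof is a GCC/clang extension, as in the kernel macro.

	/* Sketch of the from_timer()/container_of() pattern used above. */
	#include <stdio.h>
	#include <stddef.h>

	struct timer {			/* stand-in for struct timer_list */
		void (*fn)(struct timer *t);
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* from_timer(var, timer_ptr, member): container_of() spelled for timers */
	#define from_timer(var, t, member) \
		container_of(t, typeof(*var), member)

	struct my_device {
		int id;
		struct timer poll_timer;	/* embedded, like .timer above */
	};

	static void my_poll_fn(struct timer *t)
	{
		struct my_device *dev = from_timer(dev, t, poll_timer);

		printf("timer fired for device %d\n", dev->id);
	}

	int main(void)
	{
		struct my_device dev = {
			.id = 42,
			.poll_timer = { .fn = my_poll_fn },
		};

		/* simulate the timer core invoking the callback with the inner pointer */
		dev.poll_timer.fn(&dev.poll_timer);
		return 0;
	}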
t                  46 crypto/aegis.h 	const u32 *t = crypto_ft_tab[0];
t                  49 crypto/aegis.h 	d0 = t[s[ 0]] ^ rol32(t[s[ 5]], 8) ^ rol32(t[s[10]], 16) ^ rol32(t[s[15]], 24);
t                  50 crypto/aegis.h 	d1 = t[s[ 4]] ^ rol32(t[s[ 9]], 8) ^ rol32(t[s[14]], 16) ^ rol32(t[s[ 3]], 24);
t                  51 crypto/aegis.h 	d2 = t[s[ 8]] ^ rol32(t[s[13]], 8) ^ rol32(t[s[ 2]], 16) ^ rol32(t[s[ 7]], 24);
t                  52 crypto/aegis.h 	d3 = t[s[12]] ^ rol32(t[s[ 1]], 8) ^ rol32(t[s[ 6]], 16) ^ rol32(t[s[11]], 24);
t                 307 crypto/cast5_generic.c 	u32 l, r, t;
t                 329 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
t                 330 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
t                 331 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
t                 332 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
t                 333 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
t                 334 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
t                 335 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
t                 336 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
t                 337 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
t                 338 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
t                 339 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
t                 340 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
t                 342 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
t                 343 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t                 344 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t                 345 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
t                 364 crypto/cast5_generic.c 	u32 l, r, t;
t                 376 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
t                 377 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t                 378 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t                 379 crypto/cast5_generic.c 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
t                 381 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
t                 382 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
t                 383 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
t                 384 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
t                 385 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
t                 386 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
t                 387 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
t                 388 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
t                 389 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
t                 390 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
t                 391 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
t                 392 crypto/cast5_generic.c 	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
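Note: the cast5_generic.c lines above are the textbook Feistel idiom: each "t = l; l = r; r = t ^ F(...)" round swaps the halves and XORs the old left half with a keyed function of the right half, and decryption replays the rounds with the subkeys reversed. A toy sketch of that structure follows; the round function and keys are invented for illustration and this is not CAST5.

	/* Toy Feistel network illustrating the round idiom above (not CAST5). */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t rol32(uint32_t v, unsigned int s)
	{
		return (v << s) | (v >> (32 - s));
	}

	/* stand-in round function, nothing like CAST5's F1/F2/F3 */
	static uint32_t F(uint32_t r, uint32_t k)
	{
		return rol32(r + k, 7) ^ (r * 0x9e3779b9u);
	}

	static void feistel(uint32_t *l, uint32_t *r, const uint32_t *k,
			    int rounds, int decrypt)
	{
		for (int i = 0; i < rounds; i++) {
			uint32_t sub = decrypt ? k[rounds - 1 - i] : k[i];
			uint32_t t = *l;

			*l = *r;
			*r = t ^ F(*r, sub);	/* *r still holds the old R here */
		}
		/* final half-swap; with it, reversed key order inverts the cipher */
		uint32_t t = *l; *l = *r; *r = t;
	}

	int main(void)
	{
		uint32_t k[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
		uint32_t l = 0xdeadbeef, r = 0x01234567;

		feistel(&l, &r, k, 4, 0);
		printf("ciphertext: %08x %08x\n", l, r);
		feistel(&l, &r, k, 4, 1);
		printf("plaintext:  %08x %08x\n", l, r);	/* deadbeef 01234567 */
		return 0;
	}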
t                 520 crypto/ecc.c   	u64 t[ECC_MAX_DIGITS * 2];
t                 525 crypto/ecc.c   		vli_umult(t, r + ndigits, c, ndigits);
t                 527 crypto/ecc.c   		vli_add(r, r, t, ndigits * 2);
t                 529 crypto/ecc.c   	vli_set(t, mod, ndigits);
t                 530 crypto/ecc.c   	vli_clear(t + ndigits, ndigits);
t                 531 crypto/ecc.c   	while (vli_cmp(r, t, ndigits * 2) >= 0)
t                 532 crypto/ecc.c   		vli_sub(r, r, t, ndigits * 2);
t                  60 crypto/fcrypt.c 	u32 t = lo & ((1 << n) - 1);				\
t                  62 crypto/fcrypt.c 	hi = (hi >> n) | (t << (24-n));				\
t                 262 crypto/gf128mul.c 	struct gf128mul_64k *t;
t                 265 crypto/gf128mul.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                 266 crypto/gf128mul.c 	if (!t)
t                 270 crypto/gf128mul.c 		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
t                 271 crypto/gf128mul.c 		if (!t->t[i]) {
t                 272 crypto/gf128mul.c 			gf128mul_free_64k(t);
t                 273 crypto/gf128mul.c 			t = NULL;
t                 278 crypto/gf128mul.c 	t->t[0]->t[1] = *g;
t                 280 crypto/gf128mul.c 		gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
t                 285 crypto/gf128mul.c 				be128_xor(&t->t[i]->t[j + k],
t                 286 crypto/gf128mul.c 					  &t->t[i]->t[j], &t->t[i]->t[k]);
t                 292 crypto/gf128mul.c 			t->t[i]->t[j] = t->t[i - 1]->t[j];
t                 293 crypto/gf128mul.c 			gf128mul_x8_bbe(&t->t[i]->t[j]);
t                 298 crypto/gf128mul.c 	return t;
t                 302 crypto/gf128mul.c void gf128mul_free_64k(struct gf128mul_64k *t)
t                 307 crypto/gf128mul.c 		kzfree(t->t[i]);
t                 308 crypto/gf128mul.c 	kzfree(t);
t                 312 crypto/gf128mul.c void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
t                 318 crypto/gf128mul.c 	*r = t->t[0]->t[ap[15]];
t                 320 crypto/gf128mul.c 		be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
t                 343 crypto/gf128mul.c 	struct gf128mul_4k *t;
t                 346 crypto/gf128mul.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                 347 crypto/gf128mul.c 	if (!t)
t                 350 crypto/gf128mul.c 	t->t[128] = *g;
t                 352 crypto/gf128mul.c 		gf128mul_x_lle(&t->t[j], &t->t[j+j]);
t                 356 crypto/gf128mul.c 			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
t                 359 crypto/gf128mul.c 	return t;
t                 365 crypto/gf128mul.c 	struct gf128mul_4k *t;
t                 368 crypto/gf128mul.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                 369 crypto/gf128mul.c 	if (!t)
t                 372 crypto/gf128mul.c 	t->t[1] = *g;
t                 374 crypto/gf128mul.c 		gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
t                 378 crypto/gf128mul.c 			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
t                 381 crypto/gf128mul.c 	return t;
t                 385 crypto/gf128mul.c void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
t                 391 crypto/gf128mul.c 	*r = t->t[ap[15]];
t                 394 crypto/gf128mul.c 		be128_xor(r, r, &t->t[ap[i]]);
t                 400 crypto/gf128mul.c void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
t                 406 crypto/gf128mul.c 	*r = t->t[ap[0]];
t                 409 crypto/gf128mul.c 		be128_xor(r, r, &t->t[ap[i]]);
t                 129 crypto/keywrap.c 	u64 t = 6 * ((req->cryptlen) >> 3);
t                 163 crypto/keywrap.c 			block.A ^= cpu_to_be64(t);
t                 164 crypto/keywrap.c 			t--;
t                 198 crypto/keywrap.c 	u64 t = 1;
t                 240 crypto/keywrap.c 			block.A ^= cpu_to_be64(t);
t                 241 crypto/keywrap.c 			t++;
t                  53 crypto/lrw.c   	be128 t;
t                 148 crypto/lrw.c   	be128 t = rctx->t;
t                 179 crypto/lrw.c   			be128_xor(wdst++, &t, wsrc++);
t                 183 crypto/lrw.c   			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
t                 236 crypto/lrw.c   	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
t                 239 crypto/lrw.c   	gf128mul_64k_bbe(&rctx->t, ctx->table);
t                  49 crypto/sha3_generic.c 	u64 t[5], tt, bc[5];
t                  58 crypto/sha3_generic.c 	t[0] = bc[4] ^ rol64(bc[1], 1);
t                  59 crypto/sha3_generic.c 	t[1] = bc[0] ^ rol64(bc[2], 1);
t                  60 crypto/sha3_generic.c 	t[2] = bc[1] ^ rol64(bc[3], 1);
t                  61 crypto/sha3_generic.c 	t[3] = bc[2] ^ rol64(bc[4], 1);
t                  62 crypto/sha3_generic.c 	t[4] = bc[3] ^ rol64(bc[0], 1);
t                  64 crypto/sha3_generic.c 	st[0] ^= t[0];
t                  68 crypto/sha3_generic.c 	st[ 1] = rol64(st[ 6] ^ t[1], 44);
t                  69 crypto/sha3_generic.c 	st[ 6] = rol64(st[ 9] ^ t[4], 20);
t                  70 crypto/sha3_generic.c 	st[ 9] = rol64(st[22] ^ t[2], 61);
t                  71 crypto/sha3_generic.c 	st[22] = rol64(st[14] ^ t[4], 39);
t                  72 crypto/sha3_generic.c 	st[14] = rol64(st[20] ^ t[0], 18);
t                  73 crypto/sha3_generic.c 	st[20] = rol64(st[ 2] ^ t[2], 62);
t                  74 crypto/sha3_generic.c 	st[ 2] = rol64(st[12] ^ t[2], 43);
t                  75 crypto/sha3_generic.c 	st[12] = rol64(st[13] ^ t[3], 25);
t                  76 crypto/sha3_generic.c 	st[13] = rol64(st[19] ^ t[4],  8);
t                  77 crypto/sha3_generic.c 	st[19] = rol64(st[23] ^ t[3], 56);
t                  78 crypto/sha3_generic.c 	st[23] = rol64(st[15] ^ t[0], 41);
t                  79 crypto/sha3_generic.c 	st[15] = rol64(st[ 4] ^ t[4], 27);
t                  80 crypto/sha3_generic.c 	st[ 4] = rol64(st[24] ^ t[4], 14);
t                  81 crypto/sha3_generic.c 	st[24] = rol64(st[21] ^ t[1],  2);
t                  82 crypto/sha3_generic.c 	st[21] = rol64(st[ 8] ^ t[3], 55);
t                  83 crypto/sha3_generic.c 	st[ 8] = rol64(st[16] ^ t[1], 45);
t                  84 crypto/sha3_generic.c 	st[16] = rol64(st[ 5] ^ t[0], 36);
t                  85 crypto/sha3_generic.c 	st[ 5] = rol64(st[ 3] ^ t[3], 28);
t                  86 crypto/sha3_generic.c 	st[ 3] = rol64(st[18] ^ t[3], 21);
t                  87 crypto/sha3_generic.c 	st[18] = rol64(st[17] ^ t[2], 15);
t                  88 crypto/sha3_generic.c 	st[17] = rol64(st[11] ^ t[1], 10);
t                  89 crypto/sha3_generic.c 	st[11] = rol64(st[ 7] ^ t[2],  6);
t                  90 crypto/sha3_generic.c 	st[ 7] = rol64(st[10] ^ t[0],  3);
t                  91 crypto/sha3_generic.c 	st[10] = rol64(    tt ^ t[1],  1);
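Note: the sha3_generic.c lines above interleave two Keccak-f steps: the t[0..4] values implement theta (column parities folded back into every lane), while the rotation constants belong to rho/pi. A standalone sketch of just the theta step over a 5x5 lane state follows.

	/* Sketch of the Keccak-f theta step computed by the t[0..4] lines above. */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t rol64(uint64_t v, unsigned int s)
	{
		return (v << s) | (v >> (64 - s));
	}

	static void theta(uint64_t st[25])
	{
		uint64_t bc[5], t[5];
		int i, j;

		for (i = 0; i < 5; i++)		/* column parities */
			bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^
				st[i + 15] ^ st[i + 20];

		for (i = 0; i < 5; i++)		/* as in t[0..4] above */
			t[i] = bc[(i + 4) % 5] ^ rol64(bc[(i + 1) % 5], 1);

		for (i = 0; i < 5; i++)		/* fold back into every lane */
			for (j = 0; j < 25; j += 5)
				st[i + j] ^= t[i];
	}

	int main(void)
	{
		uint64_t st[25] = { 1, 2, 3 };	/* arbitrary demo state */

		theta(st);
		printf("st[0] = %016llx\n", (unsigned long long)st[0]);
		return 0;
	}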
t                  54 crypto/sm3_generic.c static void sm3_expand(u32 *t, u32 *w, u32 *wt)
t                  61 crypto/sm3_generic.c 		w[i] = get_unaligned_be32((__u32 *)t + i);
t                  92 crypto/sm3_generic.c 		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
t                 119 crypto/sm4_generic.c 	u32 rk[4], t;
t                 130 crypto/sm4_generic.c 		t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
t                 131 crypto/sm4_generic.c 		ctx->rkey_enc[i] = t;
t                 135 crypto/sm4_generic.c 		rk[3] = t;
t                 174 crypto/sm4_generic.c 	u32 x[4], i, t;
t                 180 crypto/sm4_generic.c 		t = sm4_round(x, rk[i]);
t                 184 crypto/sm4_generic.c 		x[3] = t;
t                 556 crypto/tgr192.c 	u32 t, msb, lsb;
t                 561 crypto/tgr192.c 	t = tctx->nblocks;
t                 562 crypto/tgr192.c 	if ((lsb = t << 6) < t) { /* multiply by 64 to make a byte count */
t                 565 crypto/tgr192.c 	msb += t >> 26;
t                 566 crypto/tgr192.c 	t = lsb;
t                 567 crypto/tgr192.c 	if ((lsb = t + tctx->count) < t) {	/* add the count */
t                 570 crypto/tgr192.c 	t = lsb;
t                 571 crypto/tgr192.c 	if ((lsb = t << 3) < t)	{ /* multiply by 8 to make a bit count */
t                 574 crypto/tgr192.c 	msb += t >> 29;
t                 255 crypto/vmac.c  		u64 t1, t2, m1, m2, t;					\
t                 257 crypto/vmac.c  		rh = rl = t = 0;					\
t                 267 crypto/vmac.c  			t += (u64)(u32)m1 + (u32)m2;			\
t                 269 crypto/vmac.c  		ADD128(rh, rl, (t >> 32), (t << 32));			\
t                 286 crypto/vmac.c  	u64 p, q, t;
t                 295 crypto/vmac.c  	t = (u32)(p);
t                 301 crypto/vmac.c  	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
t                 321 crypto/vmac.c  	*(u64 *)(ahi) = p + t;
t                 360 crypto/vmac.c  	u64 rh, rl, t, z = 0;
t                 363 crypto/vmac.c  	t = p1 >> 63;
t                 365 crypto/vmac.c  	ADD128(p1, p2, len, t);
t                 367 crypto/vmac.c  	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
t                 368 crypto/vmac.c  	ADD128(p1, p2, z, t);
t                 372 crypto/vmac.c  	t = p1 + (p2 >> 32);
t                 373 crypto/vmac.c  	t += (t >> 32);
t                 374 crypto/vmac.c  	t += (u32)t > 0xfffffffeu;
t                 375 crypto/vmac.c  	p1 += (t >> 32);
t                 386 crypto/vmac.c  	t = rh >> 56;
t                 387 crypto/vmac.c  	ADD128(t, rl, z, rh);
t                 389 crypto/vmac.c  	ADD128(t, rl, z, rh);
t                 390 crypto/vmac.c  	t += t << 8;
t                 391 crypto/vmac.c  	rl += t;
t                 392 crypto/vmac.c  	rl += (0 - (rl < t)) & 257;
t                  34 crypto/xts.c   	le128 t;
t                  94 crypto/xts.c   	le128 t = rctx->t;
t                 117 crypto/xts.c   						rctx->t = t;
t                 118 crypto/xts.c   					gf128mul_x_ble(&t, &t);
t                 120 crypto/xts.c   				le128_xor(wdst, &t, wsrc);
t                 122 crypto/xts.c   					gf128mul_x_ble(&rctx->t, &t);
t                 127 crypto/xts.c   			le128_xor(wdst++, &t, wsrc++);
t                 128 crypto/xts.c   			gf128mul_x_ble(&t, &t);
t                 156 crypto/xts.c   		le128_xor(&b, &rctx->t, &b);
t                 181 crypto/xts.c   	le128_xor(b, &rctx->t, b);
t                 195 crypto/xts.c   	le128_xor(b, &rctx->t, b);
t                 256 crypto/xts.c   	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
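Note: the xts.c lines above advance the per-block tweak with gf128mul_x_ble(), i.e. multiply the 128-bit tweak by x in GF(2^128) and reduce by the XTS polynomial x^128 + x^7 + x^2 + x + 1 (0x87). A standalone sketch of that doubling on a pair of 64-bit halves follows; the u128 type is a stand-in, not the kernel's le128.

	/* Sketch of the gf128mul_x_ble()-style tweak doubling used above. */
	#include <stdio.h>
	#include <stdint.h>

	struct u128 {
		uint64_t lo;	/* least significant 64 bits */
		uint64_t hi;	/* most significant 64 bits */
	};

	static void gf128_mul_x(struct u128 *t)
	{
		uint64_t carry = t->hi >> 63;			/* bit 127 about to fall off */

		t->hi = (t->hi << 1) | (t->lo >> 63);		/* shift left by one bit */
		t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);	/* reduce if it did */
	}

	int main(void)
	{
		struct u128 tweak = { .lo = 1, .hi = 0 };	/* x^0 */
		int i;

		for (i = 0; i < 128; i++)	/* x^128 reduces back to 0x87 */
			gf128_mul_x(&tweak);
		printf("lo=%#llx hi=%#llx\n",
		       (unsigned long long)tweak.lo, (unsigned long long)tweak.hi);
		return 0;
	}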
t                 427 drivers/acpi/acpi_dbg.c 	struct task_struct *t;
t                 434 drivers/acpi/acpi_dbg.c 	t = kthread_create(acpi_aml_thread, NULL, "aml");
t                 435 drivers/acpi/acpi_dbg.c 	if (IS_ERR(t)) {
t                 437 drivers/acpi/acpi_dbg.c 		return PTR_ERR(t);
t                 441 drivers/acpi/acpi_dbg.c 	acpi_aml_io.thread = t;
t                 442 drivers/acpi/acpi_dbg.c 	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
t                 443 drivers/acpi/acpi_dbg.c 	wake_up_process(t);
t                 377 drivers/acpi/acpica/acmacros.h #define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = (t))
t                 429 drivers/acpi/acpica/acutils.h #define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_acpi_module_name,__LINE__,_COMPONENT,t)
t                 172 drivers/acpi/apei/einj.c static int einj_timedout(u64 *t)
t                 174 drivers/acpi/apei/einj.c 	if ((s64)*t < SPIN_UNIT) {
t                 178 drivers/acpi/apei/einj.c 	*t -= SPIN_UNIT;
t                 100 drivers/acpi/apei/erst.c static int erst_timedout(u64 *t, u64 spin_unit)
t                 102 drivers/acpi/apei/erst.c 	if ((s64)*t < spin_unit) {
t                 106 drivers/acpi/apei/erst.c 	*t -= spin_unit;
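Note: einj_timedout() and erst_timedout() above implement a polling budget: each call burns one spin interval from a remaining-time counter and reports expiry once the budget would go negative. A sketch of that loop shape follows; check_ready() is a made-up stand-in for the hardware status poll.

	/* Sketch of the *_timedout() polling-budget idiom above. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define SPIN_UNIT	100ULL		/* pretend nanoseconds per poll */

	static bool timedout(uint64_t *budget)
	{
		if ((int64_t)*budget < (int64_t)SPIN_UNIT)
			return true;		/* would go negative: give up */
		*budget -= SPIN_UNIT;
		/* a real implementation would also delay here before repolling */
		return false;
	}

	/* made-up condition: pretend the device becomes ready on the 7th poll */
	static bool check_ready(int *polls)
	{
		return ++(*polls) >= 7;
	}

	int main(void)
	{
		uint64_t budget = 1000;		/* total time we are willing to wait */
		int polls = 0;

		while (!check_ready(&polls)) {
			if (timedout(&budget)) {
				fprintf(stderr, "timed out after %d polls\n", polls);
				return 1;
			}
		}
		printf("ready after %d polls, budget left %llu\n",
		       polls, (unsigned long long)budget);
		return 0;
	}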
t                 760 drivers/acpi/apei/ghes.c static void ghes_poll_func(struct timer_list *t)
t                 762 drivers/acpi/apei/ghes.c 	struct ghes *ghes = from_timer(ghes, t, timer);
t                 188 drivers/acpi/arm64/iort.c 	struct iort_its_msi_chip *its_msi_chip, *t;
t                 191 drivers/acpi/arm64/iort.c 	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
t                 631 drivers/acpi/ec.c 	struct transaction *t;
t                 644 drivers/acpi/ec.c 	t = ec->curr;
t                 649 drivers/acpi/ec.c 	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
t                 657 drivers/acpi/ec.c 	if (!t)
t                 659 drivers/acpi/ec.c 	if (t->flags & ACPI_EC_COMMAND_POLL) {
t                 660 drivers/acpi/ec.c 		if (t->wlen > t->wi) {
t                 662 drivers/acpi/ec.c 				acpi_ec_write_data(ec, t->wdata[t->wi++]);
t                 665 drivers/acpi/ec.c 		} else if (t->rlen > t->ri) {
t                 667 drivers/acpi/ec.c 				t->rdata[t->ri++] = acpi_ec_read_data(ec);
t                 668 drivers/acpi/ec.c 				if (t->rlen == t->ri) {
t                 670 drivers/acpi/ec.c 					if (t->command == ACPI_EC_COMMAND_QUERY)
t                 677 drivers/acpi/ec.c 		} else if (t->wlen == t->wi &&
t                 686 drivers/acpi/ec.c 		    (t->command == ACPI_EC_COMMAND_QUERY)) {
t                 688 drivers/acpi/ec.c 			t->rdata[t->ri++] = 0x00;
t                 694 drivers/acpi/ec.c 			acpi_ec_write_cmd(ec, t->command);
t                 706 drivers/acpi/ec.c 		if (in_interrupt() && t) {
t                 707 drivers/acpi/ec.c 			if (t->irq_count < ec_storm_threshold)
t                 708 drivers/acpi/ec.c 				++t->irq_count;
t                 710 drivers/acpi/ec.c 			if (t->irq_count == ec_storm_threshold)
t                 786 drivers/acpi/ec.c 					struct transaction *t)
t                 800 drivers/acpi/ec.c 	ec->curr = t;
t                 801 drivers/acpi/ec.c 	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
t                 808 drivers/acpi/ec.c 	if (t->irq_count == ec_storm_threshold)
t                 810 drivers/acpi/ec.c 	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
t                 820 drivers/acpi/ec.c static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
t                 825 drivers/acpi/ec.c 	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
t                 827 drivers/acpi/ec.c 	if (t->rdata)
t                 828 drivers/acpi/ec.c 		memset(t->rdata, 0, t->rlen);
t                 839 drivers/acpi/ec.c 	status = acpi_ec_transaction_unlocked(ec, t);
t                 851 drivers/acpi/ec.c 	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
t                 855 drivers/acpi/ec.c 	return acpi_ec_transaction(ec, &t);
t                 860 drivers/acpi/ec.c 	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
t                 865 drivers/acpi/ec.c 				acpi_ec_transaction(ec, &t) : 0;
t                 872 drivers/acpi/ec.c 	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
t                 876 drivers/acpi/ec.c 	result = acpi_ec_transaction(ec, &t);
t                 884 drivers/acpi/ec.c 	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
t                 888 drivers/acpi/ec.c 	return acpi_ec_transaction(ec, &t);
t                 926 drivers/acpi/ec.c 	struct transaction t = {.command = command,
t                 933 drivers/acpi/ec.c 	return acpi_ec_transaction(first_ec, &t);
t                1133 drivers/acpi/ec.c 	struct transaction *t;
t                1139 drivers/acpi/ec.c 	t = &q->transaction;
t                1140 drivers/acpi/ec.c 	t->command = ACPI_EC_COMMAND_QUERY;
t                1141 drivers/acpi/ec.c 	t->rdata = pval;
t                1142 drivers/acpi/ec.c 	t->rlen = 1;
t                1114 drivers/acpi/processor_idle.c 				  struct acpi_lpi_state *t)
t                1116 drivers/acpi/processor_idle.c 	curr_level->composite_states[curr_level->composite_states_size++] = t;
t                1124 drivers/acpi/processor_idle.c 	struct acpi_lpi_state *p, *t = curr_level->entries;
t                1127 drivers/acpi/processor_idle.c 	for (j = 0; j < state_count; j++, t++) {
t                1130 drivers/acpi/processor_idle.c 		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
t                1143 drivers/acpi/processor_idle.c 			memcpy(flpi, t, sizeof(*t));
t                1151 drivers/acpi/processor_idle.c 			if (t->index <= p->enable_parent_state &&
t                1152 drivers/acpi/processor_idle.c 			    combine_lpi_states(p, t, flpi)) {
t                 394 drivers/acpi/resource.c 	int irq, p, t;
t                 411 drivers/acpi/resource.c 	if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
t                 412 drivers/acpi/resource.c 		u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
t                 417 drivers/acpi/resource.c 				   t ? "level" : "edge", p ? "low" : "high");
t                1779 drivers/android/binder.c 					   struct binder_transaction *t)
t                1783 drivers/android/binder.c 	BUG_ON(target_thread->transaction_stack != t);
t                1787 drivers/android/binder.c 	t->from = NULL;
t                1854 drivers/android/binder.c 		struct binder_transaction *t)
t                1858 drivers/android/binder.c 	spin_lock(&t->lock);
t                1859 drivers/android/binder.c 	from = t->from;
t                1862 drivers/android/binder.c 	spin_unlock(&t->lock);
t                1878 drivers/android/binder.c 		struct binder_transaction *t)
t                1879 drivers/android/binder.c 	__acquires(&t->from->proc->inner_lock)
t                1883 drivers/android/binder.c 	from = binder_get_txn_from(t);
t                1889 drivers/android/binder.c 	if (t->from) {
t                1890 drivers/android/binder.c 		BUG_ON(from != t->from);
t                1909 drivers/android/binder.c static void binder_free_txn_fixups(struct binder_transaction *t)
t                1913 drivers/android/binder.c 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
t                1920 drivers/android/binder.c static void binder_free_transaction(struct binder_transaction *t)
t                1922 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                1926 drivers/android/binder.c 		if (t->buffer)
t                1927 drivers/android/binder.c 			t->buffer->transaction = NULL;
t                1934 drivers/android/binder.c 	binder_free_txn_fixups(t);
t                1935 drivers/android/binder.c 	kfree(t);
t                1939 drivers/android/binder.c static void binder_send_failed_reply(struct binder_transaction *t,
t                1945 drivers/android/binder.c 	BUG_ON(t->flags & TF_ONE_WAY);
t                1947 drivers/android/binder.c 		target_thread = binder_get_txn_from_and_acq_inner(t);
t                1951 drivers/android/binder.c 				      t->debug_id,
t                1955 drivers/android/binder.c 			binder_pop_transaction_ilocked(target_thread, t);
t                1974 drivers/android/binder.c 			binder_free_transaction(t);
t                1979 drivers/android/binder.c 		next = t->from_parent;
t                1983 drivers/android/binder.c 			     t->debug_id);
t                1985 drivers/android/binder.c 		binder_free_transaction(t);
t                1991 drivers/android/binder.c 		t = next;
t                1994 drivers/android/binder.c 			      t->debug_id);
t                2004 drivers/android/binder.c static void binder_cleanup_transaction(struct binder_transaction *t,
t                2008 drivers/android/binder.c 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
t                2009 drivers/android/binder.c 		binder_send_failed_reply(t, error_code);
t                2013 drivers/android/binder.c 			t->debug_id, reason);
t                2014 drivers/android/binder.c 		binder_free_transaction(t);
t                2437 drivers/android/binder.c 				   struct binder_transaction *t,
t                2442 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                2479 drivers/android/binder.c 	trace_binder_transaction_node_to_ref(t, node, &rdata);
t                2490 drivers/android/binder.c 				   struct binder_transaction *t,
t                2494 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                2530 drivers/android/binder.c 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
t                2549 drivers/android/binder.c 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
t                2563 drivers/android/binder.c 			       struct binder_transaction *t,
t                2568 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                2577 drivers/android/binder.c 		target_allows_fd = t->buffer->target_node->accept_fds;
t                2612 drivers/android/binder.c 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
t                2613 drivers/android/binder.c 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
t                2627 drivers/android/binder.c 				     struct binder_transaction *t,
t                2634 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                2656 drivers/android/binder.c 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
t                2669 drivers/android/binder.c 						    &fd, t->buffer,
t                2672 drivers/android/binder.c 			ret = binder_translate_fd(fd, offset, t, thread,
t                2680 drivers/android/binder.c static int binder_fixup_parent(struct binder_transaction *t,
t                2689 drivers/android/binder.c 	struct binder_buffer *b = t->buffer;
t                2691 drivers/android/binder.c 	struct binder_proc *target_proc = t->to_proc;
t                2753 drivers/android/binder.c static bool binder_proc_transaction(struct binder_transaction *t,
t                2757 drivers/android/binder.c 	struct binder_node *node = t->buffer->target_node;
t                2758 drivers/android/binder.c 	bool oneway = !!(t->flags & TF_ONE_WAY);
t                2784 drivers/android/binder.c 		binder_enqueue_thread_work_ilocked(thread, &t->work);
t                2786 drivers/android/binder.c 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
t                2788 drivers/android/binder.c 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
t                2847 drivers/android/binder.c 	struct binder_transaction *t;
t                3058 drivers/android/binder.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                3059 drivers/android/binder.c 	if (t == NULL) {
t                3065 drivers/android/binder.c 	INIT_LIST_HEAD(&t->fd_fixups);
t                3067 drivers/android/binder.c 	spin_lock_init(&t->lock);
t                3078 drivers/android/binder.c 	t->debug_id = t_debug_id;
t                3083 drivers/android/binder.c 			     proc->pid, thread->pid, t->debug_id,
t                3092 drivers/android/binder.c 			     proc->pid, thread->pid, t->debug_id,
t                3100 drivers/android/binder.c 		t->from = thread;
t                3102 drivers/android/binder.c 		t->from = NULL;
t                3103 drivers/android/binder.c 	t->sender_euid = task_euid(proc->tsk);
t                3104 drivers/android/binder.c 	t->to_proc = target_proc;
t                3105 drivers/android/binder.c 	t->to_thread = target_thread;
t                3106 drivers/android/binder.c 	t->code = tr->code;
t                3107 drivers/android/binder.c 	t->flags = tr->flags;
t                3108 drivers/android/binder.c 	t->priority = task_nice(current);
t                3133 drivers/android/binder.c 	trace_binder_transaction(reply, t, target_node);
t                3135 drivers/android/binder.c 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
t                3137 drivers/android/binder.c 		!reply && (t->flags & TF_ONE_WAY));
t                3138 drivers/android/binder.c 	if (IS_ERR(t->buffer)) {
t                3142 drivers/android/binder.c 		return_error_param = PTR_ERR(t->buffer);
t                3146 drivers/android/binder.c 		t->buffer = NULL;
t                3156 drivers/android/binder.c 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
t                3158 drivers/android/binder.c 						  t->buffer, buf_offset,
t                3161 drivers/android/binder.c 			t->security_ctx = 0;
t                3167 drivers/android/binder.c 	t->buffer->debug_id = t->debug_id;
t                3168 drivers/android/binder.c 	t->buffer->transaction = t;
t                3169 drivers/android/binder.c 	t->buffer->target_node = target_node;
t                3170 drivers/android/binder.c 	trace_binder_transaction_alloc_buf(t->buffer);
t                3174 drivers/android/binder.c 				t->buffer, 0,
t                3187 drivers/android/binder.c 				t->buffer,
t                3232 drivers/android/binder.c 						  t->buffer,
t                3240 drivers/android/binder.c 		object_size = binder_get_object(target_proc, t->buffer,
t                3247 drivers/android/binder.c 					  (u64)t->buffer->data_size);
t                3262 drivers/android/binder.c 			ret = binder_translate_binder(fp, t, thread);
t                3266 drivers/android/binder.c 							t->buffer,
t                3280 drivers/android/binder.c 			ret = binder_translate_handle(fp, t, thread);
t                3283 drivers/android/binder.c 							t->buffer,
t                3297 drivers/android/binder.c 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
t                3303 drivers/android/binder.c 							t->buffer,
t                3320 drivers/android/binder.c 				binder_validate_ptr(target_proc, t->buffer,
t                3333 drivers/android/binder.c 			if (!binder_validate_fixup(target_proc, t->buffer,
t                3346 drivers/android/binder.c 			ret = binder_translate_fd_array(fda, parent, t, thread,
t                3374 drivers/android/binder.c 						t->buffer,
t                3388 drivers/android/binder.c 				t->buffer->user_data + sg_buf_offset;
t                3393 drivers/android/binder.c 			ret = binder_fixup_parent(t, thread, bp,
t                3400 drivers/android/binder.c 							t->buffer,
t                3421 drivers/android/binder.c 	t->work.type = BINDER_WORK_TRANSACTION;
t                3430 drivers/android/binder.c 		BUG_ON(t->buffer->async_transaction != 0);
t                3432 drivers/android/binder.c 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
t                3436 drivers/android/binder.c 	} else if (!(t->flags & TF_ONE_WAY)) {
t                3437 drivers/android/binder.c 		BUG_ON(t->buffer->async_transaction != 0);
t                3447 drivers/android/binder.c 		t->need_reply = 1;
t                3448 drivers/android/binder.c 		t->from_parent = thread->transaction_stack;
t                3449 drivers/android/binder.c 		thread->transaction_stack = t;
t                3451 drivers/android/binder.c 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
t                3453 drivers/android/binder.c 			binder_pop_transaction_ilocked(thread, t);
t                3459 drivers/android/binder.c 		BUG_ON(t->buffer->async_transaction != 1);
t                3461 drivers/android/binder.c 		if (!binder_proc_transaction(t, target_proc, NULL))
t                3486 drivers/android/binder.c 	binder_free_txn_fixups(t);
t                3487 drivers/android/binder.c 	trace_binder_transaction_failed_buffer_release(t->buffer);
t                3488 drivers/android/binder.c 	binder_transaction_buffer_release(target_proc, t->buffer,
t                3493 drivers/android/binder.c 	t->buffer->transaction = NULL;
t                3494 drivers/android/binder.c 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
t                3503 drivers/android/binder.c 	kfree(t);
t                4125 drivers/android/binder.c 				  struct binder_transaction *t)
t                4130 drivers/android/binder.c 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
t                4136 drivers/android/binder.c 				     t->debug_id, fd);
t                4142 drivers/android/binder.c 			     t->debug_id, fd);
t                4143 drivers/android/binder.c 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
t                4146 drivers/android/binder.c 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
t                4153 drivers/android/binder.c 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
t                4161 drivers/android/binder.c 							    t->buffer,
t                4232 drivers/android/binder.c 		struct binder_transaction *t = NULL;
t                4262 drivers/android/binder.c 			t = container_of(w, struct binder_transaction, work);
t                4427 drivers/android/binder.c 		if (!t)
t                4430 drivers/android/binder.c 		BUG_ON(t->buffer == NULL);
t                4431 drivers/android/binder.c 		if (t->buffer->target_node) {
t                4432 drivers/android/binder.c 			struct binder_node *target_node = t->buffer->target_node;
t                4436 drivers/android/binder.c 			t->saved_priority = task_nice(current);
t                4437 drivers/android/binder.c 			if (t->priority < target_node->min_priority &&
t                4438 drivers/android/binder.c 			    !(t->flags & TF_ONE_WAY))
t                4439 drivers/android/binder.c 				binder_set_nice(t->priority);
t                4440 drivers/android/binder.c 			else if (!(t->flags & TF_ONE_WAY) ||
t                4441 drivers/android/binder.c 				 t->saved_priority > target_node->min_priority)
t                4449 drivers/android/binder.c 		trd->code = t->code;
t                4450 drivers/android/binder.c 		trd->flags = t->flags;
t                4451 drivers/android/binder.c 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t                4453 drivers/android/binder.c 		t_from = binder_get_txn_from(t);
t                4464 drivers/android/binder.c 		ret = binder_apply_fd_fixups(proc, t);
t                4466 drivers/android/binder.c 			struct binder_buffer *buffer = t->buffer;
t                4467 drivers/android/binder.c 			bool oneway = !!(t->flags & TF_ONE_WAY);
t                4468 drivers/android/binder.c 			int tid = t->debug_id;
t                4473 drivers/android/binder.c 			binder_cleanup_transaction(t, "fd fixups failed",
t                4492 drivers/android/binder.c 		trd->data_size = t->buffer->data_size;
t                4493 drivers/android/binder.c 		trd->offsets_size = t->buffer->offsets_size;
t                4494 drivers/android/binder.c 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
t                4496 drivers/android/binder.c 					ALIGN(t->buffer->data_size,
t                4499 drivers/android/binder.c 		tr.secctx = t->security_ctx;
t                4500 drivers/android/binder.c 		if (t->security_ctx) {
t                4508 drivers/android/binder.c 			binder_cleanup_transaction(t, "put_user failed",
t                4518 drivers/android/binder.c 			binder_cleanup_transaction(t, "copy_to_user failed",
t                4525 drivers/android/binder.c 		trace_binder_transaction_received(t);
t                4533 drivers/android/binder.c 			     t->debug_id, t_from ? t_from->proc->pid : 0,
t                4535 drivers/android/binder.c 			     t->buffer->data_size, t->buffer->offsets_size,
t                4541 drivers/android/binder.c 		t->buffer->allow_user_free = 1;
t                4542 drivers/android/binder.c 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
t                4544 drivers/android/binder.c 			t->to_parent = thread->transaction_stack;
t                4545 drivers/android/binder.c 			t->to_thread = thread;
t                4546 drivers/android/binder.c 			thread->transaction_stack = t;
t                4549 drivers/android/binder.c 			binder_free_transaction(t);
t                4589 drivers/android/binder.c 			struct binder_transaction *t;
t                4591 drivers/android/binder.c 			t = container_of(w, struct binder_transaction, work);
t                4593 drivers/android/binder.c 			binder_cleanup_transaction(t, "process died.",
t                4710 drivers/android/binder.c 	struct binder_transaction *t;
t                4729 drivers/android/binder.c 	t = thread->transaction_stack;
t                4730 drivers/android/binder.c 	if (t) {
t                4731 drivers/android/binder.c 		spin_lock(&t->lock);
t                4732 drivers/android/binder.c 		if (t->to_thread == thread)
t                4733 drivers/android/binder.c 			send_reply = t;
t                4735 drivers/android/binder.c 		__acquire(&t->lock);
t                4739 drivers/android/binder.c 	while (t) {
t                4740 drivers/android/binder.c 		last_t = t;
t                4745 drivers/android/binder.c 			     t->debug_id,
t                4746 drivers/android/binder.c 			     (t->to_thread == thread) ? "in" : "out");
t                4748 drivers/android/binder.c 		if (t->to_thread == thread) {
t                4749 drivers/android/binder.c 			t->to_proc = NULL;
t                4750 drivers/android/binder.c 			t->to_thread = NULL;
t                4751 drivers/android/binder.c 			if (t->buffer) {
t                4752 drivers/android/binder.c 				t->buffer->transaction = NULL;
t                4753 drivers/android/binder.c 				t->buffer = NULL;
t                4755 drivers/android/binder.c 			t = t->to_parent;
t                4756 drivers/android/binder.c 		} else if (t->from == thread) {
t                4757 drivers/android/binder.c 			t->from = NULL;
t                4758 drivers/android/binder.c 			t = t->from_parent;
t                4762 drivers/android/binder.c 		if (t)
t                4763 drivers/android/binder.c 			spin_lock(&t->lock);
t                4765 drivers/android/binder.c 			__acquire(&t->lock);
t                4768 drivers/android/binder.c 	__release(&t->lock);
t                5544 drivers/android/binder.c 					     struct binder_transaction *t)
t                5547 drivers/android/binder.c 	struct binder_buffer *buffer = t->buffer;
t                5549 drivers/android/binder.c 	spin_lock(&t->lock);
t                5550 drivers/android/binder.c 	to_proc = t->to_proc;
t                5553 drivers/android/binder.c 		   prefix, t->debug_id, t,
t                5554 drivers/android/binder.c 		   t->from ? t->from->proc->pid : 0,
t                5555 drivers/android/binder.c 		   t->from ? t->from->pid : 0,
t                5557 drivers/android/binder.c 		   t->to_thread ? t->to_thread->pid : 0,
t                5558 drivers/android/binder.c 		   t->code, t->flags, t->priority, t->need_reply);
t                5559 drivers/android/binder.c 	spin_unlock(&t->lock);
t                5588 drivers/android/binder.c 	struct binder_transaction *t;
t                5592 drivers/android/binder.c 		t = container_of(w, struct binder_transaction, work);
t                5594 drivers/android/binder.c 				m, proc, transaction_prefix, t);
t                5631 drivers/android/binder.c 	struct binder_transaction *t;
t                5641 drivers/android/binder.c 	t = thread->transaction_stack;
t                5642 drivers/android/binder.c 	while (t) {
t                5643 drivers/android/binder.c 		if (t->from == thread) {
t                5645 drivers/android/binder.c 					"    outgoing transaction", t);
t                5646 drivers/android/binder.c 			t = t->from_parent;
t                5647 drivers/android/binder.c 		} else if (t->to_thread == thread) {
t                5649 drivers/android/binder.c 						 "    incoming transaction", t);
t                5650 drivers/android/binder.c 			t = t->to_parent;
t                5653 drivers/android/binder.c 					"    bad transaction", t);
t                5654 drivers/android/binder.c 			t = NULL;
t                  99 drivers/android/binder_trace.h 	TP_PROTO(bool reply, struct binder_transaction *t,
t                 101 drivers/android/binder_trace.h 	TP_ARGS(reply, t, target_node),
t                 112 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 114 drivers/android/binder_trace.h 		__entry->to_proc = t->to_proc->pid;
t                 115 drivers/android/binder_trace.h 		__entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
t                 117 drivers/android/binder_trace.h 		__entry->code = t->code;
t                 118 drivers/android/binder_trace.h 		__entry->flags = t->flags;
t                 127 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t),
t                 128 drivers/android/binder_trace.h 	TP_ARGS(t),
t                 134 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 140 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
t                 142 drivers/android/binder_trace.h 	TP_ARGS(t, node, rdata),
t                 152 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 165 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
t                 167 drivers/android/binder_trace.h 	TP_ARGS(t, node, rdata),
t                 177 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 190 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
t                 193 drivers/android/binder_trace.h 	TP_ARGS(t, node, src_ref, dest_ref),
t                 204 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 218 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
t                 219 drivers/android/binder_trace.h 	TP_ARGS(t, fd, offset),
t                 227 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 236 drivers/android/binder_trace.h 	TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
t                 237 drivers/android/binder_trace.h 	TP_ARGS(t, fd, offset),
t                 245 drivers/android/binder_trace.h 		__entry->debug_id = t->debug_id;
t                 129 drivers/ata/ata_generic.c 	u16 t;
t                 138 drivers/ata/ata_generic.c 	pci_read_config_word(dev, 0x40, &t);
t                 139 drivers/ata/ata_generic.c 	if (t != 0)
t                 145 drivers/ata/ata_generic.c 	pci_read_config_word(dev, 0x40, &t);
t                 146 drivers/ata/ata_generic.c 	if (t) {
t                 972 drivers/ata/libahci.c static void ahci_sw_activity_blink(struct timer_list *t)
t                 974 drivers/ata/libahci.c 	struct ahci_em_priv *emp = from_timer(emp, t, timer);
t                3225 drivers/ata/libata-core.c static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
t                3227 drivers/ata/libata-core.c 	q->setup	= EZ(t->setup,       T);
t                3228 drivers/ata/libata-core.c 	q->act8b	= EZ(t->act8b,       T);
t                3229 drivers/ata/libata-core.c 	q->rec8b	= EZ(t->rec8b,       T);
t                3230 drivers/ata/libata-core.c 	q->cyc8b	= EZ(t->cyc8b,       T);
t                3231 drivers/ata/libata-core.c 	q->active	= EZ(t->active,      T);
t                3232 drivers/ata/libata-core.c 	q->recover	= EZ(t->recover,     T);
t                3233 drivers/ata/libata-core.c 	q->dmack_hold	= EZ(t->dmack_hold,  T);
t                3234 drivers/ata/libata-core.c 	q->cycle	= EZ(t->cycle,       T);
t                3235 drivers/ata/libata-core.c 	q->udma		= EZ(t->udma,       UT);
t                3254 drivers/ata/libata-core.c 	const struct ata_timing *t = ata_timing;
t                3256 drivers/ata/libata-core.c 	while (xfer_mode > t->mode)
t                3257 drivers/ata/libata-core.c 		t++;
t                3259 drivers/ata/libata-core.c 	if (xfer_mode == t->mode)
t                3260 drivers/ata/libata-core.c 		return t;
t                3269 drivers/ata/libata-core.c 		       struct ata_timing *t, int T, int UT)
t                3282 drivers/ata/libata-core.c 	memcpy(t, s, sizeof(*s));
t                3301 drivers/ata/libata-core.c 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
t                3308 drivers/ata/libata-core.c 	ata_timing_quantize(t, t, T, UT);
t                3318 drivers/ata/libata-core.c 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
t                3325 drivers/ata/libata-core.c 	if (t->act8b + t->rec8b < t->cyc8b) {
t                3326 drivers/ata/libata-core.c 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
t                3327 drivers/ata/libata-core.c 		t->rec8b = t->cyc8b - t->act8b;
t                3330 drivers/ata/libata-core.c 	if (t->active + t->recover < t->cycle) {
t                3331 drivers/ata/libata-core.c 		t->active += (t->cycle - (t->active + t->recover)) / 2;
t                3332 drivers/ata/libata-core.c 		t->recover = t->cycle - t->active;
t                3338 drivers/ata/libata-core.c 	if (t->active + t->recover > t->cycle)
t                3339 drivers/ata/libata-core.c 		t->cycle = t->active + t->recover;
t                3364 drivers/ata/libata-core.c 	const struct ata_timing *t;
t                3370 drivers/ata/libata-core.c 	for (t = ata_timing_find_mode(base_mode);
t                3371 drivers/ata/libata-core.c 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
t                3377 drivers/ata/libata-core.c 			this_cycle = t->cycle;
t                3380 drivers/ata/libata-core.c 			this_cycle = t->udma;
t                3389 drivers/ata/libata-core.c 		last_mode = t->mode;
t                3802 drivers/ata/libata-core.c 	unsigned long last_jiffies, t;
t                3806 drivers/ata/libata-core.c 	t = ata_deadline(jiffies, params[2]);
t                3807 drivers/ata/libata-core.c 	if (time_before(t, deadline))
t                3808 drivers/ata/libata-core.c 		deadline = t;
t                 816 drivers/ata/libata-eh.c void ata_eh_fastdrain_timerfn(struct timer_list *t)
t                 818 drivers/ata/libata-eh.c 	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
t                  48 drivers/ata/libata-transport.c 	struct scsi_transport_template t;
t                  65 drivers/ata/libata-transport.c #define to_ata_internal(tmpl)	container_of(tmpl, struct ata_internal, t)
t                 719 drivers/ata/libata-transport.c 	i->t.eh_strategy_handler	= ata_scsi_error;
t                 720 drivers/ata/libata-transport.c 	i->t.user_scan			= ata_scsi_user_scan;
t                 722 drivers/ata/libata-transport.c 	i->t.host_attrs.ac.attrs = &i->port_attrs[0];
t                 723 drivers/ata/libata-transport.c 	i->t.host_attrs.ac.class = &ata_port_class.class;
t                 724 drivers/ata/libata-transport.c 	i->t.host_attrs.ac.match = ata_tport_match;
t                 725 drivers/ata/libata-transport.c 	transport_container_register(&i->t.host_attrs);
t                 764 drivers/ata/libata-transport.c 	return &i->t;
t                 771 drivers/ata/libata-transport.c void ata_release_transport(struct scsi_transport_template *t)
t                 773 drivers/ata/libata-transport.c 	struct ata_internal *i = to_ata_internal(t);
t                 775 drivers/ata/libata-transport.c 	transport_container_unregister(&i->t.host_attrs);
t                  15 drivers/ata/libata-transport.h void ata_release_transport(struct scsi_transport_template *t);
t                 141 drivers/ata/libata.h extern void ata_eh_fastdrain_timerfn(struct timer_list *t);
t                 116 drivers/ata/pata_acpi.c 	const struct ata_timing *t;
t                 122 drivers/ata/pata_acpi.c 	t = ata_timing_find_mode(adev->pio_mode);
t                 123 drivers/ata/pata_acpi.c 	acpi->gtm.drive[unit].pio = t->cycle;
t                 139 drivers/ata/pata_acpi.c 	const struct ata_timing *t;
t                 145 drivers/ata/pata_acpi.c 	t = ata_timing_find_mode(adev->dma_mode);
t                 147 drivers/ata/pata_acpi.c 		acpi->gtm.drive[unit].dma = t->udma;
t                 150 drivers/ata/pata_acpi.c 		acpi->gtm.drive[unit].dma = t->cycle;
t                 170 drivers/ata/pata_ali.c static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
t                 180 drivers/ata/pata_ali.c 	if (t != NULL) {
t                 181 drivers/ata/pata_ali.c 		t->setup = clamp_val(t->setup, 1, 8) & 7;
t                 182 drivers/ata/pata_ali.c 		t->act8b = clamp_val(t->act8b, 1, 8) & 7;
t                 183 drivers/ata/pata_ali.c 		t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
t                 184 drivers/ata/pata_ali.c 		t->active = clamp_val(t->active, 1, 8) & 7;
t                 185 drivers/ata/pata_ali.c 		t->recover = clamp_val(t->recover, 1, 16) & 15;
t                 187 drivers/ata/pata_ali.c 		pci_write_config_byte(pdev, cas, t->setup);
t                 188 drivers/ata/pata_ali.c 		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
t                 189 drivers/ata/pata_ali.c 		pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
t                 210 drivers/ata/pata_ali.c 	struct ata_timing t;
t                 213 drivers/ata/pata_ali.c 	ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
t                 217 drivers/ata/pata_ali.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
t                 220 drivers/ata/pata_ali.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
t                 227 drivers/ata/pata_ali.c 	ali_program_modes(ap, adev, &t, 0);
t                 245 drivers/ata/pata_ali.c 	struct ata_timing t;
t                 262 drivers/ata/pata_ali.c 		ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
t                 266 drivers/ata/pata_ali.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
t                 269 drivers/ata/pata_ali.c 				ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
t                 272 drivers/ata/pata_ali.c 		ali_program_modes(ap, adev, &t, 0);
t                  55 drivers/ata/pata_amd.c 	u8 t;
t                  85 drivers/ata/pata_amd.c 	pci_read_config_byte(pdev, offset + 0x0C, &t);
t                  86 drivers/ata/pata_amd.c 	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
t                  87 drivers/ata/pata_amd.c 	pci_write_config_byte(pdev, offset + 0x0C , t);
t                  99 drivers/ata/pata_amd.c 		t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
t                 103 drivers/ata/pata_amd.c 		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
t                 107 drivers/ata/pata_amd.c 		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
t                 111 drivers/ata/pata_amd.c 		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
t                 120 drivers/ata/pata_amd.c 		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
t                 207 drivers/ata/pata_atp867x.c 	struct ata_timing t, p;
t                 214 drivers/ata/pata_atp867x.c 	ata_timing_compute(adev, speed, &t, T, UT);
t                 217 drivers/ata/pata_atp867x.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
t                 227 drivers/ata/pata_atp867x.c 	b = atp867x_get_active_clocks_shifted(ap, t.active) |
t                 228 drivers/ata/pata_atp867x.c 	    atp867x_get_recover_clocks_shifted(t.recover);
t                 235 drivers/ata/pata_atp867x.c 	b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
t                 236 drivers/ata/pata_atp867x.c 	    atp867x_get_recover_clocks_shifted(t.rec8b);
t                 105 drivers/ata/pata_bk3710.c 	const struct ata_timing *t;
t                 111 drivers/ata/pata_bk3710.c 	t = ata_timing_find_mode(mode);
t                 112 drivers/ata/pata_bk3710.c 	cycletime = max_t(int, t->cycle, min_cycle);
t                 116 drivers/ata/pata_bk3710.c 	td = DIV_ROUND_UP(t->active, ideclk_period);
t                 153 drivers/ata/pata_bk3710.c 	const struct ata_timing *t;
t                 157 drivers/ata/pata_bk3710.c 	t = ata_timing_find_mode(XFER_PIO_0 + mode);
t                 161 drivers/ata/pata_bk3710.c 	t2 = DIV_ROUND_UP(t->active, ideclk_period);
t                 183 drivers/ata/pata_bk3710.c 	t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
t                 184 drivers/ata/pata_bk3710.c 	t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
t                 203 drivers/ata/pata_bk3710.c 	const struct ata_timing *t = ata_timing_find_mode(adev->pio_mode);
t                 216 drivers/ata/pata_bk3710.c 		if (pio < 3 && cycle_time < t->cycle)
t                 221 drivers/ata/pata_bk3710.c 		cycle_time = t->cycle;
t                  56 drivers/ata/pata_cmd640.c 	struct ata_timing t;
t                  63 drivers/ata/pata_cmd640.c 	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
t                  73 drivers/ata/pata_cmd640.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
t                  77 drivers/ata/pata_cmd640.c 	if (t.recover > 16) {
t                  78 drivers/ata/pata_cmd640.c 		t.active += t.recover - 16;
t                  79 drivers/ata/pata_cmd640.c 		t.recover = 16;
t                  81 drivers/ata/pata_cmd640.c 	if (t.active > 16)
t                  82 drivers/ata/pata_cmd640.c 		t.active = 16;
t                  87 drivers/ata/pata_cmd640.c 	if (t.recover > 1)
t                  88 drivers/ata/pata_cmd640.c 		t.recover--;	/* 640B only */
t                  90 drivers/ata/pata_cmd640.c 		t.recover = 15;
t                  92 drivers/ata/pata_cmd640.c 	if (t.setup > 4)
t                  93 drivers/ata/pata_cmd640.c 		t.setup = 0xC0;
t                  95 drivers/ata/pata_cmd640.c 		t.setup = setup_data[t.setup];
t                  98 drivers/ata/pata_cmd640.c 		t.active &= 0x0F;	/* 0 = 16 */
t                 103 drivers/ata/pata_cmd640.c 		reg |= t.setup;
t                 107 drivers/ata/pata_cmd640.c 		pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
t                 114 drivers/ata/pata_cmd640.c 		reg |= t.setup;
t                 116 drivers/ata/pata_cmd640.c 		timing->reg58[adev->devno] = (t.active << 4) | t.recover;
t                  97 drivers/ata/pata_cmd64x.c 	struct ata_timing t;
t                 118 drivers/ata/pata_cmd64x.c 	if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
t                 129 drivers/ata/pata_cmd64x.c 			ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
t                 134 drivers/ata/pata_cmd64x.c 		t.active, t.recover, t.setup);
t                 135 drivers/ata/pata_cmd64x.c 	if (t.recover > 16) {
t                 136 drivers/ata/pata_cmd64x.c 		t.active += t.recover - 16;
t                 137 drivers/ata/pata_cmd64x.c 		t.recover = 16;
t                 139 drivers/ata/pata_cmd64x.c 	if (t.active > 16)
t                 140 drivers/ata/pata_cmd64x.c 		t.active = 16;
t                 145 drivers/ata/pata_cmd64x.c 	if (t.recover == 16)
t                 146 drivers/ata/pata_cmd64x.c 		t.recover = 0;
t                 147 drivers/ata/pata_cmd64x.c 	else if (t.recover > 1)
t                 148 drivers/ata/pata_cmd64x.c 		t.recover--;
t                 150 drivers/ata/pata_cmd64x.c 		t.recover = 15;
t                 152 drivers/ata/pata_cmd64x.c 	if (t.setup > 4)
t                 153 drivers/ata/pata_cmd64x.c 		t.setup = 0xC0;
t                 155 drivers/ata/pata_cmd64x.c 		t.setup = setup_data[t.setup];
t                 157 drivers/ata/pata_cmd64x.c 	t.active &= 0x0F;	/* 0 = 16 */
t                 162 drivers/ata/pata_cmd64x.c 	reg |= t.setup;
t                 166 drivers/ata/pata_cmd64x.c 	pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
t                  55 drivers/ata/pata_cypress.c 	struct ata_timing t;
t                  60 drivers/ata/pata_cypress.c 	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
t                  65 drivers/ata/pata_cypress.c 	time_16 = clamp_val(t.recover - 1, 0, 15) |
t                  66 drivers/ata/pata_cypress.c 		  (clamp_val(t.active - 1, 0, 15) << 4);
t                  67 drivers/ata/pata_cypress.c 	time_8 = clamp_val(t.act8b - 1, 0, 15) |
t                  68 drivers/ata/pata_cypress.c 		 (clamp_val(t.rec8b - 1, 0, 15) << 4);
t                  74 drivers/ata/pata_cypress.c 		addr |= clamp_val(t.setup - 1, 0, 15);
t                  84 drivers/ata/pata_cypress.c 		addr |= (clamp_val(t.setup - 1, 0, 15) << 4);
t                 130 drivers/ata/pata_ep93xx.c 	struct ata_timing t;
t                 267 drivers/ata/pata_ep93xx.c 	const struct ata_timing *t = &drv_data->t;
t                 268 drivers/ata/pata_ep93xx.c 	unsigned long t0 = reg ? t->cyc8b : t->cycle;
t                 269 drivers/ata/pata_ep93xx.c 	unsigned long t2 = reg ? t->act8b : t->active;
t                 270 drivers/ata/pata_ep93xx.c 	unsigned long t2i = reg ? t->rec8b : t->recover;
t                 272 drivers/ata/pata_ep93xx.c 	ep93xx_pata_rw_begin(base, addr, t->setup);
t                 301 drivers/ata/pata_ep93xx.c 	const struct ata_timing *t = &drv_data->t;
t                 302 drivers/ata/pata_ep93xx.c 	unsigned long t0 = reg ? t->cyc8b : t->cycle;
t                 303 drivers/ata/pata_ep93xx.c 	unsigned long t2 = reg ? t->act8b : t->active;
t                 304 drivers/ata/pata_ep93xx.c 	unsigned long t2i = reg ? t->rec8b : t->recover;
t                 306 drivers/ata/pata_ep93xx.c 	ep93xx_pata_rw_begin(base, addr, t->setup);
t                 344 drivers/ata/pata_ep93xx.c 	ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
t                 346 drivers/ata/pata_ep93xx.c 		struct ata_timing t;
t                 347 drivers/ata/pata_ep93xx.c 		ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
t                 348 drivers/ata/pata_ep93xx.c 		ata_timing_merge(&t, &drv_data->t, &drv_data->t,
t                 872 drivers/ata/pata_ep93xx.c 	drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
t                 234 drivers/ata/pata_hpt366.c 	u32 mask, reg, t;
t                 244 drivers/ata/pata_hpt366.c 	t = hpt36x_find_mode(ap, mode);
t                 252 drivers/ata/pata_hpt366.c 	reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000;
t                 191 drivers/ata/pata_icside.c 	struct ata_timing t;
t                 198 drivers/ata/pata_icside.c 	if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
t                 205 drivers/ata/pata_icside.c 	if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
t                 207 drivers/ata/pata_icside.c 	else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
t                 209 drivers/ata/pata_icside.c 	else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
t                 215 drivers/ata/pata_icside.c 		     t.active, t.recover, t.cycle, iomd_type);
t                 351 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 354 drivers/ata/pata_legacy.c 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
t                 356 drivers/ata/pata_legacy.c 	active = clamp_val(t.active, 2, 15);
t                 357 drivers/ata/pata_legacy.c 	recover = clamp_val(t.recover, 4, 15);
t                 385 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 388 drivers/ata/pata_legacy.c 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
t                 390 drivers/ata/pata_legacy.c 	active = clamp_val(t.active, 2, 15);
t                 391 drivers/ata/pata_legacy.c 	recover = clamp_val(t.recover, 2, 16) & 0x0F;
t                 449 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 464 drivers/ata/pata_legacy.c 	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
t                 471 drivers/ata/pata_legacy.c 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
t                 474 drivers/ata/pata_legacy.c 	active = clamp_val(t.active, 2, 17) - 2;
t                 475 drivers/ata/pata_legacy.c 	recover = clamp_val(t.recover, 1, 16) - 1;
t                 476 drivers/ata/pata_legacy.c 	setup = clamp_val(t.setup, 1, 4) - 1;
t                 524 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 543 drivers/ata/pata_legacy.c 	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
t                 550 drivers/ata/pata_legacy.c 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
t                 553 drivers/ata/pata_legacy.c 	active = clamp_val(t.active, 2, 17) - 2;
t                 554 drivers/ata/pata_legacy.c 	recover = clamp_val(t.recover, 1, 16) - 1;
t                 555 drivers/ata/pata_legacy.c 	setup = clamp_val(t.setup, 1, 4) - 1;
t                 640 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 646 drivers/ata/pata_legacy.c 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
t                 649 drivers/ata/pata_legacy.c 		active = 8 - clamp_val(t.active, 1, 8);
t                 650 drivers/ata/pata_legacy.c 		recovery = 18 - clamp_val(t.recover, 3, 18);
t                 652 drivers/ata/pata_legacy.c 		active = 9 - clamp_val(t.active, 2, 9);
t                 653 drivers/ata/pata_legacy.c 		recovery = 15 - clamp_val(t.recover, 0, 15);
t                 777 drivers/ata/pata_legacy.c 	struct ata_timing t;
t                 787 drivers/ata/pata_legacy.c 		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
t                 789 drivers/ata/pata_legacy.c 		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
t                 791 drivers/ata/pata_legacy.c 	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
t                 792 drivers/ata/pata_legacy.c 	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
t                 803 drivers/ata/pata_legacy.c 	reg |= (clamp_val(t.setup, 0, 3) << 6);
t                  87 drivers/ata/pata_macio.c #define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
t                  88 drivers/ata/pata_macio.c #define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
t                 395 drivers/ata/pata_macio.c 	const struct pata_macio_timing *t;
t                 408 drivers/ata/pata_macio.c 	t = pata_macio_find_timing(priv, adev->pio_mode);
t                 409 drivers/ata/pata_macio.c 	if (t == NULL) {
t                 412 drivers/ata/pata_macio.c 		t = pata_macio_find_timing(priv, XFER_PIO_0);
t                 414 drivers/ata/pata_macio.c 	BUG_ON(t == NULL);
t                 417 drivers/ata/pata_macio.c 	priv->treg[adev->devno][0] |= t->reg1;
t                 420 drivers/ata/pata_macio.c 	t = pata_macio_find_timing(priv, adev->dma_mode);
t                 421 drivers/ata/pata_macio.c 	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
t                 423 drivers/ata/pata_macio.c 		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
t                 425 drivers/ata/pata_macio.c 	BUG_ON(t == NULL);
t                 428 drivers/ata/pata_macio.c 	priv->treg[adev->devno][0] |= t->reg1;
t                 429 drivers/ata/pata_macio.c 	priv->treg[adev->devno][1] |= t->reg2;
t                 296 drivers/ata/pata_mpc52xx.c 	struct mpc52xx_ata_timings *t = &priv->timings[dev];
t                 302 drivers/ata/pata_mpc52xx.c 	t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
t                 303 drivers/ata/pata_mpc52xx.c 	t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t                 304 drivers/ata/pata_mpc52xx.c 	t->using_udma = 0;
t                 313 drivers/ata/pata_mpc52xx.c 	struct mpc52xx_ata_timings *t = &priv->timings[dev];
t                 319 drivers/ata/pata_mpc52xx.c 	t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
t                 320 drivers/ata/pata_mpc52xx.c 	t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
t                 321 drivers/ata/pata_mpc52xx.c 	t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
t                 322 drivers/ata/pata_mpc52xx.c 	t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
t                 323 drivers/ata/pata_mpc52xx.c 	t->udma5 = (u32)s->tzah << 24;
t                 324 drivers/ata/pata_mpc52xx.c 	t->using_udma = 1;
t                  59 drivers/ata/pata_ns87415.c 	struct ata_timing t;
t                  67 drivers/ata/pata_ns87415.c 	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
t                  69 drivers/ata/pata_ns87415.c 	clocking = 17 - clamp_val(t.active, 2, 17);
t                  70 drivers/ata/pata_ns87415.c 	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
t                 250 drivers/ata/pata_via.c 	struct ata_timing t, p;
t                 267 drivers/ata/pata_via.c 	ata_timing_compute(adev, mode, &t, T, UT);
t                 273 drivers/ata/pata_via.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
t                 284 drivers/ata/pata_via.c 		setup |= (clamp_val(t.setup, 1, 4) - 1) << shift;
t                 290 drivers/ata/pata_via.c 		((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
t                 292 drivers/ata/pata_via.c 		((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
t                 298 drivers/ata/pata_via.c 		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
t                 301 drivers/ata/pata_via.c 		ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
t                 304 drivers/ata/pata_via.c 		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
t                 307 drivers/ata/pata_via.c 		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
t                 320 drivers/ata/pata_via.c 		if (t.udma) {
t                3264 drivers/ata/sata_mv.c 	u32 t;
t                3269 drivers/ata/sata_mv.c 	t = readl(reg);
t                3270 drivers/ata/sata_mv.c 	writel(t | STOP_PCI_MASTER, reg);
t                3274 drivers/ata/sata_mv.c 		t = readl(reg);
t                3275 drivers/ata/sata_mv.c 		if (PCI_MASTER_EMPTY & t)
t                3278 drivers/ata/sata_mv.c 	if (!(PCI_MASTER_EMPTY & t)) {
t                3287 drivers/ata/sata_mv.c 		writel(t | GLOB_SFT_RST, reg);
t                3288 drivers/ata/sata_mv.c 		t = readl(reg);
t                3290 drivers/ata/sata_mv.c 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
t                3292 drivers/ata/sata_mv.c 	if (!(GLOB_SFT_RST & t)) {
t                3301 drivers/ata/sata_mv.c 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
t                3302 drivers/ata/sata_mv.c 		t = readl(reg);
t                3304 drivers/ata/sata_mv.c 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
t                3306 drivers/ata/sata_mv.c 	if (GLOB_SFT_RST & t) {
t                 281 drivers/atm/ambassador.c static void do_housekeeping (struct timer_list *t);
t                1435 drivers/atm/ambassador.c     amb_txq * t = &dev->txq;
t                1437 drivers/atm/ambassador.c 		    t->pending, t->maximum, t->high, t->filled);
t                1481 drivers/atm/ambassador.c static void do_housekeeping (struct timer_list *t) {
t                1482 drivers/atm/ambassador.c   amb_dev * dev = from_timer(dev, t, housekeeping);
t                1378 drivers/atm/firestream.c 	void  *t;
t                1381 drivers/atm/firestream.c 		t = kmalloc (size, flags);
t                1382 drivers/atm/firestream.c 		if ((unsigned long)t & (alignment-1)) {
t                1383 drivers/atm/firestream.c 			printk ("Kmalloc doesn't align things correctly! %p\n", t);
t                1384 drivers/atm/firestream.c 			kfree (t);
t                1387 drivers/atm/firestream.c 		return t;
t                1647 drivers/atm/firestream.c static void fs_poll (struct timer_list *t)
t                1649 drivers/atm/firestream.c 	struct fs_dev *dev = from_timer(dev, t, timer);
t                 345 drivers/atm/horizon.c static void do_housekeeping (struct timer_list *t);
t                1406 drivers/atm/horizon.c static void do_housekeeping (struct timer_list *t) {
t                1408 drivers/atm/horizon.c   hrz_dev * dev = from_timer(dev, t, housekeeping);
t                1530 drivers/atm/idt77252.c tst_timer(struct timer_list *t)
t                1532 drivers/atm/idt77252.c 	struct idt77252_dev *card = from_timer(card, t, tst_timer);
t                2075 drivers/atm/idt77252.c idt77252_est_timer(struct timer_list *t)
t                2077 drivers/atm/idt77252.c 	struct rate_estimator *est = from_timer(est, t, timer);
t                 703 drivers/atm/iphase.c         u32	t;
t                 723 drivers/atm/iphase.c 	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
t                 724 drivers/atm/iphase.c 	while (!(t & NVDO))
t                 725 drivers/atm/iphase.c 		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
t                 740 drivers/atm/iphase.c         u32	t;
t                 753 drivers/atm/iphase.c 		NVRAM_CLKIN(t);
t                 754 drivers/atm/iphase.c 		val |= (t << i);
t                1374 drivers/atm/iphase.h 		u32 t; \
t                1375 drivers/atm/iphase.h 		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); \
t                1376 drivers/atm/iphase.h 		t &= (val); \
t                1377 drivers/atm/iphase.h 		writel(t, iadev->reg+IPHASE5575_EEPROM_ACCESS); \
t                1388 drivers/atm/iphase.h 		u32 t; \
t                1389 drivers/atm/iphase.h 		t =  readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); \
t                1390 drivers/atm/iphase.h 		t |= (val); \
t                1391 drivers/atm/iphase.h 		writel(t, iadev->reg+IPHASE5575_EEPROM_ACCESS); \
t                 477 drivers/atm/lanai.c 	u32 t;
t                 478 drivers/atm/lanai.c 	t = readl(reg_addr(lanai, reg));
t                 480 drivers/atm/lanai.c 	    (int) reg, t);
t                 481 drivers/atm/lanai.c 	return t;
t                1760 drivers/atm/lanai.c static void lanai_timed_poll(struct timer_list *t)
t                1762 drivers/atm/lanai.c 	struct lanai_dev *lanai = from_timer(lanai, t, timer);
t                 228 drivers/auxdisplay/img-ascii-lcd.c static void img_ascii_lcd_scroll(struct timer_list *t)
t                 230 drivers/auxdisplay/img-ascii-lcd.c 	struct img_ascii_lcd_ctx *ctx = from_timer(ctx, t, timer);
t                 277 drivers/base/arch_topology.c 	struct device_node *t;
t                 281 drivers/base/arch_topology.c 		t = of_get_child_by_name(core, name);
t                 282 drivers/base/arch_topology.c 		if (t) {
t                 284 drivers/base/arch_topology.c 			cpu = get_cpu_for_node(t);
t                 291 drivers/base/arch_topology.c 				       t);
t                 292 drivers/base/arch_topology.c 				of_node_put(t);
t                 295 drivers/base/arch_topology.c 			of_node_put(t);
t                 298 drivers/base/arch_topology.c 	} while (t);
t                 513 drivers/base/power/main.c static void dpm_watchdog_handler(struct timer_list *t)
t                 515 drivers/base/power/main.c 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
t                  62 drivers/base/power/wakeup.c static void pm_wakeup_timer_fn(struct timer_list *t);
t                 737 drivers/base/power/wakeup.c static void pm_wakeup_timer_fn(struct timer_list *t)
t                 739 drivers/base/power/wakeup.c 	struct wakeup_source *ws = from_timer(ws, t, timer);
t                 258 drivers/base/regmap/regmap-irq.c 	const struct regmap_irq_type *t = &irq_data->type;
t                 260 drivers/base/regmap/regmap-irq.c 	if ((t->types_supported & type) != type)
t                 263 drivers/base/regmap/regmap-irq.c 	reg = t->type_reg_offset / map->reg_stride;
t                 265 drivers/base/regmap/regmap-irq.c 	if (t->type_reg_mask)
t                 266 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] &= ~t->type_reg_mask;
t                 268 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] &= ~(t->type_falling_val |
t                 269 drivers/base/regmap/regmap-irq.c 				      t->type_rising_val |
t                 270 drivers/base/regmap/regmap-irq.c 				      t->type_level_low_val |
t                 271 drivers/base/regmap/regmap-irq.c 				      t->type_level_high_val);
t                 274 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] |= t->type_falling_val;
t                 278 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] |= t->type_rising_val;
t                 282 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] |= (t->type_falling_val |
t                 283 drivers/base/regmap/regmap-irq.c 					t->type_rising_val);
t                 287 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] |= t->type_level_high_val;
t                 291 drivers/base/regmap/regmap-irq.c 		d->type_buf[reg] |= t->type_level_low_val;
t                  18 drivers/base/regmap/regmap-spi.c 	struct spi_transfer t[2];
t                  43 drivers/base/regmap/regmap-spi.c 	struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
t                  47 drivers/base/regmap/regmap-spi.c 	spi_message_add_tail(&t[0], &m);
t                  48 drivers/base/regmap/regmap-spi.c 	spi_message_add_tail(&t[1], &m);
t                  64 drivers/base/regmap/regmap-spi.c 	async->t[0].tx_buf = reg;
t                  65 drivers/base/regmap/regmap-spi.c 	async->t[0].len = reg_len;
t                  66 drivers/base/regmap/regmap-spi.c 	async->t[1].tx_buf = val;
t                  67 drivers/base/regmap/regmap-spi.c 	async->t[1].len = val_len;
t                  70 drivers/base/regmap/regmap-spi.c 	spi_message_add_tail(&async->t[0], &async->m);
t                  72 drivers/base/regmap/regmap-spi.c 		spi_message_add_tail(&async->t[1], &async->m);
t                  90 drivers/bcma/sprom.c 	static const u8 t[] = {
t                 124 drivers/bcma/sprom.c 	return t[crc ^ data];
t                 124 drivers/block/aoe/aoe.h 	struct aoetgt *t;		/* parent target I belong to */
t                 222 drivers/block/aoe/aoe.h void aoecmd_wreset(struct aoetgt *t);
t                  52 drivers/block/aoe/aoeblk.c 	struct aoetgt *t = d->targets[0];
t                  54 drivers/block/aoe/aoeblk.c 	if (t == NULL)
t                  56 drivers/block/aoe/aoeblk.c 	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
t                  64 drivers/block/aoe/aoeblk.c 	struct aoetgt **t, **te;
t                  71 drivers/block/aoe/aoeblk.c 	t = d->targets;
t                  72 drivers/block/aoe/aoeblk.c 	te = t + d->ntargets;
t                  73 drivers/block/aoe/aoeblk.c 	for (; t < te && *t; t++) {
t                  74 drivers/block/aoe/aoeblk.c 		ifp = (*t)->ifs;
t                 116 drivers/block/aoe/aoeblk.c 	struct aoetgt **t, **te;
t                 131 drivers/block/aoe/aoeblk.c 	t = d->targets;
t                 132 drivers/block/aoe/aoeblk.c 	te = t + d->ntargets;
t                 133 drivers/block/aoe/aoeblk.c 	for (; t < te && *t; t++) {
t                 135 drivers/block/aoe/aoeblk.c 		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
t                 137 drivers/block/aoe/aoeblk.c 			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
t                 138 drivers/block/aoe/aoeblk.c 		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
t                 139 drivers/block/aoe/aoeblk.c 			(*t)->maxout, (*t)->nframes);
t                 140 drivers/block/aoe/aoeblk.c 		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
t                 141 drivers/block/aoe/aoeblk.c 		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
t                 142 drivers/block/aoe/aoeblk.c 		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
t                 143 drivers/block/aoe/aoeblk.c 		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
t                 144 drivers/block/aoe/aoeblk.c 		ifp = (*t)->ifs;
t                 145 drivers/block/aoe/aoeblk.c 		ife = ifp + ARRAY_SIZE((*t)->ifs);
t                 129 drivers/block/aoe/aoecmd.c aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
t                 133 drivers/block/aoe/aoecmd.c 	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
t                 134 drivers/block/aoe/aoecmd.c 	memcpy(h->dst, t->addr, sizeof h->dst);
t                 157 drivers/block/aoe/aoecmd.c ifrotate(struct aoetgt *t)
t                 161 drivers/block/aoe/aoecmd.c 	ifp = t->ifp;
t                 163 drivers/block/aoe/aoecmd.c 	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
t                 164 drivers/block/aoe/aoecmd.c 		ifp = t->ifs;
t                 167 drivers/block/aoe/aoecmd.c 	return t->ifp = ifp;
t                 195 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                 197 drivers/block/aoe/aoecmd.c 	t = f->t;
t                 202 drivers/block/aoe/aoecmd.c 	list_add(&f->head, &t->ffree);
t                 206 drivers/block/aoe/aoecmd.c newtframe(struct aoedev *d, struct aoetgt *t)
t                 212 drivers/block/aoe/aoecmd.c 	if (list_empty(&t->ffree)) {
t                 213 drivers/block/aoe/aoecmd.c 		if (t->falloc >= NSKBPOOLMAX*2)
t                 218 drivers/block/aoe/aoecmd.c 		t->falloc++;
t                 219 drivers/block/aoe/aoecmd.c 		f->t = t;
t                 221 drivers/block/aoe/aoecmd.c 		pos = t->ffree.next;
t                 253 drivers/block/aoe/aoecmd.c 	struct aoetgt *t, **tt;
t                 267 drivers/block/aoe/aoecmd.c 		t = *tt;
t                 268 drivers/block/aoe/aoecmd.c 		if (!t->taint) {
t                 270 drivers/block/aoe/aoecmd.c 			totout += t->nout;
t                 272 drivers/block/aoe/aoecmd.c 		if (t->nout < t->maxout
t                 273 drivers/block/aoe/aoecmd.c 		&& (use_tainted || !t->taint)
t                 274 drivers/block/aoe/aoecmd.c 		&& t->ifp->nd) {
t                 275 drivers/block/aoe/aoecmd.c 			f = newtframe(d, t);
t                 277 drivers/block/aoe/aoecmd.c 				ifrotate(t);
t                 310 drivers/block/aoe/aoecmd.c 	struct aoedev *d = f->t->d;
t                 320 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                 335 drivers/block/aoe/aoecmd.c 	t = f->t;
t                 336 drivers/block/aoe/aoecmd.c 	f->tag = aoehdr_atainit(t->d, t, h);
t                 338 drivers/block/aoe/aoecmd.c 	t->nout++;
t                 345 drivers/block/aoe/aoecmd.c 	if (t->d->flags & DEVFL_EXT) {
t                 358 drivers/block/aoe/aoecmd.c 		t->wpkts++;
t                 360 drivers/block/aoe/aoecmd.c 		t->rpkts++;
t                 365 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
t                 457 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                 461 drivers/block/aoe/aoecmd.c 	t = f->t;
t                 464 drivers/block/aoe/aoecmd.c 	if (ifrotate(t) == NULL) {
t                 477 drivers/block/aoe/aoecmd.c 			h->src, h->dst, t->nout);
t                 484 drivers/block/aoe/aoecmd.c 	memcpy(h->dst, t->addr, sizeof h->dst);
t                 485 drivers/block/aoe/aoecmd.c 	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
t                 487 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
t                 526 drivers/block/aoe/aoecmd.c getif(struct aoetgt *t, struct net_device *nd)
t                 530 drivers/block/aoe/aoecmd.c 	p = t->ifs;
t                 539 drivers/block/aoe/aoecmd.c ejectif(struct aoetgt *t, struct aoeif *ifp)
t                 546 drivers/block/aoe/aoecmd.c 	e = t->ifs + NAOEIFS - 1;
t                 559 drivers/block/aoe/aoecmd.c 	nf = newframe(f->t->d);
t                 562 drivers/block/aoe/aoecmd.c 	if (nf->t == f->t) {
t                 580 drivers/block/aoe/aoecmd.c probe(struct aoetgt *t)
t                 589 drivers/block/aoe/aoecmd.c 	d = t->d;
t                 590 drivers/block/aoe/aoecmd.c 	f = newtframe(d, t);
t                 594 drivers/block/aoe/aoecmd.c 			t->addr,
t                 600 drivers/block/aoe/aoecmd.c 	ifrotate(t);
t                 601 drivers/block/aoe/aoecmd.c 	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
t                 627 drivers/block/aoe/aoecmd.c 	long t;
t                 629 drivers/block/aoe/aoecmd.c 	t = 2 * d->rttavg >> RTTSCALE;
t                 630 drivers/block/aoe/aoecmd.c 	t += 8 * d->rttdev >> RTTDSCALE;
t                 631 drivers/block/aoe/aoecmd.c 	if (t == 0)
t                 632 drivers/block/aoe/aoecmd.c 		t = 1;
t                 634 drivers/block/aoe/aoecmd.c 	return t;
t                 640 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                 652 drivers/block/aoe/aoecmd.c 		t = f->t;
t                 653 drivers/block/aoe/aoecmd.c 		if (t->taint) {
t                 657 drivers/block/aoe/aoecmd.c 					if (t->nout_probes == 0
t                 659 drivers/block/aoe/aoecmd.c 						probe(t);
t                 660 drivers/block/aoe/aoecmd.c 						t->nout_probes++;
t                 666 drivers/block/aoe/aoecmd.c 					t = f->t;
t                 671 drivers/block/aoe/aoecmd.c 			} else if (tsince_hr(f) < t->taint * rto(d)) {
t                 680 drivers/block/aoe/aoecmd.c 			f->t->d->flags |= DEVFL_KICKME;
t                 683 drivers/block/aoe/aoecmd.c 		if (t->nout >= t->maxout)
t                 686 drivers/block/aoe/aoecmd.c 		t->nout++;
t                 688 drivers/block/aoe/aoecmd.c 			t->nout_probes++;
t                 700 drivers/block/aoe/aoecmd.c scorn(struct aoetgt *t)
t                 704 drivers/block/aoe/aoecmd.c 	n = t->taint++;
t                 705 drivers/block/aoe/aoecmd.c 	t->taint += t->taint * 2;
t                 706 drivers/block/aoe/aoecmd.c 	if (n > t->taint)
t                 707 drivers/block/aoe/aoecmd.c 		t->taint = n;
t                 708 drivers/block/aoe/aoecmd.c 	if (t->taint > MAX_TAINT)
t                 709 drivers/block/aoe/aoecmd.c 		t->taint = MAX_TAINT;
t                 730 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                 786 drivers/block/aoe/aoecmd.c 		t = f->t;
t                 791 drivers/block/aoe/aoecmd.c 			scorn(t); /* avoid this target */
t                 793 drivers/block/aoe/aoecmd.c 		if (t->maxout != 1) {
t                 794 drivers/block/aoe/aoecmd.c 			t->ssthresh = t->maxout / 2;
t                 795 drivers/block/aoe/aoecmd.c 			t->maxout = 1;
t                 799 drivers/block/aoe/aoecmd.c 			t->nout_probes--;
t                 801 drivers/block/aoe/aoecmd.c 			ifp = getif(t, f->skb->dev);
t                 802 drivers/block/aoe/aoecmd.c 			if (ifp && ++ifp->lost > (t->nframes << 1)
t                 803 drivers/block/aoe/aoecmd.c 			&& (ifp != t->ifs || t->ifs[1].nd)) {
t                 804 drivers/block/aoe/aoecmd.c 				ejectif(t, ifp);
t                 809 drivers/block/aoe/aoecmd.c 		t->nout--;
t                 927 drivers/block/aoe/aoecmd.c ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
t                 969 drivers/block/aoe/aoecmd.c 			t->addr,
t                 985 drivers/block/aoe/aoecmd.c calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
t                 999 drivers/block/aoe/aoecmd.c 	if (!t || t->maxout >= t->nframes)
t                1001 drivers/block/aoe/aoecmd.c 	if (t->maxout < t->ssthresh)
t                1002 drivers/block/aoe/aoecmd.c 		t->maxout += 1;
t                1003 drivers/block/aoe/aoecmd.c 	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
t                1004 drivers/block/aoe/aoecmd.c 		t->maxout += 1;
t                1005 drivers/block/aoe/aoecmd.c 		t->next_cwnd = t->maxout;
t                1012 drivers/block/aoe/aoecmd.c 	struct aoetgt **t, **e;
t                1014 drivers/block/aoe/aoecmd.c 	t = d->targets;
t                1015 drivers/block/aoe/aoecmd.c 	e = t + d->ntargets;
t                1016 drivers/block/aoe/aoecmd.c 	for (; t < e && *t; t++)
t                1017 drivers/block/aoe/aoecmd.c 		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
t                1018 drivers/block/aoe/aoecmd.c 			return *t;
t                1083 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                1092 drivers/block/aoe/aoecmd.c 	t = f->t;
t                1093 drivers/block/aoe/aoecmd.c 	d = t->d;
t                1142 drivers/block/aoe/aoecmd.c 		ifp = getif(t, skb->dev);
t                1158 drivers/block/aoe/aoecmd.c 		ataid_complete(d, t, skb->data);
t                1169 drivers/block/aoe/aoecmd.c 	if (t->taint > 0
t                1170 drivers/block/aoe/aoecmd.c 	&& --t->taint > 0
t                1171 drivers/block/aoe/aoecmd.c 	&& t->nout_probes == 0) {
t                1174 drivers/block/aoe/aoecmd.c 			probe(t);
t                1175 drivers/block/aoe/aoecmd.c 			t->nout_probes++;
t                1212 drivers/block/aoe/aoecmd.c 		actual_id = f->t->d->aoeminor % ncpus;
t                1285 drivers/block/aoe/aoecmd.c 	id = f->t->d->aoeminor % ncpus;
t                1328 drivers/block/aoe/aoecmd.c 		calc_rttavg(d, f->t, tsince_hr(f));
t                1329 drivers/block/aoe/aoecmd.c 		f->t->nout--;
t                1331 drivers/block/aoe/aoecmd.c 			f->t->nout_probes--;
t                1383 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                1389 drivers/block/aoe/aoecmd.c 	t = *d->tgt;
t                1397 drivers/block/aoe/aoecmd.c 	f->tag = aoehdr_atainit(d, t, h);
t                1399 drivers/block/aoe/aoecmd.c 	t->nout++;
t                1408 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
t                1444 drivers/block/aoe/aoecmd.c 	struct aoetgt *t, **tt, **te;
t                1456 drivers/block/aoe/aoecmd.c 	t = kzalloc(sizeof(*t), GFP_ATOMIC);
t                1457 drivers/block/aoe/aoecmd.c 	if (!t)
t                1459 drivers/block/aoe/aoecmd.c 	t->nframes = nframes;
t                1460 drivers/block/aoe/aoecmd.c 	t->d = d;
t                1461 drivers/block/aoe/aoecmd.c 	memcpy(t->addr, addr, sizeof t->addr);
t                1462 drivers/block/aoe/aoecmd.c 	t->ifp = t->ifs;
t                1463 drivers/block/aoe/aoecmd.c 	aoecmd_wreset(t);
t                1464 drivers/block/aoe/aoecmd.c 	t->maxout = t->nframes / 2;
t                1465 drivers/block/aoe/aoecmd.c 	INIT_LIST_HEAD(&t->ffree);
t                1466 drivers/block/aoe/aoecmd.c 	return *tt = t;
t                1476 drivers/block/aoe/aoecmd.c 	struct aoetgt **t, **e;
t                1479 drivers/block/aoe/aoecmd.c 	t = d->targets;
t                1480 drivers/block/aoe/aoecmd.c 	e = t + d->ntargets;
t                1481 drivers/block/aoe/aoecmd.c 	for (; t < e && *t; t++)
t                1482 drivers/block/aoe/aoecmd.c 		if (bcnt == 0 || bcnt > (*t)->minbcnt)
t                1483 drivers/block/aoe/aoecmd.c 			bcnt = (*t)->minbcnt;
t                1492 drivers/block/aoe/aoecmd.c setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
t                1498 drivers/block/aoe/aoecmd.c 	d = t->d;
t                1500 drivers/block/aoe/aoecmd.c 	p = t->ifs;
t                1520 drivers/block/aoe/aoecmd.c 	t->minbcnt = minbcnt;
t                1530 drivers/block/aoe/aoecmd.c 	struct aoetgt *t;
t                1573 drivers/block/aoe/aoecmd.c 	t = gettgt(d, h->src);
t                1574 drivers/block/aoe/aoecmd.c 	if (t) {
t                1575 drivers/block/aoe/aoecmd.c 		t->nframes = n;
t                1576 drivers/block/aoe/aoecmd.c 		if (n < t->maxout)
t                1577 drivers/block/aoe/aoecmd.c 			aoecmd_wreset(t);
t                1579 drivers/block/aoe/aoecmd.c 		t = addtgt(d, h->src, n);
t                1580 drivers/block/aoe/aoecmd.c 		if (!t)
t                1589 drivers/block/aoe/aoecmd.c 	setifbcnt(t, skb->dev, n);
t                1607 drivers/block/aoe/aoecmd.c aoecmd_wreset(struct aoetgt *t)
t                1609 drivers/block/aoe/aoecmd.c 	t->maxout = 1;
t                1610 drivers/block/aoe/aoecmd.c 	t->ssthresh = t->nframes / 2;
t                1611 drivers/block/aoe/aoecmd.c 	t->next_cwnd = t->nframes;
t                1617 drivers/block/aoe/aoecmd.c 	struct aoetgt **t, **te;
t                1623 drivers/block/aoe/aoecmd.c 	t = d->targets;
t                1624 drivers/block/aoe/aoecmd.c 	te = t + d->ntargets;
t                1625 drivers/block/aoe/aoecmd.c 	for (; t < te && *t; t++)
t                1626 drivers/block/aoe/aoecmd.c 		aoecmd_wreset(*t);
t                1668 drivers/block/aoe/aoecmd.c 		d = f->t->d;
t                  18 drivers/block/aoe/aoedev.c static void freetgt(struct aoedev *d, struct aoetgt *t);
t                 148 drivers/block/aoe/aoedev.c dummy_timer(struct timer_list *t)
t                 152 drivers/block/aoe/aoedev.c 	d = from_timer(d, t, timer);
t                 191 drivers/block/aoe/aoedev.c 		aoe_failbuf(f->t->d, f->buf);
t                 199 drivers/block/aoe/aoedev.c 	struct aoetgt *t, **tt, **te;
t                 218 drivers/block/aoe/aoedev.c 	for (; tt < te && (t = *tt); tt++) {
t                 219 drivers/block/aoe/aoedev.c 		aoecmd_wreset(t);
t                 220 drivers/block/aoe/aoedev.c 		t->nout = 0;
t                 262 drivers/block/aoe/aoedev.c 	struct aoetgt **t, **e;
t                 284 drivers/block/aoe/aoedev.c 	t = d->targets;
t                 285 drivers/block/aoe/aoedev.c 	e = t + d->ntargets;
t                 286 drivers/block/aoe/aoedev.c 	for (; t < e && *t; t++)
t                 287 drivers/block/aoe/aoedev.c 		freetgt(d, *t);
t                 499 drivers/block/aoe/aoedev.c freetgt(struct aoedev *d, struct aoetgt *t)
t                 505 drivers/block/aoe/aoedev.c 	for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
t                 511 drivers/block/aoe/aoedev.c 	head = &t->ffree;
t                 518 drivers/block/aoe/aoedev.c 	kfree(t);
t                  20 drivers/block/aoe/aoemain.c static void discover_timer(struct timer_list *t)
t                  22 drivers/block/aoe/aoemain.c 	mod_timer(t, jiffies + HZ * 60); /* one minute */
t                 304 drivers/block/drbd/drbd_actlog.c 	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
t                 307 drivers/block/drbd/drbd_actlog.c 	t = ((t % stripes) * stripe_size_4kB) + t/stripes;
t                 310 drivers/block/drbd/drbd_actlog.c 	t *= 8;
t                 313 drivers/block/drbd/drbd_actlog.c 	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
t                1541 drivers/block/drbd/drbd_int.h extern void resync_timer_fn(struct timer_list *t);
t                1542 drivers/block/drbd/drbd_int.h extern void start_resync_timer_fn(struct timer_list *t);
t                  55 drivers/block/drbd/drbd_main.c static void md_sync_timer_fn(struct timer_list *t);
t                1710 drivers/block/drbd/drbd_main.c 		struct p_trim *t = (struct p_trim*)p;
t                1711 drivers/block/drbd/drbd_main.c 		t->size = cpu_to_be32(req->i.size);
t                1712 drivers/block/drbd/drbd_main.c 		err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
t                3697 drivers/block/drbd/drbd_main.c static void md_sync_timer_fn(struct timer_list *t)
t                3699 drivers/block/drbd/drbd_main.c 	struct drbd_device *device = from_timer(device, t, md_sync_timer);
t                1148 drivers/block/drbd/drbd_nl.c 	struct lru_cache *n, *t;
t                1158 drivers/block/drbd/drbd_nl.c 	t = device->act_log;
t                1167 drivers/block/drbd/drbd_nl.c 	if (t) {
t                1168 drivers/block/drbd/drbd_nl.c 		for (i = 0; i < t->nr_elements; i++) {
t                1169 drivers/block/drbd/drbd_nl.c 			e = lc_element_by_index(t, i);
t                1184 drivers/block/drbd/drbd_nl.c 		lc_destroy(t);
t                 211 drivers/block/drbd/drbd_receiver.c 	struct drbd_peer_request *peer_req, *t;
t                 216 drivers/block/drbd/drbd_receiver.c 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
t                 423 drivers/block/drbd/drbd_receiver.c 	struct drbd_peer_request *peer_req, *t;
t                 431 drivers/block/drbd/drbd_receiver.c 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
t                 445 drivers/block/drbd/drbd_receiver.c 	struct drbd_peer_request *peer_req, *t;
t                 453 drivers/block/drbd/drbd_receiver.c 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
t                 460 drivers/block/drbd/drbd_receiver.c 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
t                 524 drivers/block/drbd/drbd_receiver.c 			long t;
t                 526 drivers/block/drbd/drbd_receiver.c 			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
t                 529 drivers/block/drbd/drbd_receiver.c 			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
t                 531 drivers/block/drbd/drbd_receiver.c 			if (t)
t                5965 drivers/block/drbd/drbd_receiver.c 	long t;
t                5970 drivers/block/drbd/drbd_receiver.c 	t = ping_timeout ? nc->ping_timeo : nc->ping_int;
t                5973 drivers/block/drbd/drbd_receiver.c 	t *= HZ;
t                5975 drivers/block/drbd/drbd_receiver.c 		t /= 10;
t                5977 drivers/block/drbd/drbd_receiver.c 	connection->meta.socket->sk->sk_rcvtimeo = t;
t                6060 drivers/block/drbd/drbd_receiver.c 				long t;
t                6062 drivers/block/drbd/drbd_receiver.c 				t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
t                6065 drivers/block/drbd/drbd_receiver.c 				t = wait_event_timeout(connection->ping_wait,
t                6067 drivers/block/drbd/drbd_receiver.c 						       t);
t                6068 drivers/block/drbd/drbd_receiver.c 				if (t)
t                1706 drivers/block/drbd/drbd_req.c void request_timer_fn(struct timer_list *t)
t                1708 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = from_timer(device, t, request_timer);
t                 287 drivers/block/drbd/drbd_req.h extern void request_timer_fn(struct timer_list *t);
t                 138 drivers/block/drbd/drbd_vli.h #define LEVEL(t,b,v)					\
t                 141 drivers/block/drbd/drbd_vli.h 			*out = ((in & ((~0ULL) >> (64-t))) >> b) + adj;	\
t                 142 drivers/block/drbd/drbd_vli.h 			return t;			\
t                 144 drivers/block/drbd/drbd_vli.h 		adj += 1ULL << (t - b);			\
t                 164 drivers/block/drbd/drbd_vli.h #define LEVEL(t,b,v) do {		\
t                 165 drivers/block/drbd/drbd_vli.h 		max += 1ULL << (t - b);	\
t                 169 drivers/block/drbd/drbd_vli.h 			return t;	\
t                 447 drivers/block/drbd/drbd_worker.c void resync_timer_fn(struct timer_list *t)
t                 449 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = from_timer(device, t, resync_timer);
t                 933 drivers/block/drbd/drbd_worker.c 			const unsigned long t = device->rs_total;
t                 935 drivers/block/drbd/drbd_worker.c 				(t == 0)     ? 0 :
t                 936 drivers/block/drbd/drbd_worker.c 			(t < 100000) ? ((s*100)/t) : (s/(t/100));
t                1693 drivers/block/drbd/drbd_worker.c void start_resync_timer_fn(struct timer_list *t)
t                1695 drivers/block/drbd/drbd_worker.c 	struct drbd_device *device = from_timer(device, t, start_resync_timer);
t                 910 drivers/block/floppy.c static void motor_off_callback(struct timer_list *t)
t                 912 drivers/block/floppy.c 	unsigned long nr = t - motor_off_timer;
t                 199 drivers/block/paride/bpck.c 	int t, s;
t                 208 drivers/block/paride/bpck.c 	t2(2); t = r1()&0xf8;
t                 210 drivers/block/paride/bpck.c 	if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; }
t                 211 drivers/block/paride/bpck.c 	if ((t == o1) && ((!f7) || (s == o1)))  {
t                 120 drivers/block/paride/paride.h { 	union { u16 u; char t[2]; } r;
t                 122 drivers/block/paride/paride.h 	r.t[0]=b[2*k+1]; r.t[1]=b[2*k];
t                 770 drivers/block/paride/pt.c 	int k, n, r, p, s, t, b;
t                 782 drivers/block/paride/pt.c 	t = 0;
t                 844 drivers/block/paride/pt.c 				if (copy_to_user(buf + t, tape->bufptr, b)) {
t                 848 drivers/block/paride/pt.c 				t += b;
t                 858 drivers/block/paride/pt.c 	return t;
t                 867 drivers/block/paride/pt.c 	int k, n, r, p, s, t, b;
t                 883 drivers/block/paride/pt.c 	t = 0;
t                 943 drivers/block/paride/pt.c 				if (copy_from_user(tape->bufptr, buf + t, b)) {
t                 948 drivers/block/paride/pt.c 				t += b;
t                 959 drivers/block/paride/pt.c 	return t;
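The paride/pt.c read and write paths above keep a running total t of bytes transferred, advancing the user pointer by t on each copy_to_user()/copy_from_user() chunk and returning t at the end. A standalone sketch of that accounting, assuming hypothetical names and a kernel-side source buffer:

        #include <linux/kernel.h>
        #include <linux/uaccess.h>

        /* Copy src to user space in bounded chunks, returning the number of
         * bytes transferred (or -EFAULT if nothing could be copied). */
        static ssize_t copy_chunks_to_user(char __user *buf, const char *src,
                                           size_t total, size_t chunk)
        {
                size_t t = 0;

                while (t < total) {
                        size_t b = min(chunk, total - t);

                        if (copy_to_user(buf + t, src + t, b)) {
                                if (!t)
                                        return -EFAULT;
                                break;          /* partial transfer */
                        }
                        t += b;
                }
                return t;
        }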
t                 192 drivers/block/rsxx/cregs.c static void creg_cmd_timed_out(struct timer_list *t)
t                 194 drivers/block/rsxx/cregs.c 	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
t                 343 drivers/block/rsxx/dma.c static void dma_engine_stalled(struct timer_list *t)
t                 345 drivers/block/rsxx/dma.c 	struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
t                 715 drivers/block/skd_main.c static void skd_timer_tick(struct timer_list *t)
t                 717 drivers/block/skd_main.c 	struct skd_device *skdev = from_timer(skdev, t, timer);
t                 239 drivers/block/swim3.c static void scan_timeout(struct timer_list *t);
t                 240 drivers/block/swim3.c static void seek_timeout(struct timer_list *t);
t                 241 drivers/block/swim3.c static void settle_timeout(struct timer_list *t);
t                 242 drivers/block/swim3.c static void xfer_timeout(struct timer_list *t);
t                 363 drivers/block/swim3.c 			void (*proc)(struct timer_list *t))
t                 539 drivers/block/swim3.c static void scan_timeout(struct timer_list *t)
t                 541 drivers/block/swim3.c 	struct floppy_state *fs = from_timer(fs, t, timeout);
t                 563 drivers/block/swim3.c static void seek_timeout(struct timer_list *t)
t                 565 drivers/block/swim3.c 	struct floppy_state *fs = from_timer(fs, t, timeout);
t                 582 drivers/block/swim3.c static void settle_timeout(struct timer_list *t)
t                 584 drivers/block/swim3.c 	struct floppy_state *fs = from_timer(fs, t, timeout);
t                 611 drivers/block/swim3.c static void xfer_timeout(struct timer_list *t)
t                 613 drivers/block/swim3.c 	struct floppy_state *fs = from_timer(fs, t, timeout);
t                 780 drivers/block/xsysace.c static void ace_stall_timer(struct timer_list *t)
t                 782 drivers/block/xsysace.c 	struct ace_device *ace = from_timer(ace, t, stall_timer);
t                 159 drivers/bluetooth/bluecard_cs.c static void bluecard_activity_led_timeout(struct timer_list *t)
t                 161 drivers/bluetooth/bluecard_cs.c 	struct bluecard_info *info = from_timer(info, t, timer);
t                 689 drivers/bluetooth/hci_bcsp.c static void bcsp_timed_event(struct timer_list *t)
t                 691 drivers/bluetooth/hci_bcsp.c 	struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp);
t                 132 drivers/bluetooth/hci_h5.c static void h5_timed_event(struct timer_list *t)
t                 136 drivers/bluetooth/hci_h5.c 	struct h5 *h5 = from_timer(h5, t, timer);
t                 393 drivers/bluetooth/hci_qca.c static void hci_ibs_tx_idle_timeout(struct timer_list *t)
t                 395 drivers/bluetooth/hci_qca.c 	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
t                 428 drivers/bluetooth/hci_qca.c static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
t                 430 drivers/bluetooth/hci_qca.c 	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
t                  90 drivers/bus/moxtet.c 	const enum turris_mox_module_id *t;
t                  98 drivers/bus/moxtet.c 	for (t = tdrv->id_table; *t; ++t)
t                  99 drivers/bus/moxtet.c 		if (*t == mdev->id)
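The moxtet match loop above walks a zero-terminated id table, so an id value of 0 acts as the sentinel. A minimal sketch of that lookup with hypothetical types:

        #include <linux/types.h>

        /* Return true if id appears in the zero-terminated table. */
        static bool id_table_match(const unsigned int *table, unsigned int id)
        {
                const unsigned int *t;

                for (t = table; *t; t++)
                        if (*t == id)
                                return true;
                return false;
        }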
t                  62 drivers/char/dsp56k.c 	long i, t, m; \
t                  66 drivers/char/dsp56k.c 			for (t = 0; t < timeout && !ENABLE; t++) \
t                  79 drivers/char/dsp56k.c 	int t; \
t                  80 drivers/char/dsp56k.c 	for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \
t                  89 drivers/char/dsp56k.c 	int t; \
t                  90 drivers/char/dsp56k.c 	for(t = 0; t < n && !DSP56K_RECEIVE; t++) \
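The dsp56k.c macros above implement a bounded busy-wait: spin at most n iterations until a ready flag (DSP56K_TRANSMIT / DSP56K_RECEIVE) comes up. A plain-function sketch of the same idea, with a hypothetical ready() predicate standing in for the hardware status check:

        #include <linux/errno.h>
        #include <linux/types.h>

        /* Poll at most n times; 0 on success, -ETIMEDOUT if the bound hit. */
        static int wait_ready_bounded(bool (*ready)(void), int n)
        {
                int t;

                for (t = 0; t < n && !ready(); t++)
                        ;
                return t < n ? 0 : -ETIMEDOUT;
        }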
t                 513 drivers/char/dtlk.c 	unsigned char *t;
t                 532 drivers/char/dtlk.c 	t = buf;
t                 533 drivers/char/dtlk.c 	status.serial_number = t[0] + t[1] * 256; /* serial number is
t                 535 drivers/char/dtlk.c 	t += 2;
t                 538 drivers/char/dtlk.c 	while (*t != '\r') {
t                 539 drivers/char/dtlk.c 		status.rom_version[i] = *t;
t                 542 drivers/char/dtlk.c 		t++;
t                 545 drivers/char/dtlk.c 	t++;
t                 547 drivers/char/dtlk.c 	status.mode = *t++;
t                 548 drivers/char/dtlk.c 	status.punc_level = *t++;
t                 549 drivers/char/dtlk.c 	status.formant_freq = *t++;
t                 550 drivers/char/dtlk.c 	status.pitch = *t++;
t                 551 drivers/char/dtlk.c 	status.speed = *t++;
t                 552 drivers/char/dtlk.c 	status.volume = *t++;
t                 553 drivers/char/dtlk.c 	status.tone = *t++;
t                 554 drivers/char/dtlk.c 	status.expression = *t++;
t                 555 drivers/char/dtlk.c 	status.ext_dict_loaded = *t++;
t                 556 drivers/char/dtlk.c 	status.ext_dict_status = *t++;
t                 557 drivers/char/dtlk.c 	status.free_ram = *t++;
t                 558 drivers/char/dtlk.c 	status.articulation = *t++;
t                 559 drivers/char/dtlk.c 	status.reverb = *t++;
t                 560 drivers/char/dtlk.c 	status.eob = *t++;
t                 159 drivers/char/hpet.c 		unsigned long m, t, mc, base, k;
t                 163 drivers/char/hpet.c 		t = devp->hd_ireqfreq;
t                 181 drivers/char/hpet.c 		base = mc % t;
t                 182 drivers/char/hpet.c 		k = (mc - base + hpetp->hp_delta) / t;
t                 183 drivers/char/hpet.c 		write_counter(t * (k + 1) + base,
t                 459 drivers/char/hpet.c 	unsigned long g, v, t, m;
t                 519 drivers/char/hpet.c 	t = devp->hd_ireqfreq;
t                 541 drivers/char/hpet.c 		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
t                 546 drivers/char/hpet.c 		write_counter(t, &timer->hpet_compare);
t                 550 drivers/char/hpet.c 		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
t                 781 drivers/char/hpet.c 	unsigned long t, m, count, i, flags, start;
t                 796 drivers/char/hpet.c 	t = read_counter(&timer->hpet_compare);
t                 807 drivers/char/hpet.c 		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
t                  54 drivers/char/hw_random/pic32-rng.c 	u32 t;
t                  58 drivers/char/hw_random/pic32-rng.c 		t = readl(priv->base + RNGRCNT) & RCNT_MASK;
t                  59 drivers/char/hw_random/pic32-rng.c 		if (t == 64) {
t                  90 drivers/char/hw_random/xgene-rng.c static void xgene_rng_expired_timer(struct timer_list *t)
t                  92 drivers/char/hw_random/xgene-rng.c 	struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
t                 366 drivers/char/ipmi/bt-bmc.c static void poll_timer(struct timer_list *t)
t                 368 drivers/char/ipmi/bt-bmc.c 	struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
t                 268 drivers/char/ipmi/ipmi_si_intf.c 	struct timespec t;
t                 270 drivers/char/ipmi/ipmi_si_intf.c 	ktime_get_ts(&t);
t                 271 drivers/char/ipmi/ipmi_si_intf.c 	pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
t                1085 drivers/char/ipmi/ipmi_si_intf.c static void smi_timeout(struct timer_list *t)
t                1087 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
t                 557 drivers/char/ipmi/ipmi_ssif.c static void retry_timeout(struct timer_list *t)
t                 559 drivers/char/ipmi/ipmi_ssif.c 	struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
t                 575 drivers/char/ipmi/ipmi_ssif.c static void watch_timeout(struct timer_list *t)
t                 577 drivers/char/ipmi/ipmi_ssif.c 	struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
t                1112 drivers/char/ipmi/ipmi_ssif.c 		struct timespec64 t;
t                1114 drivers/char/ipmi/ipmi_ssif.c 		ktime_get_real_ts64(&t);
t                1118 drivers/char/ipmi/ipmi_ssif.c 			(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
t                 662 drivers/char/pcmcia/cm4000_cs.c static void monitor_card(struct timer_list *t)
t                 664 drivers/char/pcmcia/cm4000_cs.c 	struct cm4000_dev *dev = from_timer(dev, t, timer);
t                 107 drivers/char/pcmcia/cm4040_cs.c static void cm4040_do_poll(struct timer_list *t)
t                 109 drivers/char/pcmcia/cm4040_cs.c 	struct reader_dev *dev = from_timer(dev, t, poll_timer);
t                 378 drivers/char/pcmcia/synclink_cs.c static void tx_timeout(struct timer_list *t);
t                3835 drivers/char/pcmcia/synclink_cs.c static void tx_timeout(struct timer_list *t)
t                3837 drivers/char/pcmcia/synclink_cs.c 	MGSLPC_INFO *info = from_timer(info, t, tx_timer);
t                1749 drivers/char/random.c static void entropy_timer(struct timer_list *t)
t                2065 drivers/char/random.c 	__u32 t, buf[16];
t                2076 drivers/char/random.c 			if (!arch_get_random_int(&t))
t                2078 drivers/char/random.c 			buf[i] ^= t;
t                 187 drivers/char/tlclk.c static void switchover_timeout(struct timer_list *t);
t                  82 drivers/char/tpm/tpm-dev-common.c static void user_reader_timeout(struct timer_list *t)
t                  84 drivers/char/tpm/tpm-dev-common.c 	struct file_priv *priv = from_timer(priv, t, user_read_timer);
t                 611 drivers/char/xillybus/xillybus_core.c 	long t;
t                 621 drivers/char/xillybus/xillybus_core.c 	t = wait_event_interruptible_timeout(channel->wr_wait,
t                 625 drivers/char/xillybus/xillybus_core.c 	if (t <= 0) {
t                1918 drivers/char/xillybus/xillybus_core.c 	long t;
t                1925 drivers/char/xillybus/xillybus_core.c 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
t                1928 drivers/char/xillybus/xillybus_core.c 	if (t <= 0) {
t                1939 drivers/char/xillybus/xillybus_core.c 	long t;
t                1988 drivers/char/xillybus/xillybus_core.c 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
t                1991 drivers/char/xillybus/xillybus_core.c 	if (t <= 0) {
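The xillybus "if (t <= 0)" tests above rely on the return convention of wait_event_interruptible_timeout(): a positive value is the remaining jiffies after the condition became true, 0 means the timeout elapsed, and a negative value (-ERESTARTSYS) means a signal interrupted the wait. A sketch with a hypothetical wait queue and completion flag:

        #include <linux/compiler.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/wait.h>

        static int wait_done(wait_queue_head_t *wq, bool *done)
        {
                long t = wait_event_interruptible_timeout(*wq, READ_ONCE(*done),
                                                          msecs_to_jiffies(1000));

                if (t < 0)
                        return t;               /* interrupted by a signal */
                if (t == 0)
                        return -ETIMEDOUT;      /* timeout elapsed */
                return 0;                       /* condition satisfied */
        }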
t                3924 drivers/clk/clk.c 		struct hlist_node *t;
t                3927 drivers/clk/clk.c 		hlist_for_each_entry_safe(child, t, &clk->core->children,
t                 197 drivers/clk/pxa/clk-pxa25x.c 	unsigned int t;
t                 200 drivers/clk/pxa/clk-pxa25x.c 	t  = clkcfg & (1 << 0);
t                 201 drivers/clk/pxa/clk-pxa25x.c 	if (t)
t                 240 drivers/clk/pxa/clk-pxa25x.c 	unsigned int l, m, n2, t;
t                 243 drivers/clk/pxa/clk-pxa25x.c 	t = clkcfg & (1 << 0);
t                 227 drivers/clk/pxa/clk-pxa27x.c 	unsigned int t, ht;
t                 232 drivers/clk/pxa/clk-pxa27x.c 	t  = clkcfg & (1 << 0);
t                 324 drivers/clk/pxa/clk-pxa27x.c 	unsigned int t, ht, osc_forced;
t                 332 drivers/clk/pxa/clk-pxa27x.c 	t  = clkcfg & (1 << 0);
t                 335 drivers/clk/pxa/clk-pxa27x.c 	if (ht || t)
t                 222 drivers/clk/pxa/clk-pxa3xx.c 	unsigned int t;
t                 229 drivers/clk/pxa/clk-pxa3xx.c 	t = xclkcfg & 0x1;
t                 231 drivers/clk/pxa/clk-pxa3xx.c 	if (t)
t                 243 drivers/clk/pxa/clk-pxa3xx.c 	unsigned int t, xclkcfg;
t                 247 drivers/clk/pxa/clk-pxa3xx.c 	t = xclkcfg & 0x1;
t                 249 drivers/clk/pxa/clk-pxa3xx.c 	return t ? (parent_rate / xn) * 2 : parent_rate;
t                 260 drivers/clk/pxa/clk-pxa3xx.c 	unsigned int t, xclkcfg;
t                 264 drivers/clk/pxa/clk-pxa3xx.c 	t = xclkcfg & 0x1;
t                 267 drivers/clk/pxa/clk-pxa3xx.c 	return t ? parent_rate * xl * xn : parent_rate * xl;
t                 169 drivers/clk/samsung/clk.h #define __DIV(_id, cname, pname, o, s, w, f, df, t)	\
t                 179 drivers/clk/samsung/clk.h 		.table		= t,				\
t                 188 drivers/clk/samsung/clk.h #define DIV_T(_id, cname, pname, o, s, w, t)			\
t                 189 drivers/clk/samsung/clk.h 	__DIV(_id, cname, pname, o, s, w, 0, 0, t)
t                 117 drivers/clk/tegra/clk-emc.c 	int i, k, t;
t                 126 drivers/clk/tegra/clk-emc.c 	for (t = k; t < tegra->num_timings; t++) {
t                 127 drivers/clk/tegra/clk-emc.c 		if (tegra->timings[t].ram_code != ram_code)
t                 131 drivers/clk/tegra/clk-emc.c 	for (i = k; i < t; i++) {
t                 134 drivers/clk/tegra/clk-emc.c 		if (timing->rate < req->rate && i != t - 1)
t                 618 drivers/clocksource/arm_arch_timer.c #define arch_timer_check_ool_workaround(t,a)		do { } while(0)
t                1138 drivers/clocksource/arm_arch_timer.c 	struct arch_timer *t;
t                1140 drivers/clocksource/arm_arch_timer.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                1141 drivers/clocksource/arm_arch_timer.c 	if (!t)
t                1144 drivers/clocksource/arm_arch_timer.c 	t->base = base;
t                1145 drivers/clocksource/arm_arch_timer.c 	t->evt.irq = irq;
t                1146 drivers/clocksource/arm_arch_timer.c 	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
t                1153 drivers/clocksource/arm_arch_timer.c 	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
t                1156 drivers/clocksource/arm_arch_timer.c 		kfree(t);
t                  78 drivers/clocksource/timer-atmel-pit.c 	u32 t;
t                  82 drivers/clocksource/timer-atmel-pit.c 	t = pit_read(data->base, AT91_PIT_PIIR);
t                  85 drivers/clocksource/timer-atmel-pit.c 	elapsed += PIT_PICNT(t) * data->cycle;
t                  86 drivers/clocksource/timer-atmel-pit.c 	elapsed += PIT_CPIV(t);
t                  51 drivers/clocksource/timer-digicolor.c #define CONTROL(t)	((t)*8)
t                  52 drivers/clocksource/timer-digicolor.c #define COUNT(t)	((t)*8 + 4)
t                 303 drivers/clocksource/timer-ti-dm.c 	struct omap_dm_timer *timer = NULL, *t;
t                 325 drivers/clocksource/timer-ti-dm.c 	list_for_each_entry(t, &omap_timer_list, node) {
t                 326 drivers/clocksource/timer-ti-dm.c 		if (t->reserved)
t                 331 drivers/clocksource/timer-ti-dm.c 			if (id == t->pdev->id) {
t                 332 drivers/clocksource/timer-ti-dm.c 				timer = t;
t                 338 drivers/clocksource/timer-ti-dm.c 			if (cap == (t->capability & cap)) {
t                 349 drivers/clocksource/timer-ti-dm.c 				timer = t;
t                 353 drivers/clocksource/timer-ti-dm.c 				if (t->capability == cap)
t                 358 drivers/clocksource/timer-ti-dm.c 			if (np == t->pdev->dev.of_node) {
t                 359 drivers/clocksource/timer-ti-dm.c 				timer = t;
t                 366 drivers/clocksource/timer-ti-dm.c 			timer = t;
t                  37 drivers/clocksource/timer-vt8500.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                  45 drivers/cpufreq/bmips-cpufreq.c #define BMIPS(c, t, m, f) { \
t                  47 drivers/cpufreq/bmips-cpufreq.c 	.bmips_type = (t), \
t                 610 drivers/cpufreq/cpufreq.c 	struct cpufreq_governor *t;
t                 612 drivers/cpufreq/cpufreq.c 	for_each_governor(t)
t                 613 drivers/cpufreq/cpufreq.c 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
t                 614 drivers/cpufreq/cpufreq.c 			return t;
t                 636 drivers/cpufreq/cpufreq.c 	struct cpufreq_governor *t;
t                 640 drivers/cpufreq/cpufreq.c 	t = find_governor(str_governor);
t                 641 drivers/cpufreq/cpufreq.c 	if (!t) {
t                 652 drivers/cpufreq/cpufreq.c 		t = find_governor(str_governor);
t                 654 drivers/cpufreq/cpufreq.c 	if (t && !try_module_get(t->owner))
t                 655 drivers/cpufreq/cpufreq.c 		t = NULL;
t                 659 drivers/cpufreq/cpufreq.c 	return t;
t                 806 drivers/cpufreq/cpufreq.c 	struct cpufreq_governor *t;
t                 813 drivers/cpufreq/cpufreq.c 	for_each_governor(t) {
t                 817 drivers/cpufreq/cpufreq.c 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
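The cpufreq lines above pair a by-name lookup with try_module_get() so the governor's owning module cannot unload while it is in use. A condensed sketch of that find-and-pin pattern over a hypothetical registry list (my_governor, my_governor_list):

        #include <linux/list.h>
        #include <linux/module.h>
        #include <linux/string.h>

        struct my_governor {                    /* hypothetical registry entry */
                char name[16];
                struct module *owner;
                struct list_head node;
        };

        static LIST_HEAD(my_governor_list);

        /* Find a governor by name and pin its module before returning it. */
        static struct my_governor *my_get_governor(const char *name)
        {
                struct my_governor *t;

                list_for_each_entry(t, &my_governor_list, node) {
                        if (strncasecmp(name, t->name, sizeof(t->name)))
                                continue;
                        return try_module_get(t->owner) ? t : NULL;
                }
                return NULL;
        }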
t                 165 drivers/cpufreq/longhaul.c 	u32 t;
t                 194 drivers/cpufreq/longhaul.c 			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
t                 211 drivers/cpufreq/longhaul.c 		t = inl(acpi_gbl_FADT.xpm_timer_block.address);
t                 231 drivers/cpufreq/longhaul.c 			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
t                 669 drivers/cpufreq/powernv-cpufreq.c void gpstate_timer_handler(struct timer_list *t)
t                 671 drivers/cpufreq/powernv-cpufreq.c 	struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
t                2074 drivers/crypto/axis/artpec6_crypto.c static void artpec6_crypto_timeout(struct timer_list *t)
t                2076 drivers/crypto/axis/artpec6_crypto.c 	struct artpec6_crypto *ac = from_timer(ac, t, timer);
t                1322 drivers/crypto/hifn_795x.c 	struct scatterlist *t;
t                1339 drivers/crypto/hifn_795x.c 	t = &rctx->walk.cache[0];
t                1342 drivers/crypto/hifn_795x.c 		if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
t                1343 drivers/crypto/hifn_795x.c 			BUG_ON(!sg_page(t));
t                1344 drivers/crypto/hifn_795x.c 			dpage = sg_page(t);
t                1346 drivers/crypto/hifn_795x.c 			len = t->length;
t                1358 drivers/crypto/hifn_795x.c 		t++;
t                1439 drivers/crypto/hifn_795x.c 	struct scatterlist *dst, *t;
t                1460 drivers/crypto/hifn_795x.c 			t = &w->cache[idx];
t                1508 drivers/crypto/hifn_795x.c 			t->length = copy;
t                1509 drivers/crypto/hifn_795x.c 			t->offset = offset;
t                1670 drivers/crypto/hifn_795x.c 		struct scatterlist *dst, *t;
t                1674 drivers/crypto/hifn_795x.c 			t = &rctx->walk.cache[idx];
t                1680 drivers/crypto/hifn_795x.c 				__func__, sg_page(t), t->length,
t                1683 drivers/crypto/hifn_795x.c 			if (!t->length) {
t                1689 drivers/crypto/hifn_795x.c 			saddr = kmap_atomic(sg_page(t));
t                1691 drivers/crypto/hifn_795x.c 			err = ablkcipher_get(saddr, &t->length, t->offset,
t                2380 drivers/crypto/hifn_795x.c static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
t                2389 drivers/crypto/hifn_795x.c 	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
t                2391 drivers/crypto/hifn_795x.c 		 t->drv_name, dev->name);
t                2396 drivers/crypto/hifn_795x.c 	alg->alg.cra_blocksize = t->bsize;
t                2401 drivers/crypto/hifn_795x.c 	alg->alg.cra_u.ablkcipher = t->ablkcipher;
t                1128 drivers/crypto/picoxcell_crypto.c static void spacc_packet_timeout(struct timer_list *t)
t                1130 drivers/crypto/picoxcell_crypto.c 	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
t                 446 drivers/dma-buf/st-dma-fence.c 	const struct race_thread *t = arg;
t                 461 drivers/dma-buf/st-dma-fence.c 		rcu_assign_pointer(t->fences[t->id], f1);
t                 466 drivers/dma-buf/st-dma-fence.c 			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
t                 470 drivers/dma-buf/st-dma-fence.c 		if (t->before)
t                 477 drivers/dma-buf/st-dma-fence.c 		if (!t->before)
t                 487 drivers/dma-buf/st-dma-fence.c 			       t->id, pass, miss,
t                 488 drivers/dma-buf/st-dma-fence.c 			       t->before ? "before" : "after",
t                 495 drivers/dma-buf/st-dma-fence.c 		rcu_assign_pointer(t->fences[t->id], NULL);
t                 504 drivers/dma-buf/st-dma-fence.c 		__func__, t->id, pass, miss);
t                 515 drivers/dma-buf/st-dma-fence.c 		struct race_thread t[2];
t                 518 drivers/dma-buf/st-dma-fence.c 		for (i = 0; i < ARRAY_SIZE(t); i++) {
t                 519 drivers/dma-buf/st-dma-fence.c 			t[i].fences = f;
t                 520 drivers/dma-buf/st-dma-fence.c 			t[i].id = i;
t                 521 drivers/dma-buf/st-dma-fence.c 			t[i].before = pass;
t                 522 drivers/dma-buf/st-dma-fence.c 			t[i].task = kthread_run(thread_signal_callback, &t[i],
t                 524 drivers/dma-buf/st-dma-fence.c 			get_task_struct(t[i].task);
t                 529 drivers/dma-buf/st-dma-fence.c 		for (i = 0; i < ARRAY_SIZE(t); i++) {
t                 532 drivers/dma-buf/st-dma-fence.c 			err = kthread_stop(t[i].task);
t                 536 drivers/dma-buf/st-dma-fence.c 			put_task_struct(t[i].task);
t                 201 drivers/dma/bcm2835-dma.c 		struct dma_async_tx_descriptor *t)
t                 203 drivers/dma/bcm2835-dma.c 	return container_of(t, struct bcm2835_desc, vd.tx);
t                 174 drivers/dma/img-mdc-dma.c static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
t                 176 drivers/dma/img-mdc-dma.c 	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);
t                 360 drivers/dma/imx-dma.c static void imxdma_watchdog(struct timer_list *t)
t                 362 drivers/dma/imx-dma.c 	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
t                 235 drivers/dma/imx-sdma.c 	u32 t      : 1;
t                 746 drivers/dma/imx-sdma.c static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
t                 748 drivers/dma/imx-sdma.c 	return container_of(t, struct sdma_desc, vd.tx);
t                 870 drivers/dma/ioat/dma.c void ioat_timer_event(struct timer_list *t)
t                 872 drivers/dma/ioat/dma.c 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
t                 397 drivers/dma/ioat/dma.h void ioat_timer_event(struct timer_list *t);
t                 116 drivers/dma/mediatek/mtk-uart-apdma.c 	(struct dma_async_tx_descriptor *t)
t                 118 drivers/dma/mediatek/mtk-uart-apdma.c 	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
t                 176 drivers/dma/moxart-dma.c 	struct dma_async_tx_descriptor *t)
t                 178 drivers/dma/moxart-dma.c 	return container_of(t, struct moxart_desc, vd.tx);
t                 251 drivers/dma/pl330.c #define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
t                 860 drivers/dma/pl330.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                  94 drivers/dma/qcom/hidma.c struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
t                  96 drivers/dma/qcom/hidma.c 	return container_of(t, struct hidma_desc, desc);
t                 166 drivers/dma/ti/edma.c #define EDMA_TCC(t)	((t) << 12)
t                 221 drivers/dma/ti/omap-dma.c static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
t                 223 drivers/dma/ti/omap-dma.c 	return container_of(t, struct omap_desc, vd.tx);
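Several DMA drivers above (bcm2835, img-mdc, imx-sdma, mtk-uart-apdma, moxart, hidma, omap) define a to_*_desc() helper that is just container_of() on an embedded dma_async_tx_descriptor, usually reached through a virt_dma_desc. A generic sketch of that embedding with hypothetical structure names:

        #include <linux/dmaengine.h>
        #include <linux/kernel.h>

        struct my_desc {
                struct dma_async_tx_descriptor tx;      /* embedded, like vd.tx above */
                /* ... driver-specific fields ... */
        };

        /* Recover the driver descriptor from the generic tx descriptor. */
        static inline struct my_desc *to_my_desc(struct dma_async_tx_descriptor *t)
        {
                return container_of(t, struct my_desc, tx);
        }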
t                 861 drivers/edac/sb_edac.c static const char *get_intlv_mode_str(u32 reg, enum type t)
t                 863 drivers/edac/sb_edac.c 	if (t == KNIGHTS_LANDING)
t                  65 drivers/firewire/core-transaction.c static int try_cancel_split_timeout(struct fw_transaction *t)
t                  67 drivers/firewire/core-transaction.c 	if (t->is_split_transaction)
t                  68 drivers/firewire/core-transaction.c 		return del_timer(&t->split_timeout_timer);
t                  76 drivers/firewire/core-transaction.c 	struct fw_transaction *t;
t                  80 drivers/firewire/core-transaction.c 	list_for_each_entry(t, &card->transaction_list, link) {
t                  81 drivers/firewire/core-transaction.c 		if (t == transaction) {
t                  82 drivers/firewire/core-transaction.c 			if (!try_cancel_split_timeout(t)) {
t                  86 drivers/firewire/core-transaction.c 			list_del_init(&t->link);
t                  87 drivers/firewire/core-transaction.c 			card->tlabel_mask &= ~(1ULL << t->tlabel);
t                  93 drivers/firewire/core-transaction.c 	if (&t->link != &card->transaction_list) {
t                  94 drivers/firewire/core-transaction.c 		t->callback(card, rcode, NULL, 0, t->callback_data);
t                 129 drivers/firewire/core-transaction.c 	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
t                 130 drivers/firewire/core-transaction.c 	struct fw_card *card = t->card;
t                 134 drivers/firewire/core-transaction.c 	if (list_empty(&t->link)) {
t                 138 drivers/firewire/core-transaction.c 	list_del(&t->link);
t                 139 drivers/firewire/core-transaction.c 	card->tlabel_mask &= ~(1ULL << t->tlabel);
t                 142 drivers/firewire/core-transaction.c 	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
t                 145 drivers/firewire/core-transaction.c static void start_split_transaction_timeout(struct fw_transaction *t,
t                 152 drivers/firewire/core-transaction.c 	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
t                 157 drivers/firewire/core-transaction.c 	t->is_split_transaction = true;
t                 158 drivers/firewire/core-transaction.c 	mod_timer(&t->split_timeout_timer,
t                 167 drivers/firewire/core-transaction.c 	struct fw_transaction *t =
t                 172 drivers/firewire/core-transaction.c 		close_transaction(t, card, RCODE_COMPLETE);
t                 175 drivers/firewire/core-transaction.c 		start_split_transaction_timeout(t, card);
t                 180 drivers/firewire/core-transaction.c 		close_transaction(t, card, RCODE_BUSY);
t                 183 drivers/firewire/core-transaction.c 		close_transaction(t, card, RCODE_DATA_ERROR);
t                 186 drivers/firewire/core-transaction.c 		close_transaction(t, card, RCODE_TYPE_ERROR);
t                 193 drivers/firewire/core-transaction.c 		close_transaction(t, card, status);
t                 337 drivers/firewire/core-transaction.c void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
t                 359 drivers/firewire/core-transaction.c 	t->node_id = destination_id;
t                 360 drivers/firewire/core-transaction.c 	t->tlabel = tlabel;
t                 361 drivers/firewire/core-transaction.c 	t->card = card;
t                 362 drivers/firewire/core-transaction.c 	t->is_split_transaction = false;
t                 363 drivers/firewire/core-transaction.c 	timer_setup(&t->split_timeout_timer,
t                 365 drivers/firewire/core-transaction.c 	t->callback = callback;
t                 366 drivers/firewire/core-transaction.c 	t->callback_data = callback_data;
t                 368 drivers/firewire/core-transaction.c 	fw_fill_request(&t->packet, tcode, t->tlabel,
t                 371 drivers/firewire/core-transaction.c 	t->packet.callback = transmit_complete_callback;
t                 373 drivers/firewire/core-transaction.c 	list_add_tail(&t->link, &card->transaction_list);
t                 377 drivers/firewire/core-transaction.c 	card->driver->send_request(card, &t->packet);
t                 419 drivers/firewire/core-transaction.c 	struct fw_transaction t;
t                 421 drivers/firewire/core-transaction.c 	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
t                 424 drivers/firewire/core-transaction.c 	fw_send_request(card, &t, tcode, destination_id, generation, speed,
t                 427 drivers/firewire/core-transaction.c 	destroy_timer_on_stack(&t.split_timeout_timer);
t                 938 drivers/firewire/core-transaction.c 	struct fw_transaction *t;
t                 950 drivers/firewire/core-transaction.c 	list_for_each_entry(t, &card->transaction_list, link) {
t                 951 drivers/firewire/core-transaction.c 		if (t->node_id == source && t->tlabel == tlabel) {
t                 952 drivers/firewire/core-transaction.c 			if (!try_cancel_split_timeout(t)) {
t                 956 drivers/firewire/core-transaction.c 			list_del_init(&t->link);
t                 957 drivers/firewire/core-transaction.c 			card->tlabel_mask &= ~(1ULL << t->tlabel);
t                 963 drivers/firewire/core-transaction.c 	if (&t->link == &card->transaction_list) {
t                1003 drivers/firewire/core-transaction.c 	card->driver->cancel_packet(card, &t->packet);
t                1005 drivers/firewire/core-transaction.c 	t->callback(card, rcode, data, data_length, t->callback_data);
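In the firewire core-transaction.c lines above, card->tlabel_mask is a 64-bit bitmap of in-flight transaction labels, and completing or cancelling a transaction clears its bit with ~(1ULL << t->tlabel). A small sketch of that bookkeeping only; the firewire core's real label allocator works differently (round-robin), so treat this as illustrative:

        #include <linux/types.h>

        /* Take the lowest free label out of 64, or -1 if all are in flight. */
        static int tlabel_alloc(u64 *mask)
        {
                int tlabel;

                for (tlabel = 0; tlabel < 64; tlabel++) {
                        if (!(*mask & (1ULL << tlabel))) {
                                *mask |= 1ULL << tlabel;
                                return tlabel;
                        }
                }
                return -1;
        }

        static void tlabel_free(u64 *mask, int tlabel)
        {
                *mask &= ~(1ULL << tlabel);
        }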
t                 256 drivers/firewire/sbp2.c 	struct fw_transaction t;
t                 506 drivers/firewire/sbp2.c 	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
t                 525 drivers/firewire/sbp2.c 		if (fw_cancel_transaction(device->card, &orb->t) == 0)
t                 665 drivers/firewire/sbp2.c 	struct fw_transaction *t;
t                 668 drivers/firewire/sbp2.c 	t = kmalloc(sizeof(*t), GFP_ATOMIC);
t                 669 drivers/firewire/sbp2.c 	if (t == NULL)
t                 672 drivers/firewire/sbp2.c 	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
t                 675 drivers/firewire/sbp2.c 			&d, 4, complete_agent_reset_write_no_wait, t);
t                  36 drivers/firmware/arm_scmi/base.c 	struct scmi_xfer *t;
t                  41 drivers/firmware/arm_scmi/base.c 				 SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
t                  45 drivers/firmware/arm_scmi/base.c 	ret = scmi_do_xfer(handle, t);
t                  47 drivers/firmware/arm_scmi/base.c 		attr_info = t->rx.buf;
t                  52 drivers/firmware/arm_scmi/base.c 	scmi_xfer_put(handle, t);
t                  71 drivers/firmware/arm_scmi/base.c 	struct scmi_xfer *t;
t                  84 drivers/firmware/arm_scmi/base.c 	ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
t                  88 drivers/firmware/arm_scmi/base.c 	ret = scmi_do_xfer(handle, t);
t                  90 drivers/firmware/arm_scmi/base.c 		memcpy(vendor_id, t->rx.buf, size);
t                  92 drivers/firmware/arm_scmi/base.c 	scmi_xfer_put(handle, t);
t                 111 drivers/firmware/arm_scmi/base.c 	struct scmi_xfer *t;
t                 115 drivers/firmware/arm_scmi/base.c 				 SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
t                 119 drivers/firmware/arm_scmi/base.c 	ret = scmi_do_xfer(handle, t);
t                 121 drivers/firmware/arm_scmi/base.c 		impl_ver = t->rx.buf;
t                 125 drivers/firmware/arm_scmi/base.c 	scmi_xfer_put(handle, t);
t                 144 drivers/firmware/arm_scmi/base.c 	struct scmi_xfer *t;
t                 150 drivers/firmware/arm_scmi/base.c 				 SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
t                 154 drivers/firmware/arm_scmi/base.c 	num_skip = t->tx.buf;
t                 155 drivers/firmware/arm_scmi/base.c 	num_ret = t->rx.buf;
t                 156 drivers/firmware/arm_scmi/base.c 	list = t->rx.buf + sizeof(*num_ret);
t                 162 drivers/firmware/arm_scmi/base.c 		ret = scmi_do_xfer(handle, t);
t                 178 drivers/firmware/arm_scmi/base.c 	scmi_xfer_put(handle, t);
t                 199 drivers/firmware/arm_scmi/base.c 	struct scmi_xfer *t;
t                 203 drivers/firmware/arm_scmi/base.c 				 SCMI_MAX_STR_SIZE, &t);
t                 207 drivers/firmware/arm_scmi/base.c 	put_unaligned_le32(id, t->tx.buf);
t                 209 drivers/firmware/arm_scmi/base.c 	ret = scmi_do_xfer(handle, t);
t                 211 drivers/firmware/arm_scmi/base.c 		strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
t                 213 drivers/firmware/arm_scmi/base.c 	scmi_xfer_put(handle, t);
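Every SCMI call in the arm_scmi lines above follows the same shape: scmi_xfer_get_init() reserves a transfer with the requested tx/rx sizes, the caller fills t->tx.buf, scmi_do_xfer() runs the exchange, the reply is read from t->rx.buf, and scmi_xfer_put() releases the transfer. A condensed sketch of one such call; the helper name is hypothetical and the internal signatures, which vary between kernel versions, follow the calls shown above:

        #include <asm/unaligned.h>

        #include "common.h"     /* arm_scmi internal API, as used above */

        static int scmi_read_le32(const struct scmi_handle *handle, u8 msg_id,
                                  u8 prot_id, u32 *out)
        {
                struct scmi_xfer *t;
                int ret;

                ret = scmi_xfer_get_init(handle, msg_id, prot_id,
                                         0, sizeof(u32), &t);
                if (ret)
                        return ret;

                ret = scmi_do_xfer(handle, t);
                if (!ret)
                        *out = get_unaligned_le32(t->rx.buf);

                scmi_xfer_put(handle, t);
                return ret;
        }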
t                  78 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                  82 drivers/firmware/arm_scmi/clock.c 				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
t                  86 drivers/firmware/arm_scmi/clock.c 	attr = t->rx.buf;
t                  88 drivers/firmware/arm_scmi/clock.c 	ret = scmi_do_xfer(handle, t);
t                  94 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 102 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                 106 drivers/firmware/arm_scmi/clock.c 				 sizeof(clk_id), sizeof(*attr), &t);
t                 110 drivers/firmware/arm_scmi/clock.c 	put_unaligned_le32(clk_id, t->tx.buf);
t                 111 drivers/firmware/arm_scmi/clock.c 	attr = t->rx.buf;
t                 113 drivers/firmware/arm_scmi/clock.c 	ret = scmi_do_xfer(handle, t);
t                 119 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 132 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                 137 drivers/firmware/arm_scmi/clock.c 				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
t                 141 drivers/firmware/arm_scmi/clock.c 	clk_desc = t->tx.buf;
t                 142 drivers/firmware/arm_scmi/clock.c 	rlist = t->rx.buf;
t                 149 drivers/firmware/arm_scmi/clock.c 		ret = scmi_do_xfer(handle, t);
t                 192 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 200 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                 203 drivers/firmware/arm_scmi/clock.c 				 sizeof(__le32), sizeof(u64), &t);
t                 207 drivers/firmware/arm_scmi/clock.c 	put_unaligned_le32(clk_id, t->tx.buf);
t                 209 drivers/firmware/arm_scmi/clock.c 	ret = scmi_do_xfer(handle, t);
t                 211 drivers/firmware/arm_scmi/clock.c 		*value = get_unaligned_le64(t->rx.buf);
t                 213 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 222 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                 227 drivers/firmware/arm_scmi/clock.c 				 sizeof(*cfg), 0, &t);
t                 235 drivers/firmware/arm_scmi/clock.c 	cfg = t->tx.buf;
t                 242 drivers/firmware/arm_scmi/clock.c 		ret = scmi_do_xfer_with_response(handle, t);
t                 244 drivers/firmware/arm_scmi/clock.c 		ret = scmi_do_xfer(handle, t);
t                 249 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 257 drivers/firmware/arm_scmi/clock.c 	struct scmi_xfer *t;
t                 261 drivers/firmware/arm_scmi/clock.c 				 sizeof(*cfg), 0, &t);
t                 265 drivers/firmware/arm_scmi/clock.c 	cfg = t->tx.buf;
t                 269 drivers/firmware/arm_scmi/clock.c 	ret = scmi_do_xfer(handle, t);
t                 271 drivers/firmware/arm_scmi/clock.c 	scmi_xfer_put(handle, t);
t                 246 drivers/firmware/arm_scmi/driver.c 	struct scmi_xfer *t = m;
t                 260 drivers/firmware/arm_scmi/driver.c 	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
t                 262 drivers/firmware/arm_scmi/driver.c 	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
t                 263 drivers/firmware/arm_scmi/driver.c 	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
t                 264 drivers/firmware/arm_scmi/driver.c 	if (t->tx.buf)
t                 265 drivers/firmware/arm_scmi/driver.c 		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
t                 576 drivers/firmware/arm_scmi/driver.c 	struct scmi_xfer *t;
t                 579 drivers/firmware/arm_scmi/driver.c 				 sizeof(*version), &t);
t                 583 drivers/firmware/arm_scmi/driver.c 	ret = scmi_do_xfer(handle, t);
t                 585 drivers/firmware/arm_scmi/driver.c 		rev_info = t->rx.buf;
t                 589 drivers/firmware/arm_scmi/driver.c 	scmi_xfer_put(handle, t);
t                 159 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 163 drivers/firmware/arm_scmi/perf.c 				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
t                 167 drivers/firmware/arm_scmi/perf.c 	attr = t->rx.buf;
t                 169 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 180 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 189 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 194 drivers/firmware/arm_scmi/perf.c 				 sizeof(*attr), &t);
t                 198 drivers/firmware/arm_scmi/perf.c 	put_unaligned_le32(domain, t->tx.buf);
t                 199 drivers/firmware/arm_scmi/perf.c 	attr = t->rx.buf;
t                 201 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 225 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 243 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 249 drivers/firmware/arm_scmi/perf.c 				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
t                 253 drivers/firmware/arm_scmi/perf.c 	dom_info = t->tx.buf;
t                 254 drivers/firmware/arm_scmi/perf.c 	level_info = t->rx.buf;
t                 261 drivers/firmware/arm_scmi/perf.c 		ret = scmi_do_xfer(handle, t);
t                 291 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 335 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 339 drivers/firmware/arm_scmi/perf.c 				 sizeof(*limits), 0, &t);
t                 343 drivers/firmware/arm_scmi/perf.c 	limits = t->tx.buf;
t                 348 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 350 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 374 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 378 drivers/firmware/arm_scmi/perf.c 				 sizeof(__le32), 0, &t);
t                 382 drivers/firmware/arm_scmi/perf.c 	put_unaligned_le32(domain, t->tx.buf);
t                 384 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 386 drivers/firmware/arm_scmi/perf.c 		limits = t->rx.buf;
t                 392 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 415 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 419 drivers/firmware/arm_scmi/perf.c 				 sizeof(*lvl), 0, &t);
t                 423 drivers/firmware/arm_scmi/perf.c 	t->hdr.poll_completion = poll;
t                 424 drivers/firmware/arm_scmi/perf.c 	lvl = t->tx.buf;
t                 428 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 430 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 453 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 456 drivers/firmware/arm_scmi/perf.c 				 sizeof(u32), sizeof(u32), &t);
t                 460 drivers/firmware/arm_scmi/perf.c 	t->hdr.poll_completion = poll;
t                 461 drivers/firmware/arm_scmi/perf.c 	put_unaligned_le32(domain, t->tx.buf);
t                 463 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 465 drivers/firmware/arm_scmi/perf.c 		*level = get_unaligned_le32(t->rx.buf);
t                 467 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                 504 drivers/firmware/arm_scmi/perf.c 	struct scmi_xfer *t;
t                 514 drivers/firmware/arm_scmi/perf.c 				 sizeof(*info), sizeof(*resp), &t);
t                 518 drivers/firmware/arm_scmi/perf.c 	info = t->tx.buf;
t                 522 drivers/firmware/arm_scmi/perf.c 	ret = scmi_do_xfer(handle, t);
t                 526 drivers/firmware/arm_scmi/perf.c 	resp = t->rx.buf;
t                 560 drivers/firmware/arm_scmi/perf.c 	scmi_xfer_put(handle, t);
t                  63 drivers/firmware/arm_scmi/power.c 	struct scmi_xfer *t;
t                  67 drivers/firmware/arm_scmi/power.c 				 SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
t                  71 drivers/firmware/arm_scmi/power.c 	attr = t->rx.buf;
t                  73 drivers/firmware/arm_scmi/power.c 	ret = scmi_do_xfer(handle, t);
t                  81 drivers/firmware/arm_scmi/power.c 	scmi_xfer_put(handle, t);
t                  90 drivers/firmware/arm_scmi/power.c 	struct scmi_xfer *t;
t                  95 drivers/firmware/arm_scmi/power.c 				 sizeof(*attr), &t);
t                  99 drivers/firmware/arm_scmi/power.c 	put_unaligned_le32(domain, t->tx.buf);
t                 100 drivers/firmware/arm_scmi/power.c 	attr = t->rx.buf;
t                 102 drivers/firmware/arm_scmi/power.c 	ret = scmi_do_xfer(handle, t);
t                 112 drivers/firmware/arm_scmi/power.c 	scmi_xfer_put(handle, t);
t                 120 drivers/firmware/arm_scmi/power.c 	struct scmi_xfer *t;
t                 124 drivers/firmware/arm_scmi/power.c 				 sizeof(*st), 0, &t);
t                 128 drivers/firmware/arm_scmi/power.c 	st = t->tx.buf;
t                 133 drivers/firmware/arm_scmi/power.c 	ret = scmi_do_xfer(handle, t);
t                 135 drivers/firmware/arm_scmi/power.c 	scmi_xfer_put(handle, t);
t                 143 drivers/firmware/arm_scmi/power.c 	struct scmi_xfer *t;
t                 146 drivers/firmware/arm_scmi/power.c 				 sizeof(u32), sizeof(u32), &t);
t                 150 drivers/firmware/arm_scmi/power.c 	put_unaligned_le32(domain, t->tx.buf);
t                 152 drivers/firmware/arm_scmi/power.c 	ret = scmi_do_xfer(handle, t);
t                 154 drivers/firmware/arm_scmi/power.c 		*state = get_unaligned_le32(t->rx.buf);
t                 156 drivers/firmware/arm_scmi/power.c 	scmi_xfer_put(handle, t);
t                  59 drivers/firmware/arm_scmi/reset.c 	struct scmi_xfer *t;
t                  63 drivers/firmware/arm_scmi/reset.c 				 SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
t                  67 drivers/firmware/arm_scmi/reset.c 	ret = scmi_do_xfer(handle, t);
t                  69 drivers/firmware/arm_scmi/reset.c 		attr = get_unaligned_le32(t->rx.buf);
t                  73 drivers/firmware/arm_scmi/reset.c 	scmi_xfer_put(handle, t);
t                  82 drivers/firmware/arm_scmi/reset.c 	struct scmi_xfer *t;
t                  87 drivers/firmware/arm_scmi/reset.c 				 sizeof(*attr), &t);
t                  91 drivers/firmware/arm_scmi/reset.c 	put_unaligned_le32(domain, t->tx.buf);
t                  92 drivers/firmware/arm_scmi/reset.c 	attr = t->rx.buf;
t                  94 drivers/firmware/arm_scmi/reset.c 	ret = scmi_do_xfer(handle, t);
t                 106 drivers/firmware/arm_scmi/reset.c 	scmi_xfer_put(handle, t);
t                 137 drivers/firmware/arm_scmi/reset.c 	struct scmi_xfer *t;
t                 146 drivers/firmware/arm_scmi/reset.c 				 sizeof(*dom), 0, &t);
t                 150 drivers/firmware/arm_scmi/reset.c 	dom = t->tx.buf;
t                 156 drivers/firmware/arm_scmi/reset.c 		ret = scmi_do_xfer_with_response(handle, t);
t                 158 drivers/firmware/arm_scmi/reset.c 		ret = scmi_do_xfer(handle, t);
t                 160 drivers/firmware/arm_scmi/reset.c 	scmi_xfer_put(handle, t);
t                  82 drivers/firmware/arm_scmi/sensors.c 	struct scmi_xfer *t;
t                  86 drivers/firmware/arm_scmi/sensors.c 				 SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
t                  90 drivers/firmware/arm_scmi/sensors.c 	attr = t->rx.buf;
t                  92 drivers/firmware/arm_scmi/sensors.c 	ret = scmi_do_xfer(handle, t);
t                 101 drivers/firmware/arm_scmi/sensors.c 	scmi_xfer_put(handle, t);
t                 111 drivers/firmware/arm_scmi/sensors.c 	struct scmi_xfer *t;
t                 115 drivers/firmware/arm_scmi/sensors.c 				 SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
t                 119 drivers/firmware/arm_scmi/sensors.c 	buf = t->rx.buf;
t                 123 drivers/firmware/arm_scmi/sensors.c 		put_unaligned_le32(desc_index, t->tx.buf);
t                 125 drivers/firmware/arm_scmi/sensors.c 		ret = scmi_do_xfer(handle, t);
t                 163 drivers/firmware/arm_scmi/sensors.c 	scmi_xfer_put(handle, t);
t                 172 drivers/firmware/arm_scmi/sensors.c 	struct scmi_xfer *t;
t                 176 drivers/firmware/arm_scmi/sensors.c 				 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
t                 180 drivers/firmware/arm_scmi/sensors.c 	cfg = t->tx.buf;
t                 184 drivers/firmware/arm_scmi/sensors.c 	ret = scmi_do_xfer(handle, t);
t                 186 drivers/firmware/arm_scmi/sensors.c 	scmi_xfer_put(handle, t);
t                 196 drivers/firmware/arm_scmi/sensors.c 	struct scmi_xfer *t;
t                 200 drivers/firmware/arm_scmi/sensors.c 				 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
t                 204 drivers/firmware/arm_scmi/sensors.c 	trip = t->tx.buf;
t                 210 drivers/firmware/arm_scmi/sensors.c 	ret = scmi_do_xfer(handle, t);
t                 212 drivers/firmware/arm_scmi/sensors.c 	scmi_xfer_put(handle, t);
t                 220 drivers/firmware/arm_scmi/sensors.c 	struct scmi_xfer *t;
t                 227 drivers/firmware/arm_scmi/sensors.c 				 sizeof(u64), &t);
t                 231 drivers/firmware/arm_scmi/sensors.c 	sensor = t->tx.buf;
t                 236 drivers/firmware/arm_scmi/sensors.c 		ret = scmi_do_xfer_with_response(handle, t);
t                 239 drivers/firmware/arm_scmi/sensors.c 						    ((__le32 *)t->rx.buf + 1));
t                 242 drivers/firmware/arm_scmi/sensors.c 		ret = scmi_do_xfer(handle, t);
t                 244 drivers/firmware/arm_scmi/sensors.c 			*value = get_unaligned_le64(t->rx.buf);
t                 247 drivers/firmware/arm_scmi/sensors.c 	scmi_xfer_put(handle, t);
t                 356 drivers/firmware/arm_scpi.c 	struct scpi_xfer *t, *match = NULL;
t                 373 drivers/firmware/arm_scpi.c 		list_for_each_entry(t, &ch->rx_pending, node)
t                 374 drivers/firmware/arm_scpi.c 			if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
t                 375 drivers/firmware/arm_scpi.c 				list_del(&t->node);
t                 376 drivers/firmware/arm_scpi.c 				match = t;
t                 424 drivers/firmware/arm_scpi.c 	struct scpi_xfer *t = msg;
t                 428 drivers/firmware/arm_scpi.c 	if (t->tx_buf) {
t                 430 drivers/firmware/arm_scpi.c 			memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
t                 432 drivers/firmware/arm_scpi.c 			memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
t                 435 drivers/firmware/arm_scpi.c 	if (t->rx_buf) {
t                 438 drivers/firmware/arm_scpi.c 		t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
t                 440 drivers/firmware/arm_scpi.c 		list_add_tail(&t->node, &ch->rx_pending);
t                 445 drivers/firmware/arm_scpi.c 		iowrite32(t->cmd, &mem->command);
t                 450 drivers/firmware/arm_scpi.c 	struct scpi_xfer *t;
t                 457 drivers/firmware/arm_scpi.c 	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
t                 458 drivers/firmware/arm_scpi.c 	list_del(&t->node);
t                 460 drivers/firmware/arm_scpi.c 	return t;
t                 463 drivers/firmware/arm_scpi.c static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
t                 466 drivers/firmware/arm_scpi.c 	list_add_tail(&t->node, &ch->xfers_list);
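get_scpi_xfer()/put_scpi_xfer() above hand out pre-allocated message slots from a free list and return them afterwards. A minimal sketch of that pool pattern with hypothetical types; the excerpt does not show the driver's locking, so the lock here is illustrative:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct my_xfer {                        /* hypothetical pooled message */
                struct list_head node;
        };

        struct my_xfer_pool {
                struct list_head free;
                spinlock_t lock;
        };

        /* Take an entry off the free list, or NULL if the pool is exhausted. */
        static struct my_xfer *my_get_xfer(struct my_xfer_pool *pool)
        {
                struct my_xfer *t = NULL;

                spin_lock(&pool->lock);
                if (!list_empty(&pool->free)) {
                        t = list_first_entry(&pool->free, struct my_xfer, node);
                        list_del(&t->node);
                }
                spin_unlock(&pool->lock);
                return t;
        }

        static void my_put_xfer(struct my_xfer_pool *pool, struct my_xfer *t)
        {
                spin_lock(&pool->lock);
                list_add_tail(&t->node, &pool->free);
                spin_unlock(&pool->lock);
        }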
t                 102 drivers/firmware/dmi-id.c 		char *t;
t                 108 drivers/firmware/dmi-id.c 		t = kmalloc(strlen(c) + 1, GFP_KERNEL);
t                 109 drivers/firmware/dmi-id.c 		if (!t)
t                 111 drivers/firmware/dmi-id.c 		ascii_filter(t, c);
t                 112 drivers/firmware/dmi-id.c 		l = scnprintf(p, left, ":%s%s", f->prefix, t);
t                 113 drivers/firmware/dmi-id.c 		kfree(t);
t                 230 drivers/firmware/iscsi_ibft.c static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
t                 235 drivers/firmware/iscsi_ibft.c 				"found %d instead!\n", t, id, hdr->id);
t                 241 drivers/firmware/iscsi_ibft.c 				"found %d instead!\n", t, length, hdr->length);
t                 305 drivers/firmware/tegra/bpmp-debugfs.c 	uint32_t d, t;
t                 323 drivers/firmware/tegra/bpmp-debugfs.c 		err = seqbuf_read_u32(seqbuf, &t);
t                 330 drivers/firmware/tegra/bpmp-debugfs.c 		if (t & DEBUGFS_S_ISDIR) {
t                 340 drivers/firmware/tegra/bpmp-debugfs.c 			mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
t                 341 drivers/firmware/tegra/bpmp-debugfs.c 			mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
t                 247 drivers/gpio/gpio-aspeed.c #define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
t                 248 drivers/gpio/gpio-aspeed.c #define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
t                 249 drivers/gpio/gpio-aspeed.c #define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
t                  33 drivers/gpio/gpio-tegra.c #define GPIO_CNF(t, x)		(GPIO_REG(t, x) + 0x00)
t                  34 drivers/gpio/gpio-tegra.c #define GPIO_OE(t, x)		(GPIO_REG(t, x) + 0x10)
t                  35 drivers/gpio/gpio-tegra.c #define GPIO_OUT(t, x)		(GPIO_REG(t, x) + 0X20)
t                  36 drivers/gpio/gpio-tegra.c #define GPIO_IN(t, x)		(GPIO_REG(t, x) + 0x30)
t                  37 drivers/gpio/gpio-tegra.c #define GPIO_INT_STA(t, x)	(GPIO_REG(t, x) + 0x40)
t                  38 drivers/gpio/gpio-tegra.c #define GPIO_INT_ENB(t, x)	(GPIO_REG(t, x) + 0x50)
t                  39 drivers/gpio/gpio-tegra.c #define GPIO_INT_LVL(t, x)	(GPIO_REG(t, x) + 0x60)
t                  40 drivers/gpio/gpio-tegra.c #define GPIO_INT_CLR(t, x)	(GPIO_REG(t, x) + 0x70)
t                  41 drivers/gpio/gpio-tegra.c #define GPIO_DBC_CNT(t, x)	(GPIO_REG(t, x) + 0xF0)
t                  44 drivers/gpio/gpio-tegra.c #define GPIO_MSK_CNF(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x00)
t                  45 drivers/gpio/gpio-tegra.c #define GPIO_MSK_OE(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x10)
t                  46 drivers/gpio/gpio-tegra.c #define GPIO_MSK_OUT(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0X20)
t                  47 drivers/gpio/gpio-tegra.c #define GPIO_MSK_DBC_EN(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x30)
t                  48 drivers/gpio/gpio-tegra.c #define GPIO_MSK_INT_STA(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x40)
t                  49 drivers/gpio/gpio-tegra.c #define GPIO_MSK_INT_ENB(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x50)
t                  50 drivers/gpio/gpio-tegra.c #define GPIO_MSK_INT_LVL(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x60)
t                 615 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c 	struct drm_display_mode *t, *mode;
t                 618 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c 	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
t                 628 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c 		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
t                 289 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c static void amdgpu_fence_fallback(struct timer_list *t)
t                 291 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_ring *ring = from_timer(ring, t,
t                 224 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	unsigned t;
t                 236 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	t = offset / AMDGPU_GPU_PAGE_SIZE;
t                 237 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
t                 246 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
t                 248 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 					       t, page_base, flags);
t                 278 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	unsigned i, j, t;
t                 285 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	t = offset / AMDGPU_GPU_PAGE_SIZE;
t                 289 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
t                 290 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
t                 315 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	unsigned t,p;
t                 325 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	t = offset / AMDGPU_GPU_PAGE_SIZE;
t                 326 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
t                 234 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
t                 283 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c 	signed long t;
t                 322 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c 			t = dma_fence_wait_any_timeout(fences, count, false,
t                 328 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c 			r = (t > 0) ? 0 : t;
t                  28 drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h #define AMDGIM_ERROR_CODE(t,c)       (((t&0xF)<<12)|(c&0xFFF))
t                 399 drivers/gpu/drm/amd/amdgpu/kv_dpm.c 			data |= ((values->t << local_cac_reg->t_shift) &
t                  81 drivers/gpu/drm/amd/amdgpu/kv_dpm.h 	u32 t;
t                1864 drivers/gpu/drm/amd/amdgpu/si_dpm.c 						     u16 v, s32 t, u32 ileakage, u32 *leakage)
t                1872 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	temperature = div64_s64(drm_int2fixp(t), 1000);
t                1893 drivers/gpu/drm/amd/amdgpu/si_dpm.c 					     s32 t,
t                1897 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
t                2684 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	s32 t;
t                2692 drivers/gpu/drm/amd/amdgpu/si_dpm.c 		t = (1000 * (i * t_step + t0));
t                2700 drivers/gpu/drm/amd/amdgpu/si_dpm.c 							 t,
t                3358 drivers/gpu/drm/amd/amdgpu/si_dpm.c static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
t                3367 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	t1 = (t * (k - 100));
t                3370 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	ah = ((a * t) + 5000) / 10000;
t                3373 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	*th = t - ah;
t                3374 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	*tl = t + al;
t                1917 drivers/gpu/drm/amd/amdgpu/sid.h #define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
t                1919 drivers/gpu/drm/amd/amdgpu/sid.h 					 (((t) & 0x1) << 23) |		\
t                 122 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c 	enum transmitter t)
t                 124 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c 	switch (t) {
t                  55 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h 	enum transmitter t);
t                 145 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c 	enum transmitter t)
t                 147 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c 	switch (t) {
t                  50 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h 	enum transmitter t);
t                  51 drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h 	uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
t                  58 drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h 	uint8_t (*phy_id_to_atom)(enum transmitter t);
t                  34 drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c static uint8_t phy_id_to_atom(enum transmitter t)
t                  38 drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c 	switch (t) {
t                  34 drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c static uint8_t phy_id_to_atom(enum transmitter t)
t                  38 drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c 	switch (t) {
t                  34 drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c static uint8_t phy_id_to_atom(enum transmitter t)
t                  38 drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c 	switch (t) {
t                 273 drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c static uint8_t phy_id_to_atom(enum transmitter t)
t                 277 drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c 	switch (t) {
t                 819 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	struct fixed31_32 t;
t                 847 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 			t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
t                 850 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 			t = dc_fixpt_zero;
t                 855 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		t2 = dc_fixpt_mul(t, t);
t                 856 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		t3 = dc_fixpt_mul(t2, t);
t                 873 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 				dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
t                 433 drivers/gpu/drm/drm_connector.c 	struct drm_display_mode *mode, *t;
t                 447 drivers/gpu/drm/drm_connector.c 	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
t                 450 drivers/gpu/drm/drm_connector.c 	list_for_each_entry_safe(mode, t, &connector->modes, head)
t                1862 drivers/gpu/drm/drm_edid.c #define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
t                1875 drivers/gpu/drm/drm_edid.c 	struct drm_display_mode *t, *cur_mode, *preferred_mode;
t                1890 drivers/gpu/drm/drm_edid.c 	list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
t                2015 drivers/gpu/drm/drm_edid.c is_rb(struct detailed_timing *t, void *data)
t                2017 drivers/gpu/drm/drm_edid.c 	u8 *r = (u8 *)t;
t                2037 drivers/gpu/drm/drm_edid.c find_gtf2(struct detailed_timing *t, void *data)
t                2039 drivers/gpu/drm/drm_edid.c 	u8 *r = (u8 *)t;
t                2124 drivers/gpu/drm/drm_edid.c 	     struct std_timing *t)
t                2130 drivers/gpu/drm/drm_edid.c 	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
t                2132 drivers/gpu/drm/drm_edid.c 	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
t                2136 drivers/gpu/drm/drm_edid.c 	if (bad_std_timing(t->hsize, t->vfreq_aspect))
t                2140 drivers/gpu/drm/drm_edid.c 	hsize = t->hsize * 8 + 248;
t                2386 drivers/gpu/drm/drm_edid.c 		    struct edid *edid, u8 *t)
t                2390 drivers/gpu/drm/drm_edid.c 	hmin = t[7];
t                2392 drivers/gpu/drm/drm_edid.c 	    hmin += ((t[4] & 0x04) ? 255 : 0);
t                2393 drivers/gpu/drm/drm_edid.c 	hmax = t[8];
t                2395 drivers/gpu/drm/drm_edid.c 	    hmax += ((t[4] & 0x08) ? 255 : 0);
t                2403 drivers/gpu/drm/drm_edid.c 		    struct edid *edid, u8 *t)
t                2407 drivers/gpu/drm/drm_edid.c 	vmin = t[5];
t                2409 drivers/gpu/drm/drm_edid.c 	    vmin += ((t[4] & 0x01) ? 255 : 0);
t                2410 drivers/gpu/drm/drm_edid.c 	vmax = t[6];
t                2412 drivers/gpu/drm/drm_edid.c 	    vmax += ((t[4] & 0x02) ? 255 : 0);
t                2419 drivers/gpu/drm/drm_edid.c range_pixel_clock(struct edid *edid, u8 *t)
t                2422 drivers/gpu/drm/drm_edid.c 	if (t[9] == 0 || t[9] == 255)
t                2426 drivers/gpu/drm/drm_edid.c 	if (edid->revision >= 4 && t[10] == 0x04)
t                2427 drivers/gpu/drm/drm_edid.c 		return (t[9] * 10000) - ((t[12] >> 2) * 250);
t                2430 drivers/gpu/drm/drm_edid.c 	return t[9] * 10000 + 5001;
t                2438 drivers/gpu/drm/drm_edid.c 	u8 *t = (u8 *)timing;
t                2440 drivers/gpu/drm/drm_edid.c 	if (!mode_in_hsync_range(mode, edid, t))
t                2443 drivers/gpu/drm/drm_edid.c 	if (!mode_in_vsync_range(mode, edid, t))
t                2446 drivers/gpu/drm/drm_edid.c 	if ((max_clock = range_pixel_clock(edid, t)))
t                2451 drivers/gpu/drm/drm_edid.c 	if (edid->revision >= 4 && t[10] == 0x04)
t                2452 drivers/gpu/drm/drm_edid.c 		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
t                4018 drivers/gpu/drm/drm_edid.c monitor_name(struct detailed_timing *t, void *data)
t                4020 drivers/gpu/drm/drm_edid.c 	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
t                4021 drivers/gpu/drm/drm_edid.c 		*(u8 **)data = t->data.other_data.data.str.str;
t                1292 drivers/gpu/drm/drm_modes.c 	struct drm_display_mode *mode, *t;
t                1294 drivers/gpu/drm/drm_modes.c 	list_for_each_entry_safe(mode, t, mode_list, head) {
t                 395 drivers/gpu/drm/drm_vblank.c static void vblank_disable_fn(struct timer_list *t)
t                 397 drivers/gpu/drm/drm_vblank.c 	struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
t                1137 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e, *t;
t                1172 drivers/gpu/drm/drm_vblank.c 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
t                1712 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e, *t;
t                1720 drivers/gpu/drm/drm_vblank.c 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
t                 285 drivers/gpu/drm/etnaviv/etnaviv_drv.c #define TS(t) ((struct timespec){ \
t                 286 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	.tv_sec = (t).tv_sec, \
t                 287 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	.tv_nsec = (t).tv_nsec \
t                 158 drivers/gpu/drm/exynos/exynos_drm_vidi.c static void vidi_fake_vblank_timer(struct timer_list *t)
t                 160 drivers/gpu/drm/exynos/exynos_drm_vidi.c 	struct vidi_context *ctx = from_timer(ctx, t, timer);
t                  74 drivers/gpu/drm/gma500/accel_2d.c 	unsigned long t = jiffies + HZ;
t                  78 drivers/gpu/drm/gma500/accel_2d.c 		if (time_after(jiffies, t)) {
t                  56 drivers/gpu/drm/gma500/mdfld_dsi_output.h 	int t = 100000;
t                  59 drivers/gpu/drm/gma500/mdfld_dsi_output.h 		if (--t == 0)
t                  14 drivers/gpu/drm/gma500/psb_lid.c static void psb_lid_timer_func(struct timer_list *t)
t                  16 drivers/gpu/drm/gma500/psb_lid.c 	struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
t                 748 drivers/gpu/drm/i2c/tda998x_drv.c static void tda998x_edid_delay_done(struct timer_list *t)
t                 750 drivers/gpu/drm/i2c/tda998x_drv.c 	struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
t                  35 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct igt_live_test t;
t                  97 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
t                 143 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = igt_live_test_end(&t);
t                 370 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		struct igt_live_test t;
t                 387 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
t                 445 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		if (igt_live_test_end(&t))
t                 465 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct igt_live_test t;
t                 494 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = igt_live_test_begin(&t, i915, __func__, "");
t                 570 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (igt_live_test_end(&t))
t                1047 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct igt_live_test t;
t                1066 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = igt_live_test_begin(&t, i915, __func__, "");
t                1141 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (igt_live_test_end(&t))
t                1370 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct igt_live_test t;
t                1392 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = igt_live_test_begin(&t, i915, __func__, "");
t                1459 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (igt_live_test_end(&t))
t                1052 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		struct tasklet_struct *t = &engine->execlists.tasklet;
t                1057 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		if (tasklet_trylock(t)) {
t                1059 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			if (__tasklet_is_enabled(t))
t                1060 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				t->func(t->data);
t                1061 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			tasklet_unlock(t);
t                1066 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		tasklet_unlock_wait(t);
t                 598 drivers/gpu/drm/i915/gt/intel_mocs.c 	struct drm_i915_mocs_table t;
t                 605 drivers/gpu/drm/i915/gt/intel_mocs.c 	if (get_mocs_settings(rq->engine->gt, &t)) {
t                 607 drivers/gpu/drm/i915/gt/intel_mocs.c 		ret = emit_mocs_control_table(rq, &t);
t                 612 drivers/gpu/drm/i915/gt/intel_mocs.c 		ret = emit_mocs_l3cc_table(rq, &t);
t                  82 drivers/gpu/drm/i915/gt/mock_engine.c static void hw_delay_complete(struct timer_list *t)
t                  84 drivers/gpu/drm/i915/gt/mock_engine.c 	struct mock_engine *engine = from_timer(engine, t, hw_delay);
t                1614 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct tasklet_struct * const t = &engine->execlists.tasklet;
t                1620 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	tasklet_disable_nosync(t);
t                1626 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	tasklet_enable(t);
t                 358 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct igt_live_test t;
t                 367 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
t                 461 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_end(&t)) {
t                 540 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct igt_live_test t;
t                 546 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
t                 587 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_end(&t)) {
t                 643 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct igt_live_test t;
t                 649 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
t                 693 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_end(&t)) {
t                1175 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct igt_live_test t;
t                1202 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
t                1266 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_live_test_end(&t)) {
t                1596 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct igt_live_test t;
t                1630 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
t                1652 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (igt_live_test_end(&t))
t                1683 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct igt_live_test t;
t                1715 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
t                1775 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = igt_live_test_end(&t);
t                1856 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct igt_live_test t;
t                1879 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
t                1924 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = igt_live_test_end(&t);
t                 147 drivers/gpu/drm/i915/gvt/gtt.c #define gtt_init_entry(e, t, p, v) do { \
t                 148 drivers/gpu/drm/i915/gvt/gtt.c 	(e)->type = t; \
t                  53 drivers/gpu/drm/i915/gvt/gvt.c 	struct intel_vgpu_type *t;
t                  58 drivers/gpu/drm/i915/gvt/gvt.c 		t = &gvt->types[i];
t                  59 drivers/gpu/drm/i915/gvt/gvt.c 		if (!strncmp(t->name, name + strlen(driver_name) + 1,
t                  60 drivers/gpu/drm/i915/gvt/gvt.c 			sizeof(t->name)))
t                  61 drivers/gpu/drm/i915/gvt/gvt.c 			return t;
t                 863 drivers/gpu/drm/i915/gvt/handlers.c 		u8 t)
t                 865 drivers/gpu/drm/i915/gvt/handlers.c 	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
t                 871 drivers/gpu/drm/i915/gvt/handlers.c 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
t                 883 drivers/gpu/drm/i915/gvt/handlers.c 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
t                 946 drivers/gpu/drm/i915/gvt/handlers.c 		int t;
t                 975 drivers/gpu/drm/i915/gvt/handlers.c 		for (t = 0; t < 4; t++) {
t                 976 drivers/gpu/drm/i915/gvt/handlers.c 			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
t                 978 drivers/gpu/drm/i915/gvt/handlers.c 			buf[t * 4] = (r >> 24) & 0xff;
t                 979 drivers/gpu/drm/i915/gvt/handlers.c 			buf[t * 4 + 1] = (r >> 16) & 0xff;
t                 980 drivers/gpu/drm/i915/gvt/handlers.c 			buf[t * 4 + 2] = (r >> 8) & 0xff;
t                 981 drivers/gpu/drm/i915/gvt/handlers.c 			buf[t * 4 + 3] = r & 0xff;
t                 986 drivers/gpu/drm/i915/gvt/handlers.c 			for (t = 0; t <= len; t++) {
t                 987 drivers/gpu/drm/i915/gvt/handlers.c 				int p = addr + t;
t                 989 drivers/gpu/drm/i915/gvt/handlers.c 				dpcd->data[p] = buf[t];
t                 993 drivers/gpu/drm/i915/gvt/handlers.c 							buf[t]);
t                1045 drivers/gpu/drm/i915/gvt/handlers.c 				int t;
t                1047 drivers/gpu/drm/i915/gvt/handlers.c 				t = dpcd->data[addr + i - 1];
t                1048 drivers/gpu/drm/i915/gvt/handlers.c 				t <<= (24 - 8 * (i % 4));
t                1049 drivers/gpu/drm/i915/gvt/handlers.c 				ret |= t;
t                  80 drivers/gpu/drm/i915/i915_gem.h static inline void tasklet_lock(struct tasklet_struct *t)
t                  82 drivers/gpu/drm/i915/i915_gem.h 	while (!tasklet_trylock(t))
t                  86 drivers/gpu/drm/i915/i915_gem.h static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
t                  88 drivers/gpu/drm/i915/i915_gem.h 	if (!atomic_fetch_inc(&t->count))
t                  89 drivers/gpu/drm/i915/i915_gem.h 		tasklet_unlock_wait(t);
t                  92 drivers/gpu/drm/i915/i915_gem.h static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
t                  94 drivers/gpu/drm/i915/i915_gem.h 	return !atomic_read(&t->count);
t                  97 drivers/gpu/drm/i915/i915_gem.h static inline bool __tasklet_enable(struct tasklet_struct *t)
t                  99 drivers/gpu/drm/i915/i915_gem.h 	return atomic_dec_and_test(&t->count);
t                 102 drivers/gpu/drm/i915/i915_gem.h static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
t                 104 drivers/gpu/drm/i915/i915_gem.h 	return test_bit(TASKLET_STATE_SCHED, &t->state);
t                4291 drivers/gpu/drm/i915/i915_reg.h #define   EDP_MAX_SU_DISABLE_TIME(t)	((t) << 20)
t                1304 drivers/gpu/drm/i915/i915_request.c 	unsigned long t;
t                1319 drivers/gpu/drm/i915/i915_request.c 	t = local_clock() >> 10;
t                1322 drivers/gpu/drm/i915/i915_request.c 	return t;
t                 129 drivers/gpu/drm/i915/i915_selftest.h #define igt_timeout(t, fmt, ...) \
t                 130 drivers/gpu/drm/i915/i915_selftest.h 	__igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
t                 390 drivers/gpu/drm/i915/i915_sw_fence.c static void timer_i915_sw_fence_wake(struct timer_list *t)
t                 392 drivers/gpu/drm/i915/i915_sw_fence.c 	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
t                8093 drivers/gpu/drm/i915/intel_pm.c 	u16 t;
t                8140 drivers/gpu/drm/i915/intel_pm.c 		    cparams[i].t == dev_priv->ips.r_t) {
t                8262 drivers/gpu/drm/i915/intel_pm.c 	unsigned long t, corr, state1, corr2, state2;
t                8273 drivers/gpu/drm/i915/intel_pm.c 	t = i915_mch_val(dev_priv);
t                8278 drivers/gpu/drm/i915/intel_pm.c 	if (t > 80)
t                8279 drivers/gpu/drm/i915/intel_pm.c 		corr = ((t * 2349) + 135940);
t                8280 drivers/gpu/drm/i915/intel_pm.c 	else if (t >= 50)
t                8281 drivers/gpu/drm/i915/intel_pm.c 		corr = ((t * 964) + 29317);
t                8283 drivers/gpu/drm/i915/intel_pm.c 		corr = ((t * 301) + 1004);
t                 116 drivers/gpu/drm/i915/intel_wakeref.c static void wakeref_auto_timeout(struct timer_list *t)
t                 118 drivers/gpu/drm/i915/intel_wakeref.c 	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
t                 284 drivers/gpu/drm/i915/selftests/i915_request.c 	struct smoketest *t = arg;
t                 285 drivers/gpu/drm/i915/selftests/i915_request.c 	struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
t                 286 drivers/gpu/drm/i915/selftests/i915_request.c 	const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
t                 287 drivers/gpu/drm/i915/selftests/i915_request.c 	const unsigned int total = 4 * t->ncontexts + 1;
t                 336 drivers/gpu/drm/i915/selftests/i915_request.c 				t->contexts[order[n] % t->ncontexts];
t                 342 drivers/gpu/drm/i915/selftests/i915_request.c 			ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
t                 344 drivers/gpu/drm/i915/selftests/i915_request.c 			rq = t->request_alloc(ce);
t                 386 drivers/gpu/drm/i915/selftests/i915_request.c 			       t->engine->name);
t                 389 drivers/gpu/drm/i915/selftests/i915_request.c 			intel_gt_set_wedged(t->engine->gt);
t                 420 drivers/gpu/drm/i915/selftests/i915_request.c 	atomic_long_add(num_fences, &t->num_fences);
t                 421 drivers/gpu/drm/i915/selftests/i915_request.c 	atomic_long_add(num_waits, &t->num_waits);
t                 432 drivers/gpu/drm/i915/selftests/i915_request.c 	struct smoketest t = {
t                 453 drivers/gpu/drm/i915/selftests/i915_request.c 	t.contexts =
t                 454 drivers/gpu/drm/i915/selftests/i915_request.c 		kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
t                 455 drivers/gpu/drm/i915/selftests/i915_request.c 	if (!t.contexts) {
t                 460 drivers/gpu/drm/i915/selftests/i915_request.c 	mutex_lock(&t.engine->i915->drm.struct_mutex);
t                 461 drivers/gpu/drm/i915/selftests/i915_request.c 	for (n = 0; n < t.ncontexts; n++) {
t                 462 drivers/gpu/drm/i915/selftests/i915_request.c 		t.contexts[n] = mock_context(t.engine->i915, "mock");
t                 463 drivers/gpu/drm/i915/selftests/i915_request.c 		if (!t.contexts[n]) {
t                 468 drivers/gpu/drm/i915/selftests/i915_request.c 	mutex_unlock(&t.engine->i915->drm.struct_mutex);
t                 472 drivers/gpu/drm/i915/selftests/i915_request.c 					 &t, "igt/%d", n);
t                 494 drivers/gpu/drm/i915/selftests/i915_request.c 		atomic_long_read(&t.num_waits),
t                 495 drivers/gpu/drm/i915/selftests/i915_request.c 		atomic_long_read(&t.num_fences),
t                 498 drivers/gpu/drm/i915/selftests/i915_request.c 	mutex_lock(&t.engine->i915->drm.struct_mutex);
t                 500 drivers/gpu/drm/i915/selftests/i915_request.c 	for (n = 0; n < t.ncontexts; n++) {
t                 501 drivers/gpu/drm/i915/selftests/i915_request.c 		if (!t.contexts[n])
t                 503 drivers/gpu/drm/i915/selftests/i915_request.c 		mock_context_close(t.contexts[n]);
t                 505 drivers/gpu/drm/i915/selftests/i915_request.c 	mutex_unlock(&t.engine->i915->drm.struct_mutex);
t                 506 drivers/gpu/drm/i915/selftests/i915_request.c 	kfree(t.contexts);
t                 543 drivers/gpu/drm/i915/selftests/i915_request.c 	struct igt_live_test t;
t                 561 drivers/gpu/drm/i915/selftests/i915_request.c 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
t                 600 drivers/gpu/drm/i915/selftests/i915_request.c 		err = igt_live_test_end(&t);
t                 685 drivers/gpu/drm/i915/selftests/i915_request.c 	struct igt_live_test t;
t                 710 drivers/gpu/drm/i915/selftests/i915_request.c 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
t                 742 drivers/gpu/drm/i915/selftests/i915_request.c 		err = igt_live_test_end(&t);
t                 838 drivers/gpu/drm/i915/selftests/i915_request.c 	struct igt_live_test t;
t                 851 drivers/gpu/drm/i915/selftests/i915_request.c 	err = igt_live_test_begin(&t, i915, __func__, "");
t                 921 drivers/gpu/drm/i915/selftests/i915_request.c 	err = igt_live_test_end(&t);
t                 942 drivers/gpu/drm/i915/selftests/i915_request.c 	struct igt_live_test t;
t                 955 drivers/gpu/drm/i915/selftests/i915_request.c 	err = igt_live_test_begin(&t, i915, __func__, "");
t                1038 drivers/gpu/drm/i915/selftests/i915_request.c 	err = igt_live_test_end(&t);
t                1105 drivers/gpu/drm/i915/selftests/i915_request.c 	struct smoketest t[I915_NUM_ENGINES];
t                1141 drivers/gpu/drm/i915/selftests/i915_request.c 	memset(&t[0], 0, sizeof(t[0]));
t                1142 drivers/gpu/drm/i915/selftests/i915_request.c 	t[0].request_alloc = __live_request_alloc;
t                1143 drivers/gpu/drm/i915/selftests/i915_request.c 	t[0].ncontexts = 64;
t                1144 drivers/gpu/drm/i915/selftests/i915_request.c 	t[0].contexts = kmalloc_array(t[0].ncontexts,
t                1145 drivers/gpu/drm/i915/selftests/i915_request.c 				      sizeof(*t[0].contexts),
t                1147 drivers/gpu/drm/i915/selftests/i915_request.c 	if (!t[0].contexts) {
t                1153 drivers/gpu/drm/i915/selftests/i915_request.c 	for (n = 0; n < t[0].ncontexts; n++) {
t                1154 drivers/gpu/drm/i915/selftests/i915_request.c 		t[0].contexts[n] = live_context(i915, file);
t                1155 drivers/gpu/drm/i915/selftests/i915_request.c 		if (!t[0].contexts[n]) {
t                1166 drivers/gpu/drm/i915/selftests/i915_request.c 		t[id] = t[0];
t                1167 drivers/gpu/drm/i915/selftests/i915_request.c 		t[id].engine = engine;
t                1168 drivers/gpu/drm/i915/selftests/i915_request.c 		t[id].max_batch = max_batches(t[0].contexts[0], engine);
t                1169 drivers/gpu/drm/i915/selftests/i915_request.c 		if (t[id].max_batch < 0) {
t                1170 drivers/gpu/drm/i915/selftests/i915_request.c 			ret = t[id].max_batch;
t                1175 drivers/gpu/drm/i915/selftests/i915_request.c 		t[id].max_batch /= num_online_cpus() + 1;
t                1177 drivers/gpu/drm/i915/selftests/i915_request.c 			 t[id].max_batch, engine->name);
t                1183 drivers/gpu/drm/i915/selftests/i915_request.c 					  &t[id], "igt/%d.%d", id, n);
t                1216 drivers/gpu/drm/i915/selftests/i915_request.c 		num_waits += atomic_long_read(&t[id].num_waits);
t                1217 drivers/gpu/drm/i915/selftests/i915_request.c 		num_fences += atomic_long_read(&t[id].num_fences);
t                1226 drivers/gpu/drm/i915/selftests/i915_request.c 	kfree(t[0].contexts);
t                 500 drivers/gpu/drm/i915/selftests/i915_vma.c 	}, *t;
t                 512 drivers/gpu/drm/i915/selftests/i915_vma.c 	for (t = types; *t; t++) {
t                 523 drivers/gpu/drm/i915/selftests/i915_vma.c 			view.type = *t;
t                 873 drivers/gpu/drm/i915/selftests/i915_vma.c 	}, *t;
t                 886 drivers/gpu/drm/i915/selftests/i915_vma.c 	for (t = types; *t; t++) {
t                 889 drivers/gpu/drm/i915/selftests/i915_vma.c 				.type = *t,
t                 909 drivers/gpu/drm/i915/selftests/i915_vma.c 			GEM_BUG_ON(vma->ggtt_view.type != *t);
t                 923 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (*t == I915_GGTT_VIEW_ROTATED)
t                 955 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (*t == I915_GGTT_VIEW_ROTATED)
t                 964 drivers/gpu/drm/i915/selftests/i915_vma.c 						       *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
t                  13 drivers/gpu/drm/i915/selftests/igt_live_test.c int igt_live_test_begin(struct igt_live_test *t,
t                  24 drivers/gpu/drm/i915/selftests/igt_live_test.c 	t->i915 = i915;
t                  25 drivers/gpu/drm/i915/selftests/igt_live_test.c 	t->func = func;
t                  26 drivers/gpu/drm/i915/selftests/igt_live_test.c 	t->name = name;
t                  38 drivers/gpu/drm/i915/selftests/igt_live_test.c 	t->reset_global = i915_reset_count(&i915->gpu_error);
t                  41 drivers/gpu/drm/i915/selftests/igt_live_test.c 		t->reset_engine[id] =
t                  47 drivers/gpu/drm/i915/selftests/igt_live_test.c int igt_live_test_end(struct igt_live_test *t)
t                  49 drivers/gpu/drm/i915/selftests/igt_live_test.c 	struct drm_i915_private *i915 = t->i915;
t                  58 drivers/gpu/drm/i915/selftests/igt_live_test.c 	if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
t                  60 drivers/gpu/drm/i915/selftests/igt_live_test.c 		       t->func, t->name,
t                  61 drivers/gpu/drm/i915/selftests/igt_live_test.c 		       i915_reset_count(&i915->gpu_error) - t->reset_global);
t                  66 drivers/gpu/drm/i915/selftests/igt_live_test.c 		if (t->reset_engine[id] ==
t                  71 drivers/gpu/drm/i915/selftests/igt_live_test.c 		       t->func, t->name, engine->name,
t                  73 drivers/gpu/drm/i915/selftests/igt_live_test.c 		       t->reset_engine[id]);
t                  29 drivers/gpu/drm/i915/selftests/igt_live_test.h int igt_live_test_begin(struct igt_live_test *t,
t                  33 drivers/gpu/drm/i915/selftests/igt_live_test.h int igt_live_test_end(struct igt_live_test *t);
t                  56 drivers/gpu/drm/i915/selftests/lib_sw_fence.c static void timed_fence_wake(struct timer_list *t)
t                  58 drivers/gpu/drm/i915/selftests/lib_sw_fence.c 	struct timed_fence *tf = from_timer(tf, t, timer);
t                 510 drivers/gpu/drm/mediatek/mtk_dsi.c static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
t                 515 drivers/gpu/drm/mediatek/mtk_dsi.c 	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
t                  76 drivers/gpu/drm/msm/adreno/a5xx_preempt.c static void a5xx_preempt_timer(struct timer_list *t)
t                  78 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
t                1729 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c static void dpu_encoder_vsync_event_handler(struct timer_list *t)
t                1731 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
t                2131 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c static void dpu_encoder_frame_done_timeout(struct timer_list *t)
t                2133 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
t                  70 drivers/gpu/drm/msm/msm_atomic.c static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
t                  72 drivers/gpu/drm/msm/msm_atomic.c 	struct msm_pending_timer *timer = container_of(t,
t                 518 drivers/gpu/drm/msm/msm_gpu.c static void hangcheck_handler(struct timer_list *t)
t                 520 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
t                  87 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_fill(t,s,o,a,d,c) do {                                            \
t                  89 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h 	u##t __iomem *_m = nvkm_kmap(o);                                       \
t                  93 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h 				iowrite##t##_native(_d, &_m[_o++]);            \
t                  99 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h 			nvkm_wo##t((o), _a, _d);                               \
t                 260 drivers/gpu/drm/nouveau/nouveau_fence.c 	unsigned long t = jiffies, timeout = t + wait;
t                 265 drivers/gpu/drm/nouveau/nouveau_fence.c 		t = jiffies;
t                 267 drivers/gpu/drm/nouveau/nouveau_fence.c 		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
t                 287 drivers/gpu/drm/nouveau/nouveau_fence.c 	return timeout - t;
t                 188 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c 	const u16 t = outp->info.hasht;
t                 190 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c 	u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
t                 192 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c 		OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
t                 760 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c 			const u32 t = timeslice_mode;
t                 762 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c 			mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
t                 763 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c 			mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
t                 268 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c 			const u32 t = timeslice_mode;
t                 272 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c 			mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
t                 273 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c 			mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
t                  41 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define GPC_UNIT(t, r)    (0x500000 + (t) * 0x8000 + (r))
t                  42 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
t                  43 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
t                 346 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c #define T(t) cfg->timing_10_##t
t                  71 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c #define T(t) cfg->timing_10_##t
t                 485 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	struct ls_ucode_img *img, *t;
t                 584 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	list_for_each_entry_safe(img, t, &imgs, node) {
t                 474 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	unsigned long t;
t                 477 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	r = kstrtoul(buf, 0, &t);
t                 486 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 		if (t)
t                 507 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	unsigned int t;
t                 510 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	t = ddata->ulps_enabled;
t                 513 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	return snprintf(buf, PAGE_SIZE, "%u\n", t);
t                 522 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	unsigned long t;
t                 525 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	r = kstrtoul(buf, 0, &t);
t                 530 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	ddata->ulps_timeout = t;
t                 552 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	unsigned int t;
t                 555 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	t = ddata->ulps_timeout;
t                 558 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c 	return snprintf(buf, PAGE_SIZE, "%u\n", t);
t                3216 drivers/gpu/drm/omapdrm/dss/dispc.c 	struct videomode t = *vm;
t                3218 drivers/gpu/drm/omapdrm/dss/dispc.c 	DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
t                3220 drivers/gpu/drm/omapdrm/dss/dispc.c 	if (dispc_mgr_check_timings(dispc, channel, &t)) {
t                3226 drivers/gpu/drm/omapdrm/dss/dispc.c 		_dispc_mgr_set_lcd_timings(dispc, channel, &t);
t                3228 drivers/gpu/drm/omapdrm/dss/dispc.c 		xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
t                3229 drivers/gpu/drm/omapdrm/dss/dispc.c 		ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
t                3236 drivers/gpu/drm/omapdrm/dss/dispc.c 			t.hsync_len, t.hfront_porch, t.hback_porch,
t                3237 drivers/gpu/drm/omapdrm/dss/dispc.c 			t.vsync_len, t.vfront_porch, t.vback_porch);
t                3239 drivers/gpu/drm/omapdrm/dss/dispc.c 			vm_flag_to_int(t.flags, DISPLAY_FLAGS_VSYNC_HIGH, DISPLAY_FLAGS_VSYNC_LOW),
t                3240 drivers/gpu/drm/omapdrm/dss/dispc.c 			vm_flag_to_int(t.flags, DISPLAY_FLAGS_HSYNC_HIGH, DISPLAY_FLAGS_HSYNC_LOW),
t                3241 drivers/gpu/drm/omapdrm/dss/dispc.c 			vm_flag_to_int(t.flags, DISPLAY_FLAGS_PIXDATA_POSEDGE, DISPLAY_FLAGS_PIXDATA_NEGEDGE),
t                3242 drivers/gpu/drm/omapdrm/dss/dispc.c 			vm_flag_to_int(t.flags, DISPLAY_FLAGS_DE_HIGH, DISPLAY_FLAGS_DE_LOW),
t                3243 drivers/gpu/drm/omapdrm/dss/dispc.c 			vm_flag_to_int(t.flags, DISPLAY_FLAGS_SYNC_POSEDGE, DISPLAY_FLAGS_SYNC_NEGEDGE));
t                3247 drivers/gpu/drm/omapdrm/dss/dispc.c 		if (t.flags & DISPLAY_FLAGS_INTERLACED)
t                3248 drivers/gpu/drm/omapdrm/dss/dispc.c 			t.vactive /= 2;
t                3252 drivers/gpu/drm/omapdrm/dss/dispc.c 				    !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
t                3256 drivers/gpu/drm/omapdrm/dss/dispc.c 	dispc_mgr_set_size(dispc, channel, t.hactive, t.vactive);
t                 494 drivers/gpu/drm/omapdrm/dss/dsi.c 	int t;
t                 497 drivers/gpu/drm/omapdrm/dss/dsi.c 	t = 100;
t                 498 drivers/gpu/drm/omapdrm/dss/dsi.c 	while (t-- > 0) {
t                 546 drivers/gpu/drm/omapdrm/dss/dsi.c 	ktime_t t, setup_time, trans_time;
t                 553 drivers/gpu/drm/omapdrm/dss/dsi.c 	t = ktime_get();
t                 560 drivers/gpu/drm/omapdrm/dss/dsi.c 	trans_time = ktime_sub(t, dsi->perf_start_time);
t                1283 drivers/gpu/drm/omapdrm/dss/dsi.c 	int t = 0;
t                1295 drivers/gpu/drm/omapdrm/dss/dsi.c 		if (++t > 1000) {
t                1628 drivers/gpu/drm/omapdrm/dss/dsi.c 	int t = 0;
t                1636 drivers/gpu/drm/omapdrm/dss/dsi.c 		if (++t > 1000) {
t                1699 drivers/gpu/drm/omapdrm/dss/dsi.c 		unsigned int t;
t                1701 drivers/gpu/drm/omapdrm/dss/dsi.c 		for (t = 0; t < dsi->num_lanes_supported; ++t)
t                1702 drivers/gpu/drm/omapdrm/dss/dsi.c 			if (dsi->lanes[t].function == functions[i])
t                1705 drivers/gpu/drm/omapdrm/dss/dsi.c 		if (t == dsi->num_lanes_supported)
t                1708 drivers/gpu/drm/omapdrm/dss/dsi.c 		lane_number = t;
t                1709 drivers/gpu/drm/omapdrm/dss/dsi.c 		polarity = dsi->lanes[t].polarity;
t                1872 drivers/gpu/drm/omapdrm/dss/dsi.c 	int t, i;
t                1886 drivers/gpu/drm/omapdrm/dss/dsi.c 	t = 100000;
t                1902 drivers/gpu/drm/omapdrm/dss/dsi.c 		if (--t == 0) {
t                4213 drivers/gpu/drm/omapdrm/dss/dsi.c 		const struct omap_dss_dsi_videomode_timings *t)
t                4215 drivers/gpu/drm/omapdrm/dss/dsi.c 	unsigned long byteclk = t->hsclk / 4;
t                4218 drivers/gpu/drm/omapdrm/dss/dsi.c 	wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
t                4219 drivers/gpu/drm/omapdrm/dss/dsi.c 	pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
t                4220 drivers/gpu/drm/omapdrm/dss/dsi.c 	bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
t                4229 drivers/gpu/drm/omapdrm/dss/dsi.c 			t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
t                4231 drivers/gpu/drm/omapdrm/dss/dsi.c 			TO_DSI_T(t->hss),
t                4232 drivers/gpu/drm/omapdrm/dss/dsi.c 			TO_DSI_T(t->hsa),
t                4233 drivers/gpu/drm/omapdrm/dss/dsi.c 			TO_DSI_T(t->hse),
t                4234 drivers/gpu/drm/omapdrm/dss/dsi.c 			TO_DSI_T(t->hbp),
t                4236 drivers/gpu/drm/omapdrm/dss/dsi.c 			TO_DSI_T(t->hfp),
t                4274 drivers/gpu/drm/omapdrm/dss/dsi.c 		const struct omap_dss_dsi_videomode_timings *t)
t                4277 drivers/gpu/drm/omapdrm/dss/dsi.c 	unsigned long byteclk = t->hsclk / 4;
t                4282 drivers/gpu/drm/omapdrm/dss/dsi.c 	dsi_tput = (u64)byteclk * t->ndl * 8;
t                4283 drivers/gpu/drm/omapdrm/dss/dsi.c 	pck = (u32)div64_u64(dsi_tput, t->bitspp);
t                4284 drivers/gpu/drm/omapdrm/dss/dsi.c 	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
t                4285 drivers/gpu/drm/omapdrm/dss/dsi.c 	dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
t                4288 drivers/gpu/drm/omapdrm/dss/dsi.c 	vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
t                4289 drivers/gpu/drm/omapdrm/dss/dsi.c 	vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
t                4290 drivers/gpu/drm/omapdrm/dss/dsi.c 	vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
t                4291 drivers/gpu/drm/omapdrm/dss/dsi.c 	vm.hactive = t->hact;
t                4500 drivers/gpu/drm/omapdrm/dss/dsi.c 		int t;
t                4503 drivers/gpu/drm/omapdrm/dss/dsi.c 		t = 1 - hfp;
t                4504 drivers/gpu/drm/omapdrm/dss/dsi.c 		hbp = max(hbp - t, 1);
t                4509 drivers/gpu/drm/omapdrm/dss/dsi.c 			t = 1 - hfp;
t                4510 drivers/gpu/drm/omapdrm/dss/dsi.c 			hsa = max(hsa - t, 1);
t                4559 drivers/gpu/drm/omapdrm/dss/dsi.c 		int t;
t                4562 drivers/gpu/drm/omapdrm/dss/dsi.c 		t = 1 - hfp;
t                4563 drivers/gpu/drm/omapdrm/dss/dsi.c 		hbp = max(hbp - t, 1);
t                4568 drivers/gpu/drm/omapdrm/dss/dsi.c 			t = 1 - hfp;
t                4569 drivers/gpu/drm/omapdrm/dss/dsi.c 			hsa = max(hsa - t, 1);
t                 285 drivers/gpu/drm/omapdrm/dss/hdmi.h 	u32 t = 0, v;
t                 287 drivers/gpu/drm/omapdrm/dss/hdmi.h 		if (t++ > 10000)
t                 126 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c 		int t;
t                 134 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c 		t = 0;
t                 137 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c 			if (t++ > 10000) {
t                 327 drivers/gpu/drm/omapdrm/dss/pll.c 	int t;
t                 330 drivers/gpu/drm/omapdrm/dss/pll.c 	t = 100;
t                 331 drivers/gpu/drm/omapdrm/dss/pll.c 	while (t-- > 0) {
t                 362 drivers/gpu/drm/omapdrm/dss/pll.c 	int t = 100;
t                 364 drivers/gpu/drm/omapdrm/dss/pll.c 	while (t-- > 0) {
t                 382 drivers/gpu/drm/omapdrm/dss/venc.c 	int t = 1000;
t                 386 drivers/gpu/drm/omapdrm/dss/venc.c 		if (--t == 0) {
t                 203 drivers/gpu/drm/panel/panel-tpo-tpg110.c 	struct spi_transfer t[2];
t                 208 drivers/gpu/drm/panel/panel-tpo-tpg110.c 	memset(t, 0, sizeof(t));
t                 220 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].bits_per_word = 8;
t                 221 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].tx_buf = &buf[0];
t                 222 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].len = 1;
t                 224 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].tx_buf = &buf[1];
t                 225 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].len = 1;
t                 226 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].bits_per_word = 8;
t                 237 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].bits_per_word = 7;
t                 238 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].tx_buf = &buf[0];
t                 239 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[0].len = 1;
t                 241 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].rx_buf = &buf[1];
t                 242 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].len = 1;
t                 243 drivers/gpu/drm/panel/panel-tpo-tpg110.c 		t[1].bits_per_word = 8;
t                 246 drivers/gpu/drm/panel/panel-tpo-tpg110.c 	spi_message_add_tail(&t[0], &m);
t                 247 drivers/gpu/drm/panel/panel-tpo-tpg110.c 	spi_message_add_tail(&t[1], &m);
t                 824 drivers/gpu/drm/r128/r128_cce.c 	int i, t;
t                 835 drivers/gpu/drm/r128/r128_cce.c 	for (t = 0; t < dev_priv->usec_timeout; t++) {
t                 273 drivers/gpu/drm/radeon/kv_dpm.c 			data |= ((values->t << local_cac_reg->t_shift) &
t                  55 drivers/gpu/drm/radeon/kv_dpm.h 	u32 t;
t                 131 drivers/gpu/drm/radeon/mkregtable.c static void table_offset_add(struct table *t, struct offset *offset)
t                 133 drivers/gpu/drm/radeon/mkregtable.c 	list_add_tail(&offset->list, &t->offsets);
t                 136 drivers/gpu/drm/radeon/mkregtable.c static void table_init(struct table *t)
t                 138 drivers/gpu/drm/radeon/mkregtable.c 	INIT_LIST_HEAD(&t->offsets);
t                 139 drivers/gpu/drm/radeon/mkregtable.c 	t->offset_max = 0;
t                 140 drivers/gpu/drm/radeon/mkregtable.c 	t->nentry = 0;
t                 141 drivers/gpu/drm/radeon/mkregtable.c 	t->table = NULL;
t                 144 drivers/gpu/drm/radeon/mkregtable.c static void table_print(struct table *t)
t                 148 drivers/gpu/drm/radeon/mkregtable.c 	nlloop = (t->nentry + 3) / 4;
t                 149 drivers/gpu/drm/radeon/mkregtable.c 	c = t->nentry;
t                 150 drivers/gpu/drm/radeon/mkregtable.c 	printf("static const unsigned %s_reg_safe_bm[%d] = {\n", t->gpu_prefix,
t                 151 drivers/gpu/drm/radeon/mkregtable.c 	       t->nentry);
t                 162 drivers/gpu/drm/radeon/mkregtable.c 			printf("0x%08X,", t->table[id++]);
t                 169 drivers/gpu/drm/radeon/mkregtable.c static int table_build(struct table *t)
t                 174 drivers/gpu/drm/radeon/mkregtable.c 	t->nentry = ((t->offset_max >> 2) + 31) / 32;
t                 175 drivers/gpu/drm/radeon/mkregtable.c 	t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
t                 176 drivers/gpu/drm/radeon/mkregtable.c 	if (t->table == NULL)
t                 178 drivers/gpu/drm/radeon/mkregtable.c 	memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
t                 179 drivers/gpu/drm/radeon/mkregtable.c 	list_for_each_entry(offset, &t->offsets, list) {
t                 183 drivers/gpu/drm/radeon/mkregtable.c 		t->table[i] ^= m;
t                 189 drivers/gpu/drm/radeon/mkregtable.c static int parser_auth(struct table *t, const char *filename)
t                 227 drivers/gpu/drm/radeon/mkregtable.c 	t->gpu_prefix = gpu_name;
t                 253 drivers/gpu/drm/radeon/mkregtable.c 				table_offset_add(t, offset);
t                 254 drivers/gpu/drm/radeon/mkregtable.c 				if (o > t->offset_max)
t                 255 drivers/gpu/drm/radeon/mkregtable.c 					t->offset_max = o;
t                 260 drivers/gpu/drm/radeon/mkregtable.c 	if (t->offset_max < last_reg)
t                 261 drivers/gpu/drm/radeon/mkregtable.c 		t->offset_max = last_reg;
t                 262 drivers/gpu/drm/radeon/mkregtable.c 	return table_build(t);
t                 267 drivers/gpu/drm/radeon/mkregtable.c 	struct table t;
t                 273 drivers/gpu/drm/radeon/mkregtable.c 	table_init(&t);
t                 274 drivers/gpu/drm/radeon/mkregtable.c 	if (parser_auth(&t, argv[1])) {
t                 278 drivers/gpu/drm/radeon/mkregtable.c 	table_print(&t);
t                 743 drivers/gpu/drm/radeon/ni_dpm.c 						     u16 v, s32 t,
t                 751 drivers/gpu/drm/radeon/ni_dpm.c 	temperature = div64_s64(drm_int2fixp(t), 1000);
t                 766 drivers/gpu/drm/radeon/ni_dpm.c 					     s32 t,
t                 770 drivers/gpu/drm/radeon/ni_dpm.c 	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
t                3053 drivers/gpu/drm/radeon/ni_dpm.c 	s32 t;
t                3066 drivers/gpu/drm/radeon/ni_dpm.c 			t = (1000 * ((i + 1) * 8));
t                3068 drivers/gpu/drm/radeon/ni_dpm.c 			if (t < ni_pi->cac_data.leakage_minimum_temperature)
t                3069 drivers/gpu/drm/radeon/ni_dpm.c 				t = ni_pi->cac_data.leakage_minimum_temperature;
t                3074 drivers/gpu/drm/radeon/ni_dpm.c 							 t,
t                1337 drivers/gpu/drm/radeon/nid.h #define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
t                1338 drivers/gpu/drm/radeon/nid.h 					 (((t) & 0x1) << 23) |		\
t                2077 drivers/gpu/drm/radeon/r100.c static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
t                2079 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("pitch                      %d\n", t->pitch);
t                2080 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
t                2081 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("width                      %d\n", t->width);
t                2082 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("width_11                   %d\n", t->width_11);
t                2083 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("height                     %d\n", t->height);
t                2084 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("height_11                  %d\n", t->height_11);
t                2085 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("num levels                 %d\n", t->num_levels);
t                2086 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("depth                      %d\n", t->txdepth);
t                2087 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("bpp                        %d\n", t->cpp);
t                2088 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
t                2089 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
t                2090 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
t                2091 drivers/gpu/drm/radeon/r100.c 	DRM_ERROR("compress format            %d\n", t->compress_format);
t                 221 drivers/gpu/drm/radeon/r600_dpm.c int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
t                 230 drivers/gpu/drm/radeon/r600_dpm.c 	t1 = (t * (k - 100));
t                 233 drivers/gpu/drm/radeon/r600_dpm.c 	ah = ((a * t) + 5000) / 10000;
t                 236 drivers/gpu/drm/radeon/r600_dpm.c 	*th = t - ah;
t                 237 drivers/gpu/drm/radeon/r600_dpm.c 	*tl = t + al;
t                 390 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_sst(struct radeon_device *rdev, u32 t)
t                 392 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
t                 395 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_git(struct radeon_device *rdev, u32 t)
t                 397 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
t                 405 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_fct(struct radeon_device *rdev, u32 t)
t                 407 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
t                 142 drivers/gpu/drm/radeon/r600_dpm.h int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
t                 163 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_sst(struct radeon_device *rdev, u32 t);
t                 164 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_git(struct radeon_device *rdev, u32 t);
t                 166 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_fct(struct radeon_device *rdev, u32 t);
t                 645 drivers/gpu/drm/radeon/r600d.h #define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
t                 646 drivers/gpu/drm/radeon/r600d.h 					 (((t) & 0x1) << 23) |		\
t                 777 drivers/gpu/drm/radeon/radeon_connectors.c 	struct drm_display_mode *t, *mode;
t                 780 drivers/gpu/drm/radeon/radeon_connectors.c 	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
t                 790 drivers/gpu/drm/radeon/radeon_connectors.c 		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
t                1080 drivers/gpu/drm/radeon/radeon_fence.c 					     signed long t)
t                1089 drivers/gpu/drm/radeon/radeon_fence.c 		return t;
t                1091 drivers/gpu/drm/radeon/radeon_fence.c 	while (t > 0) {
t                1105 drivers/gpu/drm/radeon/radeon_fence.c 			t = -EDEADLK;
t                1109 drivers/gpu/drm/radeon/radeon_fence.c 		t = schedule_timeout(t);
t                1111 drivers/gpu/drm/radeon/radeon_fence.c 		if (t > 0 && intr && signal_pending(current))
t                1112 drivers/gpu/drm/radeon/radeon_fence.c 			t = -ERESTARTSYS;
t                1118 drivers/gpu/drm/radeon/radeon_fence.c 	return t;
t                 247 drivers/gpu/drm/radeon/radeon_gart.c 	unsigned t;
t                 255 drivers/gpu/drm/radeon/radeon_gart.c 	t = offset / RADEON_GPU_PAGE_SIZE;
t                 256 drivers/gpu/drm/radeon/radeon_gart.c 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
t                 260 drivers/gpu/drm/radeon/radeon_gart.c 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
t                 261 drivers/gpu/drm/radeon/radeon_gart.c 				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
t                 263 drivers/gpu/drm/radeon/radeon_gart.c 					radeon_gart_set_page(rdev, t,
t                 293 drivers/gpu/drm/radeon/radeon_gart.c 	unsigned t;
t                 302 drivers/gpu/drm/radeon/radeon_gart.c 	t = offset / RADEON_GPU_PAGE_SIZE;
t                 303 drivers/gpu/drm/radeon/radeon_gart.c 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
t                 308 drivers/gpu/drm/radeon/radeon_gart.c 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
t                 310 drivers/gpu/drm/radeon/radeon_gart.c 			rdev->gart.pages_entry[t] = page_entry;
t                 312 drivers/gpu/drm/radeon/radeon_gart.c 				radeon_gart_set_page(rdev, t, page_entry);
t                 663 drivers/gpu/drm/radeon/rv770d.h #define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
t                 664 drivers/gpu/drm/radeon/rv770d.h 					 (((t) & 0x1) << 23) |		\
t                1773 drivers/gpu/drm/radeon/si_dpm.c 						     u16 v, s32 t, u32 ileakage, u32 *leakage)
t                1781 drivers/gpu/drm/radeon/si_dpm.c 	temperature = div64_s64(drm_int2fixp(t), 1000);
t                1802 drivers/gpu/drm/radeon/si_dpm.c 					     s32 t,
t                1806 drivers/gpu/drm/radeon/si_dpm.c 	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
t                2587 drivers/gpu/drm/radeon/si_dpm.c 	s32 t;
t                2595 drivers/gpu/drm/radeon/si_dpm.c 		t = (1000 * (i * t_step + t0));
t                2603 drivers/gpu/drm/radeon/si_dpm.c 							 t,
t                1853 drivers/gpu/drm/radeon/sid.h #define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
t                1855 drivers/gpu/drm/radeon/sid.h 					 (((t) & 0x1) << 23) |		\
t                 893 drivers/gpu/drm/radeon/sumo_dpm.c 		u32 t = 1;
t                 897 drivers/gpu/drm/radeon/sumo_dpm.c 		deep_sleep_cntl |= HS(t > 4095 ? 4095 : t);
t                 446 drivers/gpu/drm/savage/savage_drv.h #define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
t                 102 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PKT_CTRL_TYPE(n, t)		((t) << (((n) % 4) * 4))
t                 558 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
t                 606 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	for (i = 0; i < ARRAY_SIZE(t); i++) {
t                 607 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (type & t[i]) {
t                1199 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p, *t;
t                1204 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
t                  51 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c 	struct hgsmi_buffer_tail *t;
t                  55 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c 	total_size = size + sizeof(*h) + sizeof(*t);
t                  60 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c 	t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
t                  68 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c 	t->reserved = 0;
t                  69 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c 	t->checksum = hgsmi_checksum(offset, h, t);
t                 651 drivers/gpu/drm/vc4/vc4_bo.c static void vc4_bo_cache_time_timer(struct timer_list *t)
t                 653 drivers/gpu/drm/vc4/vc4_bo.c 	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
t                 323 drivers/gpu/drm/vc4/vc4_gem.c vc4_hangcheck_elapsed(struct timer_list *t)
t                 325 drivers/gpu/drm/vc4/vc4_gem.c 	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
t                  77 drivers/gpu/drm/vgem/vgem_fence.c static void vgem_fence_timeout(struct timer_list *t)
t                  79 drivers/gpu/drm/vgem/vgem_fence.c 	struct vgem_fence *fence = from_timer(fence, t, timer);
t                 457 drivers/gpu/drm/via/via_dmablit.c via_dmablit_timer(struct timer_list *t)
t                 459 drivers/gpu/drm/via/via_dmablit.c 	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
t                 296 drivers/greybus/operation.c static void gb_operation_timeout(struct timer_list *t)
t                 298 drivers/greybus/operation.c 	struct gb_operation *operation = from_timer(operation, t, timer);
t                 168 drivers/hid/hid-appleir.c static void key_up_tick(struct timer_list *t)
t                 170 drivers/hid/hid-appleir.c 	struct appleir *appleir = from_timer(appleir, t, key_up_timer);
t                 340 drivers/hid/hid-hyperv.c 	unsigned long t;
t                 364 drivers/hid/hid-hyperv.c 	t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
t                 365 drivers/hid/hid-hyperv.c 	if (!t) {
t                 379 drivers/hid/hid-hyperv.c 	t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
t                 380 drivers/hid/hid-hyperv.c 	if (!t) {
t                1668 drivers/hid/hid-multitouch.c static void mt_expired_timeout(struct timer_list *t)
t                1670 drivers/hid/hid-multitouch.c 	struct mt_device *td = from_timer(td, t, release_timer);
t                 238 drivers/hid/hid-prodikeys.c static void pcmidi_sustained_note_release(struct timer_list *t)
t                 240 drivers/hid/hid-prodikeys.c 	struct pcmidi_sustain *pms = from_timer(pms, t, timer);
t                  52 drivers/hid/hid-uclogic-core.c static void uclogic_inrange_timeout(struct timer_list *t)
t                  54 drivers/hid/hid-uclogic-core.c 	struct uclogic_drvdata *drvdata = from_timer(drvdata, t,
t                1236 drivers/hid/hid-wiimote-core.c static void wiimote_init_timeout(struct timer_list *t)
t                1238 drivers/hid/hid-wiimote-core.c 	struct wiimote_data *wdata = from_timer(wdata, t, timer);
t                 105 drivers/hid/usbhid/hid-core.c static void hid_retry_timeout(struct timer_list *t)
t                 107 drivers/hid/usbhid/hid-core.c 	struct usbhid_device *usbhid = from_timer(usbhid, t, io_retry);
t                 844 drivers/hid/wacom_wac.c 	unsigned int x, y, distance, t;
t                 900 drivers/hid/wacom_wac.c 		t = (data[6] << 3) | ((data[7] & 0xC0) >> 5) | (data[1] & 1);
t                 902 drivers/hid/wacom_wac.c 			t >>= 1;
t                 903 drivers/hid/wacom_wac.c 		input_report_abs(input, ABS_PRESSURE, t);
t                 911 drivers/hid/wacom_wac.c 		input_report_key(input, BTN_TOUCH, t > 10);
t                 927 drivers/hid/wacom_wac.c 			t = (data[6] << 3) | ((data[7] >> 5) & 7);
t                 928 drivers/hid/wacom_wac.c 			t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
t                 929 drivers/hid/wacom_wac.c 				((t-1) / 2 + 450)) : (450 - t / 2) ;
t                 930 drivers/hid/wacom_wac.c 			input_report_abs(input, ABS_Z, t);
t                 933 drivers/hid/wacom_wac.c 			t = (data[6] << 3) | ((data[7] >> 5) & 7);
t                 935 drivers/hid/wacom_wac.c 				((t - 1) / 2) : -t / 2);
t                 947 drivers/hid/wacom_wac.c 		t = (data[6] << 2) | ((data[7] >> 6) & 3);
t                 948 drivers/hid/wacom_wac.c 		input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
t                 455 drivers/hsi/clients/ssi_protocol.c static void ssip_keep_alive(struct timer_list *t)
t                 457 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
t                 482 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_wd(struct timer_list *t)
t                 484 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
t                 491 drivers/hsi/clients/ssi_protocol.c static void ssip_tx_wd(struct timer_list *t)
t                 493 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
t                1564 drivers/hv/hv_balloon.c 	unsigned long t;
t                1592 drivers/hv/hv_balloon.c 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
t                1593 drivers/hv/hv_balloon.c 	if (t == 0) {
t                1642 drivers/hv/hv_balloon.c 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
t                1643 drivers/hv/hv_balloon.c 	if (t == 0) {
t                2409 drivers/hv/vmbus_drv.c 	int ret, t;
t                2424 drivers/hv/vmbus_drv.c 	t = wait_for_completion_timeout(&probe_event, 5*HZ);
t                2425 drivers/hv/vmbus_drv.c 	if (t == 0) {
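
In the hid-hyperv, hv_balloon and vmbus_drv entries above, `t` holds the return value of wait_for_completion_timeout(), which is 0 on timeout and otherwise the number of jiffies remaining. A hedged sketch of that idiom; the completion name and helper are illustrative, not taken from those drivers.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(host_event);

static int wait_for_host_response(void)
{
	unsigned long t;

	/* 0 means the 5 second timeout expired before complete() was called. */
	t = wait_for_completion_timeout(&host_event, 5 * HZ);
	if (!t)
		return -ETIMEDOUT;

	return 0;
}
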
t                  86 drivers/hwmon/hih6130.c 	int t;
t                 137 drivers/hwmon/hih6130.c 		t = (tmp[0] << 8) + tmp[1];
t                 138 drivers/hwmon/hih6130.c 		hih6130->humidity = hih6130_rh_ticks_to_per_cent_mille(t);
t                 140 drivers/hwmon/hih6130.c 		t = (tmp[2] << 8) + tmp[3];
t                 141 drivers/hwmon/hih6130.c 		hih6130->temperature = hih6130_temp_ticks_to_millicelsius(t);
t                 143 drivers/hwmon/hwmon.c 	long t;
t                 146 drivers/hwmon/hwmon.c 				     tdata->index, &t);
t                 150 drivers/hwmon/hwmon.c 	*temp = t;
t                  71 drivers/hwmon/lm95234.c static int lm95234_read_temp(struct i2c_client *client, int index, int *t)
t                  87 drivers/hwmon/lm95234.c 		*t = temp;
t                 104 drivers/hwmon/lm95234.c 		*t = (s16)temp;
t                 400 drivers/hwmon/nct6683.c 	struct sensor_device_template **t;
t                 409 drivers/hwmon/nct6683.c 	t = tg->templates;
t                 410 drivers/hwmon/nct6683.c 	for (count = 0; *t; t++, count++)
t                 434 drivers/hwmon/nct6683.c 		t = tg->templates;
t                 435 drivers/hwmon/nct6683.c 		for (j = 0; *t != NULL; j++) {
t                 437 drivers/hwmon/nct6683.c 				 (*t)->dev_attr.attr.name, tg->base + i);
t                 438 drivers/hwmon/nct6683.c 			if ((*t)->s2) {
t                 442 drivers/hwmon/nct6683.c 				a2->nr = (*t)->u.s.nr + i;
t                 443 drivers/hwmon/nct6683.c 				a2->index = (*t)->u.s.index;
t                 445 drivers/hwmon/nct6683.c 				  (*t)->dev_attr.attr.mode;
t                 446 drivers/hwmon/nct6683.c 				a2->dev_attr.show = (*t)->dev_attr.show;
t                 447 drivers/hwmon/nct6683.c 				a2->dev_attr.store = (*t)->dev_attr.store;
t                 453 drivers/hwmon/nct6683.c 				a->index = (*t)->u.index + i;
t                 455 drivers/hwmon/nct6683.c 				  (*t)->dev_attr.attr.mode;
t                 456 drivers/hwmon/nct6683.c 				a->dev_attr.show = (*t)->dev_attr.show;
t                 457 drivers/hwmon/nct6683.c 				a->dev_attr.store = (*t)->dev_attr.store;
t                 462 drivers/hwmon/nct6683.c 			t++;
t                1290 drivers/hwmon/nct6775.c 	struct sensor_device_template **t;
t                1296 drivers/hwmon/nct6775.c 	t = tg->templates;
t                1297 drivers/hwmon/nct6775.c 	for (count = 0; *t; t++, count++)
t                1321 drivers/hwmon/nct6775.c 		t = tg->templates;
t                1322 drivers/hwmon/nct6775.c 		while (*t != NULL) {
t                1324 drivers/hwmon/nct6775.c 				 (*t)->dev_attr.attr.name, tg->base + i);
t                1325 drivers/hwmon/nct6775.c 			if ((*t)->s2) {
t                1329 drivers/hwmon/nct6775.c 				a2->nr = (*t)->u.s.nr + i;
t                1330 drivers/hwmon/nct6775.c 				a2->index = (*t)->u.s.index;
t                1332 drivers/hwmon/nct6775.c 				  (*t)->dev_attr.attr.mode;
t                1333 drivers/hwmon/nct6775.c 				a2->dev_attr.show = (*t)->dev_attr.show;
t                1334 drivers/hwmon/nct6775.c 				a2->dev_attr.store = (*t)->dev_attr.store;
t                1340 drivers/hwmon/nct6775.c 				a->index = (*t)->u.index + i;
t                1342 drivers/hwmon/nct6775.c 				  (*t)->dev_attr.attr.mode;
t                1343 drivers/hwmon/nct6775.c 				a->dev_attr.show = (*t)->dev_attr.show;
t                1344 drivers/hwmon/nct6775.c 				a->dev_attr.store = (*t)->dev_attr.store;
t                1349 drivers/hwmon/nct6775.c 			t++;
t                1656 drivers/hwmon/nct6775.c 			u8 t = fanmodecfg & 0x0f;
t                1659 drivers/hwmon/nct6775.c 				t |= (nct6775_read_value(data,
t                1662 drivers/hwmon/nct6775.c 			data->target_speed_tolerance[i] = t;
t                 325 drivers/hwmon/npcm750-pwm-fan.c static void npcm7xx_fan_polling(struct timer_list *t)
t                 330 drivers/hwmon/npcm750-pwm-fan.c 	data = from_timer(data, t, fan_timer);
t                  54 drivers/hwmon/pwm-fan.c static void sample_timer(struct timer_list *t)
t                  56 drivers/hwmon/pwm-fan.c 	struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
t                  56 drivers/hwtracing/stm/p_sys-t.c #define MIPI_SYST_TYPE(t)		((u32)(MIPI_SYST_TYPE_ ## t))
t                  68 drivers/hwtracing/stm/p_sys-t.c #define MIPI_SYST_TYPES(t, s)		(MIPI_SYST_TYPE(t) | \
t                  69 drivers/hwtracing/stm/p_sys-t.c 					 MIPI_SYST_SUBTYPE(t ## _ ## s))
t                  52 drivers/i2c/busses/i2c-at91-master.c 	struct i2c_timings timings, *t = &timings;
t                  54 drivers/i2c/busses/i2c-at91-master.c 	i2c_parse_fw_timings(dev->dev, t, true);
t                  57 drivers/i2c/busses/i2c-at91-master.c 				       2 * t->bus_freq_hz) - offset);
t                  74 drivers/i2c/busses/i2c-at91-master.c 		hold = DIV_ROUND_UP(t->sda_hold_ns
t                  91 drivers/i2c/busses/i2c-at91-master.c 		cdiv, ckdiv, hold, t->sda_hold_ns);
t                  40 drivers/i2c/busses/i2c-designware-master.c 	struct i2c_timings *t = &dev->timings;
t                  51 drivers/i2c/busses/i2c-designware-master.c 	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
t                  52 drivers/i2c/busses/i2c-designware-master.c 	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
t                  77 drivers/i2c/busses/i2c-designware-master.c 	if (t->bus_freq_hz == 1000000) {
t                  86 drivers/i2c/busses/i2c-designware-platdrv.c 	struct i2c_timings *t = &dev->timings;
t                 101 drivers/i2c/busses/i2c-designware-platdrv.c 	switch (t->bus_freq_hz) {
t                 190 drivers/i2c/busses/i2c-designware-platdrv.c 	struct i2c_timings *t = &dev->timings;
t                 199 drivers/i2c/busses/i2c-designware-platdrv.c 	switch (t->bus_freq_hz) {
t                 256 drivers/i2c/busses/i2c-designware-platdrv.c 	struct i2c_timings *t;
t                 287 drivers/i2c/busses/i2c-designware-platdrv.c 	t = &dev->timings;
t                 289 drivers/i2c/busses/i2c-designware-platdrv.c 		t->bus_freq_hz = pdata->i2c_scl_freq;
t                 291 drivers/i2c/busses/i2c-designware-platdrv.c 		i2c_parse_fw_timings(&pdev->dev, t, false);
t                 308 drivers/i2c/busses/i2c-designware-platdrv.c 	if (acpi_speed && t->bus_freq_hz)
t                 309 drivers/i2c/busses/i2c-designware-platdrv.c 		t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed);
t                 310 drivers/i2c/busses/i2c-designware-platdrv.c 	else if (acpi_speed || t->bus_freq_hz)
t                 311 drivers/i2c/busses/i2c-designware-platdrv.c 		t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
t                 313 drivers/i2c/busses/i2c-designware-platdrv.c 		t->bus_freq_hz = 400000;
t                 327 drivers/i2c/busses/i2c-designware-platdrv.c 	if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 &&
t                 328 drivers/i2c/busses/i2c-designware-platdrv.c 	    t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) {
t                 331 drivers/i2c/busses/i2c-designware-platdrv.c 			t->bus_freq_hz);
t                 359 drivers/i2c/busses/i2c-designware-platdrv.c 		if (!dev->sda_hold_time && t->sda_hold_ns)
t                 361 drivers/i2c/busses/i2c-designware-platdrv.c 				div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
t                 241 drivers/i2c/busses/i2c-ibm_iic.c 	const struct ibm_iic_timings *t = &timings[dev->fast_mode ? 1 : 0];
t                 261 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->buf);
t                 266 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->hd_sta);
t                 272 drivers/i2c/busses/i2c-ibm_iic.c 		ndelay(t->low / 2);
t                 275 drivers/i2c/busses/i2c-ibm_iic.c 		ndelay(t->low / 2);
t                 280 drivers/i2c/busses/i2c-ibm_iic.c 		ndelay(t->high);
t                 285 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->low / 2);
t                 287 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->low / 2);
t                 292 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->high);
t                 296 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->low);
t                 300 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->su_sto);
t                 303 drivers/i2c/busses/i2c-ibm_iic.c 	ndelay(t->buf);
t                 832 drivers/i2c/busses/i2c-img-scb.c static void img_i2c_check_timer(struct timer_list *t)
t                 834 drivers/i2c/busses/i2c-img-scb.c 	struct img_i2c *i2c = from_timer(i2c, t, check_timer);
t                 456 drivers/i2c/busses/i2c-pnx.c static void i2c_pnx_timeout(struct timer_list *t)
t                 458 drivers/i2c/busses/i2c-pnx.c 	struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
t                 238 drivers/i2c/busses/i2c-rcar.c static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timings *t)
t                 245 drivers/i2c/busses/i2c-rcar.c 	t->bus_freq_hz = t->bus_freq_hz ?: 100000;
t                 246 drivers/i2c/busses/i2c-rcar.c 	t->scl_fall_ns = t->scl_fall_ns ?: 35;
t                 247 drivers/i2c/busses/i2c-rcar.c 	t->scl_rise_ns = t->scl_rise_ns ?: 200;
t                 248 drivers/i2c/busses/i2c-rcar.c 	t->scl_int_delay_ns = t->scl_int_delay_ns ?: 50;
t                 294 drivers/i2c/busses/i2c-rcar.c 	sum = t->scl_fall_ns + t->scl_rise_ns + t->scl_int_delay_ns;
t                 312 drivers/i2c/busses/i2c-rcar.c 		if (scl <= t->bus_freq_hz)
t                 320 drivers/i2c/busses/i2c-rcar.c 		scl, t->bus_freq_hz, clk_get_rate(priv->clk), round, cdf, scgd);
t                 282 drivers/i2c/busses/i2c-riic.c static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
t                 290 drivers/i2c/busses/i2c-riic.c 	if (t->bus_freq_hz > 400000) {
t                 293 drivers/i2c/busses/i2c-riic.c 			t->bus_freq_hz);
t                 315 drivers/i2c/busses/i2c-riic.c 	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
t                 332 drivers/i2c/busses/i2c-riic.c 			(unsigned long)t->bus_freq_hz);
t                 352 drivers/i2c/busses/i2c-riic.c 	brl -= t->scl_fall_ns / (1000000000 / rate);
t                 353 drivers/i2c/busses/i2c-riic.c 	brh -= t->scl_rise_ns / (1000000000 / rate);
t                 363 drivers/i2c/busses/i2c-riic.c 		 t->scl_fall_ns / (1000000000 / rate),
t                 364 drivers/i2c/busses/i2c-riic.c 		 t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
t                 204 drivers/i2c/busses/i2c-rk3x.c 	struct i2c_timings t;
t                 562 drivers/i2c/busses/i2c-rk3x.c 				    struct i2c_timings *t,
t                 581 drivers/i2c/busses/i2c-rk3x.c 	if (WARN_ON(t->bus_freq_hz > 400000))
t                 582 drivers/i2c/busses/i2c-rk3x.c 		t->bus_freq_hz = 400000;
t                 585 drivers/i2c/busses/i2c-rk3x.c 	if (WARN_ON(t->bus_freq_hz < 1000))
t                 586 drivers/i2c/busses/i2c-rk3x.c 		t->bus_freq_hz = 1000;
t                 600 drivers/i2c/busses/i2c-rk3x.c 	spec = rk3x_i2c_get_spec(t->bus_freq_hz);
t                 601 drivers/i2c/busses/i2c-rk3x.c 	min_high_ns = t->scl_rise_ns + spec->min_high_ns;
t                 612 drivers/i2c/busses/i2c-rk3x.c 		(t->scl_rise_ns + spec->min_setup_start_ns) * 1000, 875));
t                 614 drivers/i2c/busses/i2c-rk3x.c 		(t->scl_rise_ns + spec->min_setup_start_ns + t->sda_fall_ns +
t                 617 drivers/i2c/busses/i2c-rk3x.c 	min_low_ns = t->scl_fall_ns + spec->min_low_ns;
t                 623 drivers/i2c/busses/i2c-rk3x.c 	scl_rate_khz = t->bus_freq_hz / 1000;
t                 742 drivers/i2c/busses/i2c-rk3x.c 				    struct i2c_timings *t,
t                 761 drivers/i2c/busses/i2c-rk3x.c 	if (WARN_ON(t->bus_freq_hz > 1000000))
t                 762 drivers/i2c/busses/i2c-rk3x.c 		t->bus_freq_hz = 1000000;
t                 765 drivers/i2c/busses/i2c-rk3x.c 	if (WARN_ON(t->bus_freq_hz < 1000))
t                 766 drivers/i2c/busses/i2c-rk3x.c 		t->bus_freq_hz = 1000;
t                 774 drivers/i2c/busses/i2c-rk3x.c 	spec = rk3x_i2c_get_spec(t->bus_freq_hz);
t                 778 drivers/i2c/busses/i2c-rk3x.c 	scl_rate_khz = t->bus_freq_hz / 1000;
t                 781 drivers/i2c/busses/i2c-rk3x.c 	min_high_ns = t->scl_rise_ns + spec->min_high_ns;
t                 784 drivers/i2c/busses/i2c-rk3x.c 	min_low_ns = t->scl_fall_ns + spec->min_low_ns;
t                 841 drivers/i2c/busses/i2c-rk3x.c 	min_setup_start_ns = t->scl_rise_ns + spec->min_setup_start_ns;
t                 846 drivers/i2c/busses/i2c-rk3x.c 	min_setup_stop_ns = t->scl_rise_ns + spec->min_setup_stop_ns;
t                 873 drivers/i2c/busses/i2c-rk3x.c 	struct i2c_timings *t = &i2c->t;
t                 880 drivers/i2c/busses/i2c-rk3x.c 	ret = i2c->soc_data->calc_timings(clk_rate, t, &calc);
t                 881 drivers/i2c/busses/i2c-rk3x.c 	WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz);
t                 902 drivers/i2c/busses/i2c-rk3x.c 		1000000000 / t->bus_freq_hz,
t                 937 drivers/i2c/busses/i2c-rk3x.c 		if (i2c->soc_data->calc_timings(ndata->new_rate, &i2c->t,
t                1211 drivers/i2c/busses/i2c-rk3x.c 	i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
t                 274 drivers/i2c/busses/i2c-st.c 	struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode];
t                 290 drivers/i2c/busses/i2c-st.c 	val = rate / (2 * t->rate);
t                 300 drivers/i2c/busses/i2c-st.c 	val = t->rep_start_hold / ns_per_clk;
t                 304 drivers/i2c/busses/i2c-st.c 	val = t->rep_start_setup / ns_per_clk;
t                 308 drivers/i2c/busses/i2c-st.c 	val = t->start_hold / ns_per_clk;
t                 312 drivers/i2c/busses/i2c-st.c 	val = t->data_setup_time / ns_per_clk;
t                 316 drivers/i2c/busses/i2c-st.c 	val = t->stop_setup_time / ns_per_clk;
t                 320 drivers/i2c/busses/i2c-st.c 	val = t->bus_free_time / ns_per_clk;
t                 642 drivers/i2c/busses/i2c-stm32f7.c 	struct stm32f7_i2c_timings *t = &i2c_dev->timing;
t                 646 drivers/i2c/busses/i2c-stm32f7.c 	timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc);
t                 647 drivers/i2c/busses/i2c-stm32f7.c 	timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel);
t                 648 drivers/i2c/busses/i2c-stm32f7.c 	timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel);
t                 649 drivers/i2c/busses/i2c-stm32f7.c 	timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh);
t                 650 drivers/i2c/busses/i2c-stm32f7.c 	timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll);
t                 106 drivers/i2c/busses/i2c-xlr.c 	int t;
t                 108 drivers/i2c/busses/i2c-xlr.c 	t = wait_event_timeout(priv->wait, xlr_i2c_idle(priv),
t                 110 drivers/i2c/busses/i2c-xlr.c 	if (!t)
t                1641 drivers/i2c/i2c-core-base.c void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults)
t                1645 drivers/i2c/i2c-core-base.c 	memset(t, 0, sizeof(*t));
t                1647 drivers/i2c/i2c-core-base.c 	ret = device_property_read_u32(dev, "clock-frequency", &t->bus_freq_hz);
t                1649 drivers/i2c/i2c-core-base.c 		t->bus_freq_hz = 100000;
t                1651 drivers/i2c/i2c-core-base.c 	ret = device_property_read_u32(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns);
t                1653 drivers/i2c/i2c-core-base.c 		if (t->bus_freq_hz <= 100000)
t                1654 drivers/i2c/i2c-core-base.c 			t->scl_rise_ns = 1000;
t                1655 drivers/i2c/i2c-core-base.c 		else if (t->bus_freq_hz <= 400000)
t                1656 drivers/i2c/i2c-core-base.c 			t->scl_rise_ns = 300;
t                1658 drivers/i2c/i2c-core-base.c 			t->scl_rise_ns = 120;
t                1661 drivers/i2c/i2c-core-base.c 	ret = device_property_read_u32(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns);
t                1663 drivers/i2c/i2c-core-base.c 		if (t->bus_freq_hz <= 400000)
t                1664 drivers/i2c/i2c-core-base.c 			t->scl_fall_ns = 300;
t                1666 drivers/i2c/i2c-core-base.c 			t->scl_fall_ns = 120;
t                1669 drivers/i2c/i2c-core-base.c 	device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns);
t                1671 drivers/i2c/i2c-core-base.c 	ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns);
t                1673 drivers/i2c/i2c-core-base.c 		t->sda_fall_ns = t->scl_fall_ns;
t                1675 drivers/i2c/i2c-core-base.c 	device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
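
The i2c-core-base.c entries above show i2c_parse_fw_timings() filling a struct i2c_timings from the clock-frequency and rise/fall-time firmware properties, with bus-speed-dependent defaults when use_defaults is true. A hedged sketch of how a bus driver typically consumes the result, as the at91, designware, rcar and rk3x entries above do; the probe function name is illustrative.

#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static int my_i2c_probe(struct platform_device *pdev)
{
	struct i2c_timings t;

	/* Fill t from firmware properties; true = apply the spec defaults. */
	i2c_parse_fw_timings(&pdev->dev, &t, true);

	dev_dbg(&pdev->dev, "bus %u Hz, rise %u ns, fall %u ns\n",
		t.bus_freq_hz, t.scl_rise_ns, t.scl_fall_ns);

	/* ... derive clock divider and hold-time register values from t ... */
	return 0;
}
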
t                 242 drivers/i3c/master/i3c-master-cdns.c #define IBIR_THR(t)			((t) << 24)
t                 243 drivers/i3c/master/i3c-master-cdns.c #define CMDR_THR(t)			((t) << 16)
t                 244 drivers/i3c/master/i3c-master-cdns.c #define IBI_THR(t)			((t) << 8)
t                 245 drivers/i3c/master/i3c-master-cdns.c #define CMD_THR(t)			(t)
t                 248 drivers/i3c/master/i3c-master-cdns.c #define RX_THR(t)			((t) << 16)
t                 249 drivers/i3c/master/i3c-master-cdns.c #define TX_THR(t)			(t)
t                 252 drivers/i3c/master/i3c-master-cdns.c #define SLV_DDR_RX_THR(t)		((t) << 16)
t                 253 drivers/i3c/master/i3c-master-cdns.c #define SLV_DDR_TX_THR(t)		(t)
t                 121 drivers/ide/ali14xx.c 	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                 125 drivers/ide/ali14xx.c 	time2 = t->active;
t                 153 drivers/ide/ali14xx.c 	u8 t;
t                 165 drivers/ide/ali14xx.c 				t = inReg(0) & 0xf0;
t                 168 drivers/ide/ali14xx.c 				if (t != 0x50)
t                 185 drivers/ide/ali14xx.c 	u8 t;
t                 193 drivers/ide/ali14xx.c 	t = inb(regPort) & 0x01;
t                 196 drivers/ide/ali14xx.c 	return t;
t                  65 drivers/ide/alim15x3.c 				struct ide_timing *t, u8 ultra)
t                  79 drivers/ide/alim15x3.c 	if (t == NULL)
t                  82 drivers/ide/alim15x3.c 	t->setup = clamp_val(t->setup, 1, 8) & 7;
t                  83 drivers/ide/alim15x3.c 	t->act8b = clamp_val(t->act8b, 1, 8) & 7;
t                  84 drivers/ide/alim15x3.c 	t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
t                  85 drivers/ide/alim15x3.c 	t->active = clamp_val(t->active, 1, 8) & 7;
t                  86 drivers/ide/alim15x3.c 	t->recover = clamp_val(t->recover, 1, 16) & 15;
t                  88 drivers/ide/alim15x3.c 	pci_write_config_byte(dev, port, t->setup);
t                  89 drivers/ide/alim15x3.c 	pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
t                  91 drivers/ide/alim15x3.c 			      (t->active << 4) | t->recover);
t                 107 drivers/ide/alim15x3.c 	struct ide_timing t;
t                 109 drivers/ide/alim15x3.c 	ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
t                 114 drivers/ide/alim15x3.c 		ide_timing_merge(&p, &t, &t,
t                 118 drivers/ide/alim15x3.c 			ide_timing_merge(&p, &t, &t,
t                 128 drivers/ide/alim15x3.c 	ali_program_timings(hwif, drive, &t, 0);
t                 173 drivers/ide/alim15x3.c 	struct ide_timing t;
t                 176 drivers/ide/alim15x3.c 		ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
t                 181 drivers/ide/alim15x3.c 			ide_timing_merge(&p, &t, &t,
t                 186 drivers/ide/alim15x3.c 				ide_timing_merge(&p, &t, &t,
t                 190 drivers/ide/alim15x3.c 		ali_program_timings(hwif, drive, &t, 0);
t                  49 drivers/ide/amd74xx.c 	u8 t = 0, offset = amd_offset(dev);
t                  51 drivers/ide/amd74xx.c 	pci_read_config_byte(dev, AMD_ADDRESS_SETUP + offset, &t);
t                  52 drivers/ide/amd74xx.c 	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
t                  53 drivers/ide/amd74xx.c 	pci_write_config_byte(dev, AMD_ADDRESS_SETUP + offset, t);
t                  62 drivers/ide/amd74xx.c 	case ATA_UDMA2: t = timing->udma ? (0xc0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
t                  63 drivers/ide/amd74xx.c 	case ATA_UDMA4: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 2, 10)]) : 0x03; break;
t                  64 drivers/ide/amd74xx.c 	case ATA_UDMA5: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 10)]) : 0x03; break;
t                  65 drivers/ide/amd74xx.c 	case ATA_UDMA6: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 15)]) : 0x03; break;
t                  70 drivers/ide/amd74xx.c 		pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
t                  82 drivers/ide/amd74xx.c 	struct ide_timing t, p;
t                  90 drivers/ide/amd74xx.c 	ide_timing_compute(drive, speed, &t, T, UT);
t                  94 drivers/ide/amd74xx.c 		ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
t                  97 drivers/ide/amd74xx.c 	if (speed == XFER_UDMA_5 && amd_clock <= 33333) t.udma = 1;
t                  98 drivers/ide/amd74xx.c 	if (speed == XFER_UDMA_6 && amd_clock <= 33333) t.udma = 15;
t                 100 drivers/ide/amd74xx.c 	amd_set_speed(dev, drive->dn, udma_mask, &t);
t                 123 drivers/ide/amd74xx.c 	u8 t = 0, offset = amd_offset(dev);
t                 125 drivers/ide/amd74xx.c 	pci_read_config_byte(dev, AMD_CABLE_DETECT + offset, &t);
t                 127 drivers/ide/amd74xx.c 	amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
t                 143 drivers/ide/amd74xx.c 	u8 t = 0, offset = amd_offset(dev);
t                 162 drivers/ide/amd74xx.c 	pci_read_config_byte(dev, AMD_IDE_CONFIG + offset, &t);
t                 168 drivers/ide/amd74xx.c 		t &= 0x0f;
t                 170 drivers/ide/amd74xx.c 		t |= 0xf0;
t                 171 drivers/ide/amd74xx.c 	pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t);
t                 518 drivers/ide/cmd640.c 	struct ide_timing *t;
t                 531 drivers/ide/cmd640.c 	t = ide_timing_find_mode(XFER_PIO_0 + pio_mode);
t                 532 drivers/ide/cmd640.c 	setup_time  = t->setup;
t                 533 drivers/ide/cmd640.c 	active_time = t->active;
t                  66 drivers/ide/cmd64x.c 	struct ide_timing t;
t                  72 drivers/ide/cmd64x.c 	ide_timing_compute(drive, mode, &t, T, 0);
t                  78 drivers/ide/cmd64x.c 	if (t.recover > 16) {
t                  79 drivers/ide/cmd64x.c 		t.active += t.recover - 16;
t                  80 drivers/ide/cmd64x.c 		t.recover = 16;
t                  82 drivers/ide/cmd64x.c 	if (t.active > 16)		/* shouldn't actually happen... */
t                  83 drivers/ide/cmd64x.c 		t.active = 16;
t                  88 drivers/ide/cmd64x.c 	t.recover = recovery_values[t.recover];
t                  89 drivers/ide/cmd64x.c 	t.active &= 0x0f;
t                  93 drivers/ide/cmd64x.c 			      (t.active << 4) | t.recover);
t                 108 drivers/ide/cmd64x.c 			ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
t                 112 drivers/ide/cmd64x.c 				ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
t                 117 drivers/ide/cmd64x.c 	if (t.setup > 5)		/* shouldn't actually happen... */
t                 118 drivers/ide/cmd64x.c 		t.setup = 5;
t                 128 drivers/ide/cmd64x.c 	arttim |= setup_values[t.setup];
t                  90 drivers/ide/cy82c693.c 	struct ide_timing t;
t                 104 drivers/ide/cy82c693.c 	ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
t                 106 drivers/ide/cy82c693.c 	time_16 = clamp_val(t.recover - 1, 0, 15) |
t                 107 drivers/ide/cy82c693.c 		  (clamp_val(t.active - 1, 0, 15) << 4);
t                 108 drivers/ide/cy82c693.c 	time_8 = clamp_val(t.act8b - 1, 0, 15) |
t                 109 drivers/ide/cy82c693.c 		 (clamp_val(t.rec8b - 1, 0, 15) << 4);
t                 121 drivers/ide/cy82c693.c 		addrCtrl |= clamp_val(t.setup - 1, 0, 15);
t                 137 drivers/ide/cy82c693.c 		addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
t                 632 drivers/ide/hpt366.c 	struct hpt_timings *t	= info->timings;
t                 637 drivers/ide/hpt366.c 	u32 itr_mask		= speed < XFER_MW_DMA_0 ? t->pio_mask :
t                 638 drivers/ide/hpt366.c 				 (speed < XFER_UDMA_0   ? t->dma_mask :
t                 639 drivers/ide/hpt366.c 							  t->ultra_mask);
t                 208 drivers/ide/ht6560b.c 		struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                 217 drivers/ide/ht6560b.c 		active_time = t->active;
t                 218 drivers/ide/ht6560b.c 		recovery_time = cycle_time - active_time - t->setup;
t                 255 drivers/ide/ht6560b.c 	int t = HT_PREFETCH_MODE << 8;
t                 265 drivers/ide/ht6560b.c 		config |= t;   /* enable prefetch mode */
t                 269 drivers/ide/ht6560b.c 		config &= ~t;  /* disable prefetch mode */
t                 313 drivers/ide/ht6560b.c 	int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
t                 316 drivers/ide/ht6560b.c 		t |= (HT_SECONDARY_IF << 8);
t                 318 drivers/ide/ht6560b.c 	ide_set_drivedata(drive, (void *)t);
t                 619 drivers/ide/ide-io.c void ide_timer_expiry (struct timer_list *t)
t                 621 drivers/ide/ide-io.c 	ide_hwif_t	*hwif = from_timer(hwif, t, timer);
t                1747 drivers/ide/ide-tape.c 	unsigned long t;
t                1787 drivers/ide/ide-tape.c 	t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
t                1793 drivers/ide/ide-tape.c 	tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
t                  59 drivers/ide/ide-timings.c 	struct ide_timing *t;
t                  61 drivers/ide/ide-timings.c 	for (t = ide_timing; t->mode != speed; t++)
t                  62 drivers/ide/ide-timings.c 		if (t->mode == 0xff)
t                  64 drivers/ide/ide-timings.c 	return t;
t                  71 drivers/ide/ide-timings.c 	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                  81 drivers/ide/ide-timings.c 		if (pio < 3 && cycle < t->cycle)
t                  89 drivers/ide/ide-timings.c 	return cycle ? cycle : t->cycle;
t                  96 drivers/ide/ide-timings.c static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
t                  99 drivers/ide/ide-timings.c 	q->setup   = EZ(t->setup,   T);
t                 100 drivers/ide/ide-timings.c 	q->act8b   = EZ(t->act8b,   T);
t                 101 drivers/ide/ide-timings.c 	q->rec8b   = EZ(t->rec8b,   T);
t                 102 drivers/ide/ide-timings.c 	q->cyc8b   = EZ(t->cyc8b,   T);
t                 103 drivers/ide/ide-timings.c 	q->active  = EZ(t->active,  T);
t                 104 drivers/ide/ide-timings.c 	q->recover = EZ(t->recover, T);
t                 105 drivers/ide/ide-timings.c 	q->cycle   = EZ(t->cycle,   T);
t                 106 drivers/ide/ide-timings.c 	q->udma    = EZ(t->udma,    UT);
t                 132 drivers/ide/ide-timings.c 		       struct ide_timing *t, int T, int UT)
t                 147 drivers/ide/ide-timings.c 	*t = *s;
t                 165 drivers/ide/ide-timings.c 		ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
t                 171 drivers/ide/ide-timings.c 	ide_timing_quantize(t, t, T, UT);
t                 180 drivers/ide/ide-timings.c 		ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
t                 186 drivers/ide/ide-timings.c 	if (t->act8b + t->rec8b < t->cyc8b) {
t                 187 drivers/ide/ide-timings.c 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
t                 188 drivers/ide/ide-timings.c 		t->rec8b = t->cyc8b - t->act8b;
t                 191 drivers/ide/ide-timings.c 	if (t->active + t->recover < t->cycle) {
t                 192 drivers/ide/ide-timings.c 		t->active += (t->cycle - (t->active + t->recover)) / 2;
t                 193 drivers/ide/ide-timings.c 		t->recover = t->cycle - t->active;
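
ide_timing_quantize() above converts each nanosecond figure in struct ide_timing into controller clock ticks (T and UT are the command and UDMA clock periods), and the lines that follow redistribute slack so that active plus recover still fills the whole cycle. A hedged arithmetic sketch of the same round-up conversion, as also seen in the palm_bk3710 DIV_ROUND_UP() calls below; the helper name and example numbers are chosen for illustration.

#include <linux/kernel.h>

/* Convert a nanosecond timing into clock ticks, rounding up. */
static unsigned int ns_to_ticks(unsigned int ns, unsigned int clk_period_ns)
{
	return DIV_ROUND_UP(ns, clk_period_ns);
}

/*
 * Example: a 70 ns active time on a 30 ns (roughly 33 MHz) IDE clock needs
 * DIV_ROUND_UP(70, 30) = 3 ticks; rounding down to 2 ticks (60 ns) would
 * undershoot the required timing.
 */
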
t                 105 drivers/ide/palm_bk3710.c 	struct ide_timing *t;
t                 108 drivers/ide/palm_bk3710.c 	t = ide_timing_find_mode(mode);
t                 109 drivers/ide/palm_bk3710.c 	cycletime = max_t(int, t->cycle, min_cycle);
t                 113 drivers/ide/palm_bk3710.c 	td = DIV_ROUND_UP(t->active, ideclk_period);
t                 136 drivers/ide/palm_bk3710.c 	struct ide_timing *t;
t                 138 drivers/ide/palm_bk3710.c 	t = ide_timing_find_mode(XFER_PIO_0 + mode);
t                 142 drivers/ide/palm_bk3710.c 	t2 = DIV_ROUND_UP(t->active, ideclk_period);
t                 163 drivers/ide/palm_bk3710.c 	t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
t                 164 drivers/ide/palm_bk3710.c 	t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
t                 108 drivers/ide/pmac.c #define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
t                 109 drivers/ide/pmac.c #define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
t                 497 drivers/ide/pmac.c 	u32 *timings, t;
t                 504 drivers/ide/pmac.c 	t = *timings;
t                 512 drivers/ide/pmac.c 		t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
t                 519 drivers/ide/pmac.c 		t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
t                 532 drivers/ide/pmac.c 		t = (t & ~TR_66_PIO_MASK) |
t                 553 drivers/ide/pmac.c 		t = (t & ~TR_33_PIO_MASK) |
t                 557 drivers/ide/pmac.c 			t |= TR_33_PIO_E;
t                 567 drivers/ide/pmac.c 	*timings = t;
t                 605 drivers/ide/pmac.c 	struct ide_timing *t = ide_timing_find_mode(speed);
t                 608 drivers/ide/pmac.c 	if (speed > XFER_UDMA_5 || t == NULL)
t                 610 drivers/ide/pmac.c 	tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
t                 623 drivers/ide/pmac.c 	struct ide_timing *t = ide_timing_find_mode(speed);
t                 626 drivers/ide/pmac.c 	if (speed > XFER_UDMA_6 || t == NULL)
t                 628 drivers/ide/pmac.c 	tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
t                 215 drivers/ide/qd65xx.c 	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                 245 drivers/ide/qd65xx.c 					active_time = t->active;
t                  45 drivers/ide/sl82c105.c 	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                  49 drivers/ide/sl82c105.c 	cmd_on  = (t->active + 29) / 30;
t                  26 drivers/ide/tx4938ide.c 	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
t                  35 drivers/ide/tx4938ide.c 	wt = DIV_ROUND_UP(t->act8b, cycle) - 2;
t                  43 drivers/ide/tx4938ide.c 	shwt = DIV_ROUND_UP(t->setup, cycle);
t                  46 drivers/ide/tx4938ide.c 	while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
t                 126 drivers/ide/via82cxxx.c 	u8 t;
t                 129 drivers/ide/via82cxxx.c 		pci_read_config_byte(dev, VIA_ADDRESS_SETUP, &t);
t                 130 drivers/ide/via82cxxx.c 		t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
t                 131 drivers/ide/via82cxxx.c 		pci_write_config_byte(dev, VIA_ADDRESS_SETUP, t);
t                 141 drivers/ide/via82cxxx.c 	case ATA_UDMA2: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
t                 142 drivers/ide/via82cxxx.c 	case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
t                 143 drivers/ide/via82cxxx.c 	case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
t                 144 drivers/ide/via82cxxx.c 	case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
t                 159 drivers/ide/via82cxxx.c 			udma_etc |= t;
t                 181 drivers/ide/via82cxxx.c 	struct ide_timing t, p;
t                 195 drivers/ide/via82cxxx.c 	ide_timing_compute(drive, speed, &t, T, UT);
t                 199 drivers/ide/via82cxxx.c 		ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
t                 202 drivers/ide/via82cxxx.c 	via_set_speed(hwif, drive->dn, &t);
t                 298 drivers/ide/via82cxxx.c 	u8 t, v;
t                 326 drivers/ide/via82cxxx.c 	pci_read_config_byte(dev, VIA_FIFO_CONFIG, &t);
t                 331 drivers/ide/via82cxxx.c 		t &= 0x7f;
t                 336 drivers/ide/via82cxxx.c 		t &= (t & 0x9f);
t                 338 drivers/ide/via82cxxx.c 			case 2: t |= 0x00; break;	/* 16 on primary */
t                 339 drivers/ide/via82cxxx.c 			case 1: t |= 0x60; break;	/* 16 on secondary */
t                 340 drivers/ide/via82cxxx.c 			case 3: t |= 0x20; break;	/* 8 pri 8 sec */
t                 344 drivers/ide/via82cxxx.c 	pci_write_config_byte(dev, VIA_FIFO_CONFIG, t);
t                 166 drivers/iio/accel/bmc150-accel-core.c 	int (*setup)(struct bmc150_accel_trigger *t, bool state);
t                 337 drivers/iio/accel/bmc150-accel-core.c static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
t                 341 drivers/iio/accel/bmc150-accel-core.c 		return bmc150_accel_update_slope(t->data);
t                1130 drivers/iio/accel/bmc150-accel-core.c 	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
t                1131 drivers/iio/accel/bmc150-accel-core.c 	struct bmc150_accel_data *data = t->data;
t                1136 drivers/iio/accel/bmc150-accel-core.c 	if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
t                1156 drivers/iio/accel/bmc150-accel-core.c 	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
t                1157 drivers/iio/accel/bmc150-accel-core.c 	struct bmc150_accel_data *data = t->data;
t                1162 drivers/iio/accel/bmc150-accel-core.c 	if (t->enabled == state) {
t                1167 drivers/iio/accel/bmc150-accel-core.c 	if (t->setup) {
t                1168 drivers/iio/accel/bmc150-accel-core.c 		ret = t->setup(t, state);
t                1175 drivers/iio/accel/bmc150-accel-core.c 	ret = bmc150_accel_set_interrupt(data, t->intr, state);
t                1181 drivers/iio/accel/bmc150-accel-core.c 	t->enabled = state;
t                1312 drivers/iio/accel/bmc150-accel-core.c 	int (*setup)(struct bmc150_accel_trigger *t, bool state);
t                1345 drivers/iio/accel/bmc150-accel-core.c 		struct bmc150_accel_trigger *t = &data->triggers[i];
t                1347 drivers/iio/accel/bmc150-accel-core.c 		t->indio_trig = devm_iio_trigger_alloc(dev,
t                1351 drivers/iio/accel/bmc150-accel-core.c 		if (!t->indio_trig) {
t                1356 drivers/iio/accel/bmc150-accel-core.c 		t->indio_trig->dev.parent = dev;
t                1357 drivers/iio/accel/bmc150-accel-core.c 		t->indio_trig->ops = &bmc150_accel_trigger_ops;
t                1358 drivers/iio/accel/bmc150-accel-core.c 		t->intr = bmc150_accel_triggers[i].intr;
t                1359 drivers/iio/accel/bmc150-accel-core.c 		t->data = data;
t                1360 drivers/iio/accel/bmc150-accel-core.c 		t->setup = bmc150_accel_triggers[i].setup;
t                1361 drivers/iio/accel/bmc150-accel-core.c 		iio_trigger_set_drvdata(t->indio_trig, t);
t                1363 drivers/iio/accel/bmc150-accel-core.c 		ret = iio_trigger_register(t->indio_trig);
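
The bmc150 loop above allocates one iio_trigger per interrupt source, attaches the per-trigger state with iio_trigger_set_drvdata() and registers it. A hedged, self-contained sketch of that registration pattern, assuming the devm_ variants of the IIO trigger API; the my_* names and the trivial set_trigger_state body are illustrative, not the bmc150 implementation.

#include <linux/iio/trigger.h>
#include <linux/device.h>
#include <linux/errno.h>

struct my_trigger_state {
	struct iio_trigger *indio_trig;
	bool enabled;
};

static int my_set_trigger_state(struct iio_trigger *trig, bool state)
{
	struct my_trigger_state *t = iio_trigger_get_drvdata(trig);

	t->enabled = state;	/* a real driver enables/disables its interrupt here */
	return 0;
}

static const struct iio_trigger_ops my_trigger_ops = {
	.set_trigger_state = my_set_trigger_state,
};

static int my_register_trigger(struct device *dev, struct my_trigger_state *t,
			       const char *name, int index)
{
	t->indio_trig = devm_iio_trigger_alloc(dev, "%s-dev%d", name, index);
	if (!t->indio_trig)
		return -ENOMEM;

	t->indio_trig->ops = &my_trigger_ops;
	iio_trigger_set_drvdata(t->indio_trig, t);

	return devm_iio_trigger_register(dev, t->indio_trig);
}
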
t                  31 drivers/iio/accel/ssp_accel_sensor.c 	u32 t;
t                  36 drivers/iio/accel/ssp_accel_sensor.c 		t = ssp_get_sensor_delay(data, SSP_ACCELEROMETER_SENSOR);
t                  37 drivers/iio/accel/ssp_accel_sensor.c 		ssp_convert_to_freq(t, val, val2);
t                 126 drivers/iio/adc/ad7606_spi.c 	struct spi_transfer t[] = {
t                 140 drivers/iio/adc/ad7606_spi.c 	ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
t                  61 drivers/iio/adc/ad_sigma_delta.c 	struct spi_transfer t = {
t                  90 drivers/iio/adc/ad_sigma_delta.c 	spi_message_add_tail(&t, &m);
t                 106 drivers/iio/adc/ad_sigma_delta.c 	struct spi_transfer t[] = {
t                 124 drivers/iio/adc/ad_sigma_delta.c 		spi_message_add_tail(&t[0], &m);
t                 126 drivers/iio/adc/ad_sigma_delta.c 	spi_message_add_tail(&t[1], &m);
t                  80 drivers/iio/adc/berlin2-adc.c #define BERLIN2_ADC_CHANNEL(n, t)					\
t                  84 drivers/iio/adc/berlin2-adc.c 		.type			= t,				\
t                 491 drivers/iio/adc/dln2-adc.c 	const struct dln2_adc_demux_table *t;
t                 502 drivers/iio/adc/dln2-adc.c 		t = &dln2->demux[i];
t                 503 drivers/iio/adc/dln2-adc.c 		memcpy((void *)data.values + t->to,
t                 504 drivers/iio/adc/dln2-adc.c 		       (void *)dev_data.values + t->from, t->length);
t                 115 drivers/iio/adc/ep93xx_adc.c 			u32 t;
t                 117 drivers/iio/adc/ep93xx_adc.c 			t = readl_relaxed(priv->base + EP93XX_ADC_RESULT);
t                 118 drivers/iio/adc/ep93xx_adc.c 			if (t & EP93XX_ADC_SDR) {
t                 119 drivers/iio/adc/ep93xx_adc.c 				*value = sign_extend32(t, 15);
t                 192 drivers/iio/adc/ti-ads124s08.c 	struct spi_transfer t[] = {
t                 207 drivers/iio/adc/ti-ads124s08.c 	ret = spi_sync_transfer(priv->spi, t, ARRAY_SIZE(t));
t                 216 drivers/iio/adc/ti-ads8688.c 	struct spi_transfer t[] = {
t                 236 drivers/iio/adc/ti-ads8688.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 168 drivers/iio/common/ssp_sensors/ssp_dev.c static void ssp_wdt_timer_func(struct timer_list *t)
t                 170 drivers/iio/common/ssp_sensors/ssp_dev.c 	struct ssp_data *data = from_timer(data, t, wdt_timer);
t                 221 drivers/iio/dac/ad5360.c 	struct spi_transfer t[] = {
t                 239 drivers/iio/dac/ad5360.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 158 drivers/iio/dac/ad5421.c 	struct spi_transfer t[] = {
t                 173 drivers/iio/dac/ad5421.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 103 drivers/iio/dac/ad5449.c 	struct spi_transfer t[] = {
t                 119 drivers/iio/dac/ad5449.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                  80 drivers/iio/dac/ad5504.c 	struct spi_transfer t = {
t                  87 drivers/iio/dac/ad5504.c 	ret = spi_sync_transfer(st->spi, &t, 1);
t                  23 drivers/iio/dac/ad5592r.c 	struct spi_transfer t = {
t                  31 drivers/iio/dac/ad5592r.c 	return spi_sync_transfer(spi, &t, 1);
t                  51 drivers/iio/dac/ad5686-spi.c 	struct spi_transfer t[] = {
t                  83 drivers/iio/dac/ad5686-spi.c 	ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
t                 202 drivers/iio/dac/ad5755.c 	struct spi_transfer t[] = {
t                 219 drivers/iio/dac/ad5755.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 189 drivers/iio/dac/ad5758.c 	struct spi_transfer t[] = {
t                 207 drivers/iio/dac/ad5758.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 143 drivers/iio/dac/ad5764.c 	struct spi_transfer t[] = {
t                 158 drivers/iio/dac/ad5764.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 309 drivers/iio/frequency/ad9523.c 	struct spi_transfer t[] = {
t                 323 drivers/iio/frequency/ad9523.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                 338 drivers/iio/frequency/ad9523.c 	struct spi_transfer t[] = {
t                 353 drivers/iio/frequency/ad9523.c 	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
t                  54 drivers/iio/gyro/adis16080.c 	struct spi_transfer	t[] = {
t                  67 drivers/iio/gyro/adis16080.c 	ret = spi_sync_transfer(st->us, t, ARRAY_SIZE(t));
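
Most of the iio adc/dac entries above declare `t` as an array of struct spi_transfer and hand it to spi_sync_transfer(), which wraps the transfers in a spi_message and runs them synchronously under one chip-select assertion. A hedged sketch of the idiom; the function name and transfer lengths are illustrative.

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Write two bytes, then read two bytes back, in a single message. */
static int my_spi_xfer(struct spi_device *spi, const u8 *tx, u8 *rx)
{
	struct spi_transfer t[] = {
		{
			.tx_buf = tx,	/* buffers should be DMA-safe, e.g.  */
			.len = 2,	/* kept in a kmalloc'd state struct   */
		}, {
			.rx_buf = rx,
			.len = 2,
		},
	};

	/* Builds the spi_message from the array and calls spi_sync(). */
	return spi_sync_transfer(spi, t, ARRAY_SIZE(t));
}
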
t                 175 drivers/iio/gyro/adis16136.c 	unsigned int t;
t                 177 drivers/iio/gyro/adis16136.c 	t = 32768 / freq;
t                 178 drivers/iio/gyro/adis16136.c 	if (t < 0xf)
t                 179 drivers/iio/gyro/adis16136.c 		t = 0xf;
t                 180 drivers/iio/gyro/adis16136.c 	else if (t > 0xffff)
t                 181 drivers/iio/gyro/adis16136.c 		t = 0xffff;
t                 183 drivers/iio/gyro/adis16136.c 		t--;
t                 185 drivers/iio/gyro/adis16136.c 	return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
t                 190 drivers/iio/gyro/adis16136.c 	uint16_t t;
t                 193 drivers/iio/gyro/adis16136.c 	ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
t                 197 drivers/iio/gyro/adis16136.c 	*freq = 32768 / (t + 1);
t                 280 drivers/iio/gyro/adis16260.c 	u8 t;
t                 298 drivers/iio/gyro/adis16260.c 			t = 256 / val;
t                 300 drivers/iio/gyro/adis16260.c 			t = 2048 / val;
t                 302 drivers/iio/gyro/adis16260.c 		if (t > ADIS16260_SMPL_PRD_DIV_MASK)
t                 303 drivers/iio/gyro/adis16260.c 			t = ADIS16260_SMPL_PRD_DIV_MASK;
t                 304 drivers/iio/gyro/adis16260.c 		else if (t > 0)
t                 305 drivers/iio/gyro/adis16260.c 			t--;
t                 307 drivers/iio/gyro/adis16260.c 		if (t >= 0x0A)
t                 311 drivers/iio/gyro/adis16260.c 		ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
t                 237 drivers/iio/gyro/adxrs450.c 	u32 t;
t                 243 drivers/iio/gyro/adxrs450.c 	ret = adxrs450_spi_initial(st, &t, 1);
t                 246 drivers/iio/gyro/adxrs450.c 	if (t != 0x01)
t                 250 drivers/iio/gyro/adxrs450.c 	ret = adxrs450_spi_initial(st, &t, 0);
t                 255 drivers/iio/gyro/adxrs450.c 	ret = adxrs450_spi_initial(st, &t, 0);
t                 258 drivers/iio/gyro/adxrs450.c 	if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) {
t                 263 drivers/iio/gyro/adxrs450.c 	ret = adxrs450_spi_initial(st, &t, 0);
t                 266 drivers/iio/gyro/adxrs450.c 	if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) {
t                 310 drivers/iio/gyro/adxrs450.c 	s16 t;
t                 316 drivers/iio/gyro/adxrs450.c 			ret = adxrs450_spi_sensor_data(indio_dev, &t);
t                 319 drivers/iio/gyro/adxrs450.c 			*val = t;
t                 324 drivers/iio/gyro/adxrs450.c 						       ADXRS450_TEMP1, &t);
t                 327 drivers/iio/gyro/adxrs450.c 			*val = (t >> 6) + 225;
t                 349 drivers/iio/gyro/adxrs450.c 		ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
t                 352 drivers/iio/gyro/adxrs450.c 		*val = t;
t                 356 drivers/iio/gyro/adxrs450.c 		ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
t                 359 drivers/iio/gyro/adxrs450.c 		*val = sign_extend32(t, 9);
t                 136 drivers/iio/gyro/itg3200_core.c 	u8 t;
t                 145 drivers/iio/gyro/itg3200_core.c 		ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &t);
t                 150 drivers/iio/gyro/itg3200_core.c 		t = ((t & ITG3200_DLPF_CFG_MASK) ? 1000u : 8000u) / val - 1;
t                 154 drivers/iio/gyro/itg3200_core.c 					  t);
t                  31 drivers/iio/gyro/ssp_gyro_sensor.c 	u32 t;
t                  36 drivers/iio/gyro/ssp_gyro_sensor.c 		t = ssp_get_sensor_delay(data, SSP_GYROSCOPE_SENSOR);
t                  37 drivers/iio/gyro/ssp_gyro_sensor.c 		ssp_convert_to_freq(t, val, val2);
t                 124 drivers/iio/humidity/dht11.c 	int i, t;
t                 129 drivers/iio/humidity/dht11.c 		t = dht11->edges[offset + 2 * i + 2].ts -
t                 137 drivers/iio/humidity/dht11.c 		bits[i] = t > DHT11_THRESHOLD;
t                 327 drivers/iio/imu/adis16400.c 	uint16_t t;
t                 329 drivers/iio/imu/adis16400.c 	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
t                 333 drivers/iio/imu/adis16400.c 	t >>= ADIS16334_RATE_DIV_SHIFT;
t                 335 drivers/iio/imu/adis16400.c 	return 819200 >> t;
t                 340 drivers/iio/imu/adis16400.c 	unsigned int t;
t                 343 drivers/iio/imu/adis16400.c 		t = ilog2(819200 / freq);
t                 345 drivers/iio/imu/adis16400.c 		t = 0;
t                 347 drivers/iio/imu/adis16400.c 	if (t > 0x31)
t                 348 drivers/iio/imu/adis16400.c 		t = 0x31;
t                 350 drivers/iio/imu/adis16400.c 	t <<= ADIS16334_RATE_DIV_SHIFT;
t                 351 drivers/iio/imu/adis16400.c 	t |= ADIS16334_RATE_INT_CLK;
t                 353 drivers/iio/imu/adis16400.c 	return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
t                 359 drivers/iio/imu/adis16400.c 	uint16_t t;
t                 361 drivers/iio/imu/adis16400.c 	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
t                 365 drivers/iio/imu/adis16400.c 	sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
t                 366 drivers/iio/imu/adis16400.c 	sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
t                 373 drivers/iio/imu/adis16400.c 	unsigned int t;
t                 376 drivers/iio/imu/adis16400.c 	t = 1638404 / freq;
t                 377 drivers/iio/imu/adis16400.c 	if (t >= 128) {
t                 379 drivers/iio/imu/adis16400.c 		t = 52851 / freq;
t                 380 drivers/iio/imu/adis16400.c 		if (t >= 128)
t                 381 drivers/iio/imu/adis16400.c 			t = 127;
t                 382 drivers/iio/imu/adis16400.c 	} else if (t != 0) {
t                 383 drivers/iio/imu/adis16400.c 		t--;
t                 386 drivers/iio/imu/adis16400.c 	val |= t;
t                 388 drivers/iio/imu/adis16400.c 	if (t >= 0x0A || (val & ADIS16400_SMPL_PRD_TIME_BASE))
t                 155 drivers/iio/imu/adis16460.c 	int t;
t                 157 drivers/iio/imu/adis16460.c 	t =  val * 1000 + val2 / 1000;
t                 158 drivers/iio/imu/adis16460.c 	if (t <= 0)
t                 161 drivers/iio/imu/adis16460.c 	t = 2048000 / t;
t                 162 drivers/iio/imu/adis16460.c 	if (t > 2048)
t                 163 drivers/iio/imu/adis16460.c 		t = 2048;
t                 165 drivers/iio/imu/adis16460.c 	if (t != 0)
t                 166 drivers/iio/imu/adis16460.c 		t--;
t                 168 drivers/iio/imu/adis16460.c 	return adis_write_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, t);
t                 174 drivers/iio/imu/adis16460.c 	uint16_t t;
t                 178 drivers/iio/imu/adis16460.c 	ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t);
t                 182 drivers/iio/imu/adis16460.c 	freq = 2048000 / (t + 1);
t                 318 drivers/iio/imu/adis16480.c 	unsigned int t, reg;
t                 323 drivers/iio/imu/adis16480.c 	t =  val * 1000 + val2 / 1000;
t                 324 drivers/iio/imu/adis16480.c 	if (t == 0)
t                 335 drivers/iio/imu/adis16480.c 		t = t / st->clk_freq;
t                 338 drivers/iio/imu/adis16480.c 		t = st->clk_freq / t;
t                 342 drivers/iio/imu/adis16480.c 	if (t > st->chip_info->max_dec_rate)
t                 343 drivers/iio/imu/adis16480.c 		t = st->chip_info->max_dec_rate;
t                 345 drivers/iio/imu/adis16480.c 	if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
t                 346 drivers/iio/imu/adis16480.c 		t--;
t                 348 drivers/iio/imu/adis16480.c 	return adis_write_reg_16(&st->adis, reg, t);
t                 354 drivers/iio/imu/adis16480.c 	uint16_t t;
t                 364 drivers/iio/imu/adis16480.c 	ret = adis_read_reg_16(&st->adis, reg, &t);
t                 376 drivers/iio/imu/adis16480.c 		freq = st->clk_freq * t;
t                 378 drivers/iio/imu/adis16480.c 		freq = st->clk_freq / (t + 1);
t                 290 drivers/iio/imu/bmi160/bmi160_core.c int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
t                 297 drivers/iio/imu/bmi160/bmi160_core.c 		cmd = bmi160_regs[t].pmu_cmd_normal;
t                 299 drivers/iio/imu/bmi160/bmi160_core.c 		cmd = bmi160_regs[t].pmu_cmd_suspend;
t                 305 drivers/iio/imu/bmi160/bmi160_core.c 	usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
t                 311 drivers/iio/imu/bmi160/bmi160_core.c int bmi160_set_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
t                 316 drivers/iio/imu/bmi160/bmi160_core.c 	for (i = 0; i < bmi160_scale_table[t].num; i++)
t                 317 drivers/iio/imu/bmi160/bmi160_core.c 		if (bmi160_scale_table[t].tbl[i].uscale == uscale)
t                 320 drivers/iio/imu/bmi160/bmi160_core.c 	if (i == bmi160_scale_table[t].num)
t                 323 drivers/iio/imu/bmi160/bmi160_core.c 	return regmap_write(data->regmap, bmi160_regs[t].range,
t                 324 drivers/iio/imu/bmi160/bmi160_core.c 			    bmi160_scale_table[t].tbl[i].bits);
t                 328 drivers/iio/imu/bmi160/bmi160_core.c int bmi160_get_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
t                 333 drivers/iio/imu/bmi160/bmi160_core.c 	ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
t                 337 drivers/iio/imu/bmi160/bmi160_core.c 	for (i = 0; i < bmi160_scale_table[t].num; i++)
t                 338 drivers/iio/imu/bmi160/bmi160_core.c 		if (bmi160_scale_table[t].tbl[i].bits == val) {
t                 339 drivers/iio/imu/bmi160/bmi160_core.c 			*uscale = bmi160_scale_table[t].tbl[i].uscale;
t                 352 drivers/iio/imu/bmi160/bmi160_core.c 	enum bmi160_sensor_type t = bmi160_to_sensor(chan_type);
t                 354 drivers/iio/imu/bmi160/bmi160_core.c 	reg = bmi160_regs[t].data + (axis - IIO_MOD_X) * sizeof(sample);
t                 366 drivers/iio/imu/bmi160/bmi160_core.c int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
t                 371 drivers/iio/imu/bmi160/bmi160_core.c 	for (i = 0; i < bmi160_odr_table[t].num; i++)
t                 372 drivers/iio/imu/bmi160/bmi160_core.c 		if (bmi160_odr_table[t].tbl[i].odr == odr &&
t                 373 drivers/iio/imu/bmi160/bmi160_core.c 		    bmi160_odr_table[t].tbl[i].uodr == uodr)
t                 376 drivers/iio/imu/bmi160/bmi160_core.c 	if (i >= bmi160_odr_table[t].num)
t                 380 drivers/iio/imu/bmi160/bmi160_core.c 				  bmi160_regs[t].config,
t                 381 drivers/iio/imu/bmi160/bmi160_core.c 				  bmi160_regs[t].config_odr_mask,
t                 382 drivers/iio/imu/bmi160/bmi160_core.c 				  bmi160_odr_table[t].tbl[i].bits);
t                 385 drivers/iio/imu/bmi160/bmi160_core.c static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
t                 390 drivers/iio/imu/bmi160/bmi160_core.c 	ret = regmap_read(data->regmap, bmi160_regs[t].config, &val);
t                 394 drivers/iio/imu/bmi160/bmi160_core.c 	val &= bmi160_regs[t].config_odr_mask;
t                 396 drivers/iio/imu/bmi160/bmi160_core.c 	for (i = 0; i < bmi160_odr_table[t].num; i++)
t                 397 drivers/iio/imu/bmi160/bmi160_core.c 		if (val == bmi160_odr_table[t].tbl[i].bits)
t                 400 drivers/iio/imu/bmi160/bmi160_core.c 	if (i >= bmi160_odr_table[t].num)
t                 403 drivers/iio/imu/bmi160/bmi160_core.c 	*odr = bmi160_odr_table[t].tbl[i].odr;
t                 404 drivers/iio/imu/bmi160/bmi160_core.c 	*uodr = bmi160_odr_table[t].tbl[i].uodr;
t                1372 drivers/iio/industrialio-buffer.c 	struct iio_demux_table *t;
t                1376 drivers/iio/industrialio-buffer.c 	list_for_each_entry(t, &buffer->demux_list, l)
t                1377 drivers/iio/industrialio-buffer.c 		memcpy(buffer->demux_bounce + t->to,
t                1378 drivers/iio/industrialio-buffer.c 		       datain + t->from, t->length);
t                1024 drivers/iio/industrialio-core.c 	struct iio_dev_attr *iio_attr, *t;
t                1036 drivers/iio/industrialio-core.c 	list_for_each_entry(t, attr_list, l)
t                1037 drivers/iio/industrialio-core.c 		if (strcmp(t->dev_attr.attr.name,
t                1041 drivers/iio/industrialio-core.c 					t->dev_attr.attr.name);
t                  32 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *t = NULL, *iter;
t                  36 drivers/iio/industrialio-sw-trigger.c 			t = iter;
t                  40 drivers/iio/industrialio-sw-trigger.c 	return t;
t                  43 drivers/iio/industrialio-sw-trigger.c int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
t                  49 drivers/iio/industrialio-sw-trigger.c 	iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
t                  53 drivers/iio/industrialio-sw-trigger.c 		list_add_tail(&t->list, &iio_trigger_types_list);
t                  59 drivers/iio/industrialio-sw-trigger.c 	t->group = configfs_register_default_group(iio_triggers_group, t->name,
t                  61 drivers/iio/industrialio-sw-trigger.c 	if (IS_ERR(t->group))
t                  62 drivers/iio/industrialio-sw-trigger.c 		ret = PTR_ERR(t->group);
t                  68 drivers/iio/industrialio-sw-trigger.c void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *t)
t                  73 drivers/iio/industrialio-sw-trigger.c 	iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
t                  75 drivers/iio/industrialio-sw-trigger.c 		list_del(&t->list);
t                  78 drivers/iio/industrialio-sw-trigger.c 	configfs_unregister_default_group(t->group);
t                  85 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *t;
t                  88 drivers/iio/industrialio-sw-trigger.c 	t = __iio_find_sw_trigger_type(name, strlen(name));
t                  89 drivers/iio/industrialio-sw-trigger.c 	if (t && !try_module_get(t->owner))
t                  90 drivers/iio/industrialio-sw-trigger.c 		t = NULL;
t                  93 drivers/iio/industrialio-sw-trigger.c 	return t;
t                  98 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger *t;
t                 106 drivers/iio/industrialio-sw-trigger.c 	t = tt->ops->probe(name);
t                 107 drivers/iio/industrialio-sw-trigger.c 	if (IS_ERR(t))
t                 110 drivers/iio/industrialio-sw-trigger.c 	t->trigger_type = tt;
t                 112 drivers/iio/industrialio-sw-trigger.c 	return t;
t                 115 drivers/iio/industrialio-sw-trigger.c 	return t;
t                 119 drivers/iio/industrialio-sw-trigger.c void iio_sw_trigger_destroy(struct iio_sw_trigger *t)
t                 121 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *tt = t->trigger_type;
t                 123 drivers/iio/industrialio-sw-trigger.c 	tt->ops->remove(t);
t                 131 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger *t;
t                 133 drivers/iio/industrialio-sw-trigger.c 	t = iio_sw_trigger_create(group->cg_item.ci_name, name);
t                 134 drivers/iio/industrialio-sw-trigger.c 	if (IS_ERR(t))
t                 135 drivers/iio/industrialio-sw-trigger.c 		return ERR_CAST(t);
t                 137 drivers/iio/industrialio-sw-trigger.c 	config_item_set_name(&t->group.cg_item, "%s", name);
t                 139 drivers/iio/industrialio-sw-trigger.c 	return &t->group;
t                 145 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger *t = to_iio_sw_trigger(item);
t                 147 drivers/iio/industrialio-sw-trigger.c 	iio_sw_trigger_destroy(t);
t                 195 drivers/iio/light/zopt2201.c 		unsigned long t = zopt2201_resolution[data->res].us;
t                 197 drivers/iio/light/zopt2201.c 		if (t <= 20000)
t                 198 drivers/iio/light/zopt2201.c 			usleep_range(t, t + 1000);
t                 200 drivers/iio/light/zopt2201.c 			msleep(t / 1000);
t                 151 drivers/iio/potentiometer/mcp4131.c 	struct spi_transfer t = {
t                 159 drivers/iio/potentiometer/mcp4131.c 	spi_message_add_tail(&t, &m);
t                  61 drivers/iio/pressure/dps310.c #define DPS310_POLL_SLEEP_US(t)		min(20000, (t) / 8)
t                 475 drivers/iio/pressure/dps310.c 	s64 t;
t                 501 drivers/iio/pressure/dps310.c 	t = (s64)data->temp_raw;
t                 512 drivers/iio/pressure/dps310.c 	nums[4] = t * (s64)data->c01;
t                 514 drivers/iio/pressure/dps310.c 	nums[5] = t * p * (s64)data->c11;
t                 516 drivers/iio/pressure/dps310.c 	nums[6] = t * p * p * (s64)data->c21;
t                 586 drivers/iio/pressure/dps310.c 	s64 t;
t                 596 drivers/iio/pressure/dps310.c 	t = c0 + ((s64)data->temp_raw * (s64)data->c1);
t                 599 drivers/iio/pressure/dps310.c 	return (int)div_s64(t * 1000LL, kt);
t                 125 drivers/iio/pressure/ms5611_core.c 	s32 t = *temp, p = *pressure;
t                 128 drivers/iio/pressure/ms5611_core.c 	dt = t - (chip_info->prom[5] << 8);
t                 132 drivers/iio/pressure/ms5611_core.c 	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
t                 133 drivers/iio/pressure/ms5611_core.c 	if (t < 2000) {
t                 137 drivers/iio/pressure/ms5611_core.c 		off2 = (5 * (t - 2000) * (t - 2000)) >> 1;
t                 140 drivers/iio/pressure/ms5611_core.c 		if (t < -1500) {
t                 141 drivers/iio/pressure/ms5611_core.c 			s64 tmp = (t + 1500) * (t + 1500);
t                 147 drivers/iio/pressure/ms5611_core.c 		t -= t2;
t                 152 drivers/iio/pressure/ms5611_core.c 	*temp = t;
t                 161 drivers/iio/pressure/ms5611_core.c 	s32 t = *temp, p = *pressure;
t                 164 drivers/iio/pressure/ms5611_core.c 	dt = t - (chip_info->prom[5] << 8);
t                 168 drivers/iio/pressure/ms5611_core.c 	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
t                 169 drivers/iio/pressure/ms5611_core.c 	if (t < 2000) {
t                 173 drivers/iio/pressure/ms5611_core.c 		tmp = (t - 2000) * (t - 2000);
t                 177 drivers/iio/pressure/ms5611_core.c 		if (t < -1500) {
t                 178 drivers/iio/pressure/ms5611_core.c 			tmp = (t + 1500) * (t + 1500);
t                 183 drivers/iio/pressure/ms5611_core.c 		t -= t2;
t                 188 drivers/iio/pressure/ms5611_core.c 	*temp = t;
t                 132 drivers/iio/trigger/iio-trig-sysfs.c 	struct iio_sysfs_trig *t;
t                 137 drivers/iio/trigger/iio-trig-sysfs.c 	list_for_each_entry(t, &iio_sysfs_trig_list, l)
t                 138 drivers/iio/trigger/iio-trig-sysfs.c 		if (id == t->id) {
t                 146 drivers/iio/trigger/iio-trig-sysfs.c 	t = kmalloc(sizeof(*t), GFP_KERNEL);
t                 147 drivers/iio/trigger/iio-trig-sysfs.c 	if (t == NULL) {
t                 151 drivers/iio/trigger/iio-trig-sysfs.c 	t->id = id;
t                 152 drivers/iio/trigger/iio-trig-sysfs.c 	t->trig = iio_trigger_alloc("sysfstrig%d", id);
t                 153 drivers/iio/trigger/iio-trig-sysfs.c 	if (!t->trig) {
t                 158 drivers/iio/trigger/iio-trig-sysfs.c 	t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
t                 159 drivers/iio/trigger/iio-trig-sysfs.c 	t->trig->ops = &iio_sysfs_trigger_ops;
t                 160 drivers/iio/trigger/iio-trig-sysfs.c 	t->trig->dev.parent = &iio_sysfs_trig_dev;
t                 161 drivers/iio/trigger/iio-trig-sysfs.c 	iio_trigger_set_drvdata(t->trig, t);
t                 163 drivers/iio/trigger/iio-trig-sysfs.c 	init_irq_work(&t->work, iio_sysfs_trigger_work);
t                 165 drivers/iio/trigger/iio-trig-sysfs.c 	ret = iio_trigger_register(t->trig);
t                 168 drivers/iio/trigger/iio-trig-sysfs.c 	list_add(&t->l, &iio_sysfs_trig_list);
t                 174 drivers/iio/trigger/iio-trig-sysfs.c 	iio_trigger_free(t->trig);
t                 176 drivers/iio/trigger/iio-trig-sysfs.c 	kfree(t);
t                 185 drivers/iio/trigger/iio-trig-sysfs.c 	struct iio_sysfs_trig *t;
t                 188 drivers/iio/trigger/iio-trig-sysfs.c 	list_for_each_entry(t, &iio_sysfs_trig_list, l)
t                 189 drivers/iio/trigger/iio-trig-sysfs.c 		if (id == t->id) {
t                 198 drivers/iio/trigger/iio-trig-sysfs.c 	iio_trigger_unregister(t->trig);
t                 199 drivers/iio/trigger/iio-trig-sysfs.c 	iio_trigger_free(t->trig);
t                 201 drivers/iio/trigger/iio-trig-sysfs.c 	list_del(&t->l);
t                 202 drivers/iio/trigger/iio-trig-sysfs.c 	kfree(t);
t                 996 drivers/infiniband/core/mad.c 	struct ib_rmpp_segment *s, *t;
t                 998 drivers/infiniband/core/mad.c 	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
t                1310 drivers/infiniband/core/sysfs.c 	struct kobject *p, *t;
t                1312 drivers/infiniband/core/sysfs.c 	list_for_each_entry_safe(p, t, &coredev->port_list, entry) {
t                1398 drivers/infiniband/core/sysfs.c 	struct kobject *p, *t;
t                1401 drivers/infiniband/core/sysfs.c 	list_for_each_entry_safe(p, t, &device->coredev.port_list, entry) {
t                 110 drivers/infiniband/hw/cxgb3/iwch_cm.c static void ep_timeout(struct timer_list *t);
t                1720 drivers/infiniband/hw/cxgb3/iwch_cm.c static void ep_timeout(struct timer_list *t)
t                1722 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct iwch_ep *ep = from_timer(ep, t, timer);
t                 143 drivers/infiniband/hw/cxgb4/cm.c static void ep_timeout(struct timer_list *t);
t                1221 drivers/infiniband/hw/cxgb4/cm.c 	struct tid_info *t = dev->rdev.lldi.tids;
t                1224 drivers/infiniband/hw/cxgb4/cm.c 	ep = lookup_atid(t, atid);
t                1234 drivers/infiniband/hw/cxgb4/cm.c 	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
t                1245 drivers/infiniband/hw/cxgb4/cm.c 	cxgb4_free_atid(t, atid);
t                2270 drivers/infiniband/hw/cxgb4/cm.c 	struct tid_info *t = dev->rdev.lldi.tids;
t                2278 drivers/infiniband/hw/cxgb4/cm.c 	ep = lookup_atid(t, atid);
t                2332 drivers/infiniband/hw/cxgb4/cm.c 			cxgb4_free_atid(t, atid);
t                2369 drivers/infiniband/hw/cxgb4/cm.c 	cxgb4_free_atid(t, atid);
t                2518 drivers/infiniband/hw/cxgb4/cm.c 	struct tid_info *t = dev->rdev.lldi.tids;
t                2651 drivers/infiniband/hw/cxgb4/cm.c 	cxgb4_insert_tid(t, child_ep, hwtid,
t                3825 drivers/infiniband/hw/cxgb4/cm.c 	u64 t;
t                3828 drivers/infiniband/hw/cxgb4/cm.c 	t = (thi << shift) | (tlo >> shift);
t                3830 drivers/infiniband/hw/cxgb4/cm.c 	return t;
t                3836 drivers/infiniband/hw/cxgb4/cm.c 	u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
t                3840 drivers/infiniband/hw/cxgb4/cm.c 	v = (t >> shift) & mask;
t                4319 drivers/infiniband/hw/cxgb4/cm.c static void ep_timeout(struct timer_list *t)
t                4321 drivers/infiniband/hw/cxgb4/cm.c 	struct c4iw_ep *ep = from_timer(ep, t, timer);
t                 170 drivers/infiniband/hw/hfi1/aspm.c static  void aspm_ctx_timer_function(struct timer_list *t)
t                 172 drivers/infiniband/hw/hfi1/aspm.c 	struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
t                5571 drivers/infiniband/hw/hfi1/chip.c static void update_rcverr_timer(struct timer_list *t)
t                5573 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
t                11562 drivers/infiniband/hw/hfi1/chip.c int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
t                11576 drivers/infiniband/hw/hfi1/chip.c 		vl_arb_get_cache(vlc, t);
t                11586 drivers/infiniband/hw/hfi1/chip.c 		vl_arb_get_cache(vlc, t);
t                11590 drivers/infiniband/hw/hfi1/chip.c 		size = get_buffer_control(ppd->dd, t, NULL);
t                11593 drivers/infiniband/hw/hfi1/chip.c 		size = get_sc2vlnt(ppd->dd, t);
t                11598 drivers/infiniband/hw/hfi1/chip.c 		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
t                11616 drivers/infiniband/hw/hfi1/chip.c int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
t                11624 drivers/infiniband/hw/hfi1/chip.c 		if (vl_arb_match_cache(vlc, t)) {
t                11628 drivers/infiniband/hw/hfi1/chip.c 		vl_arb_set_cache(vlc, t);
t                11631 drivers/infiniband/hw/hfi1/chip.c 				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
t                11635 drivers/infiniband/hw/hfi1/chip.c 		if (vl_arb_match_cache(vlc, t)) {
t                11639 drivers/infiniband/hw/hfi1/chip.c 		vl_arb_set_cache(vlc, t);
t                11642 drivers/infiniband/hw/hfi1/chip.c 				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
t                11645 drivers/infiniband/hw/hfi1/chip.c 		ret = set_buffer_control(ppd, t);
t                11648 drivers/infiniband/hw/hfi1/chip.c 		set_sc2vlnt(ppd->dd, t);
t                12469 drivers/infiniband/hw/hfi1/chip.c static void update_synth_timer(struct timer_list *t)
t                12471 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
t                1290 drivers/infiniband/hw/hfi1/driver.c static void run_led_override(struct timer_list *t)
t                1292 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
t                1812 drivers/infiniband/hw/hfi1/hfi.h int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
t                1813 drivers/infiniband/hw/hfi1/hfi.h int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
t                 589 drivers/infiniband/hw/hfi1/init.c static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
t                 599 drivers/infiniband/hw/hfi1/init.c 	cca_timer = container_of(t, struct cca_timer, hrtimer);
t                 631 drivers/infiniband/hw/hfi1/init.c 		hrtimer_forward_now(t, ns_to_ktime(nsec));
t                 412 drivers/infiniband/hw/hfi1/mad.c void hfi1_handle_trap_timer(struct timer_list *t)
t                 414 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
t                 440 drivers/infiniband/hw/hfi1/mad.h void hfi1_handle_trap_timer(struct timer_list *t);
t                 509 drivers/infiniband/hw/hfi1/sdma.c static void sdma_err_progress_check(struct timer_list *t)
t                 512 drivers/infiniband/hw/hfi1/sdma.c 	struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
t                 117 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_tid_timeout(struct timer_list *t);
t                 122 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_tid_retry_timeout(struct timer_list *t);
t                3969 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_tid_timeout(struct timer_list *t)
t                3971 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
t                4785 drivers/infiniband/hw/hfi1/tid_rdma.c static void hfi1_tid_retry_timeout(struct timer_list *t)
t                4787 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
t                1443 drivers/infiniband/hw/hfi1/user_sdma.c 		struct sdma_txreq *t, *p;
t                1445 drivers/infiniband/hw/hfi1/user_sdma.c 		list_for_each_entry_safe(t, p, &req->txps, list) {
t                1447 drivers/infiniband/hw/hfi1/user_sdma.c 				container_of(t, struct user_sdma_txreq, txreq);
t                1448 drivers/infiniband/hw/hfi1/user_sdma.c 			list_del_init(&t->list);
t                1449 drivers/infiniband/hw/hfi1/user_sdma.c 			sdma_txclean(req->pq->dd, t);
t                 596 drivers/infiniband/hw/hfi1/verbs.c static void mem_timer(struct timer_list *t)
t                 598 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
t                 182 drivers/infiniband/hw/hns/hns_roce_alloc.c 	dma_addr_t t;
t                 199 drivers/infiniband/hw/hns/hns_roce_alloc.c 		buf->direct.buf = dma_alloc_coherent(dev, size, &t,
t                 204 drivers/infiniband/hw/hns/hns_roce_alloc.c 		buf->direct.map = t;
t                 206 drivers/infiniband/hw/hns/hns_roce_alloc.c 		while (t & ((1 << buf->page_shift) - 1)) {
t                 223 drivers/infiniband/hw/hns/hns_roce_alloc.c 								   &t,
t                 229 drivers/infiniband/hw/hns/hns_roce_alloc.c 			buf->page_list[i].map = t;
t                1867 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	dma_addr_t t;
t                1909 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 							      &t, GFP_KERNEL);
t                1913 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		link_tbl->pg_list[i].map = t;
t                1915 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		entry[i].blk_ba0 = (u32)(t >> 12);
t                1916 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
t                1210 drivers/infiniband/hw/i40iw/i40iw_cm.c static void i40iw_cm_timer_tick(struct timer_list *t)
t                1218 drivers/infiniband/hw/i40iw/i40iw_cm.c 	struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
t                 935 drivers/infiniband/hw/i40iw/i40iw_utils.c static void i40iw_terminate_timeout(struct timer_list *t)
t                 937 drivers/infiniband/hw/i40iw/i40iw_utils.c 	struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
t                1507 drivers/infiniband/hw/i40iw/i40iw_utils.c static void i40iw_hw_stats_timeout(struct timer_list *t)
t                1509 drivers/infiniband/hw/i40iw/i40iw_utils.c 	struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
t                3448 drivers/infiniband/hw/mlx4/qp.c 	u32 *t = dseg;
t                3451 drivers/infiniband/hw/mlx4/qp.c 	t[1] = 0;
t                 709 drivers/infiniband/hw/mlx4/sysfs.c 	struct kobject *p, *t;
t                 746 drivers/infiniband/hw/mlx4/sysfs.c 	list_for_each_entry_safe(p, t,
t                 784 drivers/infiniband/hw/mlx4/sysfs.c 	struct kobject *p, *t;
t                 791 drivers/infiniband/hw/mlx4/sysfs.c 		list_for_each_entry_safe(p, t,
t                 600 drivers/infiniband/hw/mlx5/mr.c static void delay_time_func(struct timer_list *t)
t                 602 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
t                 202 drivers/infiniband/hw/mthca/mthca_allocator.c 	dma_addr_t t;
t                 211 drivers/infiniband/hw/mthca/mthca_allocator.c 						     size, &t, GFP_KERNEL);
t                 215 drivers/infiniband/hw/mthca/mthca_allocator.c 		dma_unmap_addr_set(&buf->direct, mapping, t);
t                 217 drivers/infiniband/hw/mthca/mthca_allocator.c 		while (t & ((1 << shift) - 1)) {
t                 228 drivers/infiniband/hw/mthca/mthca_allocator.c 			dma_list[i] = t + i * (1 << shift);
t                 251 drivers/infiniband/hw/mthca/mthca_allocator.c 						   &t, GFP_KERNEL);
t                 255 drivers/infiniband/hw/mthca/mthca_allocator.c 			dma_list[i] = t;
t                 256 drivers/infiniband/hw/mthca/mthca_allocator.c 			dma_unmap_addr_set(&buf->page_list[i], mapping, t);
t                 133 drivers/infiniband/hw/mthca/mthca_catas.c static void poll_catas(struct timer_list *t)
t                 135 drivers/infiniband/hw/mthca/mthca_catas.c 	struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
t                 472 drivers/infiniband/hw/mthca/mthca_eq.c 	dma_addr_t t;
t                 501 drivers/infiniband/hw/mthca/mthca_eq.c 							  PAGE_SIZE, &t, GFP_KERNEL);
t                 505 drivers/infiniband/hw/mthca/mthca_eq.c 		dma_list[i] = t;
t                 506 drivers/infiniband/hw/mthca/mthca_eq.c 		dma_unmap_addr_set(&eq->page_list[i], mapping, t);
t                1283 drivers/infiniband/hw/qib/qib.h void qib_clear_symerror_on_linkup(struct timer_list *t);
t                 665 drivers/infiniband/hw/qib/qib_driver.c static void qib_run_led_override(struct timer_list *t)
t                 667 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_pportdata *ppd = from_timer(ppd, t,
t                 151 drivers/infiniband/hw/qib/qib_eeprom.c 	int t = dd->unit;
t                 154 drivers/infiniband/hw/qib/qib_eeprom.c 	if (t && dd0->nguid > 1 && t <= dd0->nguid) {
t                 161 drivers/infiniband/hw/qib/qib_eeprom.c 		bguid[7] += t;
t                2620 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_get_6120_faststats(struct timer_list *t)
t                2622 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
t                2910 drivers/infiniband/hw/qib/qib_iba6120.c static void pma_6120_timer(struct timer_list *t)
t                2912 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
t                1045 drivers/infiniband/hw/qib/qib_iba7220.c static void reenable_7220_chase(struct timer_list *t)
t                1047 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
t                3241 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_get_7220_faststats(struct timer_list *t)
t                3243 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
t                1744 drivers/infiniband/hw/qib/qib_iba7322.c static void reenable_chase(struct timer_list *t)
t                1746 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
t                4397 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
t                4401 drivers/infiniband/hw/qib/qib_iba7322.c 		get_vl_weights(ppd, krp_highprio_0, t);
t                4405 drivers/infiniband/hw/qib/qib_iba7322.c 		get_vl_weights(ppd, krp_lowprio_0, t);
t                4414 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
t                4418 drivers/infiniband/hw/qib/qib_iba7322.c 		set_vl_weights(ppd, krp_highprio_0, t);
t                4422 drivers/infiniband/hw/qib/qib_iba7322.c 		set_vl_weights(ppd, krp_lowprio_0, t);
t                5110 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_get_7322_faststats(struct timer_list *t)
t                5112 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
t                 494 drivers/infiniband/hw/qib/qib_init.c static void verify_interrupt(struct timer_list *t)
t                 496 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
t                 173 drivers/infiniband/hw/qib/qib_intr.c void qib_clear_symerror_on_linkup(struct timer_list *t)
t                 175 drivers/infiniband/hw/qib/qib_intr.c 	struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);
t                2449 drivers/infiniband/hw/qib/qib_mad.c static void xmit_wait_timer_func(struct timer_list *t)
t                2451 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
t                1387 drivers/infiniband/hw/qib/qib_sd7220.c static void qib_run_relock(struct timer_list *t)
t                1389 drivers/infiniband/hw/qib/qib_sd7220.c 	struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
t                 551 drivers/infiniband/hw/qib/qib_tx.c void qib_hol_event(struct timer_list *t)
t                 553 drivers/infiniband/hw/qib/qib_tx.c 	struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);
t                 363 drivers/infiniband/hw/qib/qib_verbs.c static void mem_timer(struct timer_list *t)
t                 365 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_ibdev *dev = from_timer(dev, t, mem_timer);
t                 486 drivers/infiniband/sw/rdmavt/mr.c static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
t                 502 drivers/infiniband/sw/rdmavt/mr.c 			   t, mr, mr->pd, mr->lkey,
t                  63 drivers/infiniband/sw/rdmavt/qp.c static void rvt_rc_timeout(struct timer_list *t);
t                2650 drivers/infiniband/sw/rdmavt/qp.c static void rvt_rc_timeout(struct timer_list *t)
t                2652 drivers/infiniband/sw/rdmavt/qp.c 	struct rvt_qp *qp = from_timer(qp, t, s_timer);
t                2678 drivers/infiniband/sw/rdmavt/qp.c enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
t                2680 drivers/infiniband/sw/rdmavt/qp.c 	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
t                 139 drivers/infiniband/sw/rxe/rxe_comp.c void retransmit_timer(struct timer_list *t)
t                 141 drivers/infiniband/sw/rxe/rxe_comp.c 	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
t                 216 drivers/infiniband/sw/rxe/rxe_loc.h void retransmit_timer(struct timer_list *t);
t                 217 drivers/infiniband/sw/rxe/rxe_loc.h void rnr_nak_timer(struct timer_list *t);
t                 124 drivers/infiniband/sw/rxe/rxe_req.c void rnr_nak_timer(struct timer_list *t)
t                 126 drivers/infiniband/sw/rxe/rxe_req.c 	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
t                 509 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_tx_timer_func(struct timer_list *t);
t                 354 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	} *t;
t                 363 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	t = kmalloc(sizeof(*t), GFP_KERNEL);
t                 364 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (!t) {
t                 369 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
t                 391 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
t                 402 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	kfree(t);
t                 412 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	kfree(t);
t                 456 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	int t;
t                 465 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		t = min(IPOIB_NUM_WC, max);
t                 466 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
t                 482 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (n != t)
t                3360 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_target_port *t;
t                3369 drivers/infiniband/ulp/srp/ib_srp.c 	list_for_each_entry(t, &host->target_list, list) {
t                3370 drivers/infiniband/ulp/srp/ib_srp.c 		if (t != target &&
t                3371 drivers/infiniband/ulp/srp/ib_srp.c 		    target->id_ext == t->id_ext &&
t                3372 drivers/infiniband/ulp/srp/ib_srp.c 		    target->ioc_guid == t->ioc_guid &&
t                3373 drivers/infiniband/ulp/srp/ib_srp.c 		    target->initiator_ext == t->initiator_ext) {
t                1038 drivers/input/evdev.c 	unsigned int i, t, u, v;
t                1208 drivers/input/evdev.c 			t = _IOC_NR(cmd) & ABS_MAX;
t                1209 drivers/input/evdev.c 			abs = dev->absinfo[t];
t                1226 drivers/input/evdev.c 			t = _IOC_NR(cmd) & ABS_MAX;
t                1236 drivers/input/evdev.c 			if (t == ABS_MT_SLOT)
t                1245 drivers/input/evdev.c 			dev->absinfo[t] = abs;
t                 403 drivers/input/ff-memless.c static void ml_effect_timer(struct timer_list *t)
t                 405 drivers/input/ff-memless.c 	struct ml_device *ml = from_timer(ml, t, timer);
t                  80 drivers/input/gameport/gameport.c 	unsigned int i, t, tx;
t                  92 drivers/input/gameport/gameport.c 		for (t = 0; t < 50; t++)
t                  98 drivers/input/gameport/gameport.c 		t = (t2 - t1) - (t3 - t2);
t                  99 drivers/input/gameport/gameport.c 		if (t < tx)
t                 100 drivers/input/gameport/gameport.c 			tx = t;
t                 104 drivers/input/gameport/gameport.c 	t = 1000000 * 50;
t                 106 drivers/input/gameport/gameport.c 		t /= tx;
t                 107 drivers/input/gameport/gameport.c 	return t;
t                 114 drivers/input/gameport/gameport.c 	unsigned int i, t, t1, t2, t3, tx;
t                 125 drivers/input/gameport/gameport.c 		for (t = 0; t < 50; t++) gameport_read(gameport);
t                 130 drivers/input/gameport/gameport.c 		if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
t                 138 drivers/input/gameport/gameport.c 	unsigned int i, t;
t                 149 drivers/input/gameport/gameport.c 		for (t = 0; t < 50; t++) gameport_read(gameport);
t                 162 drivers/input/gameport/gameport.c 	unsigned int j, t = 0;
t                 168 drivers/input/gameport/gameport.c 	j = jiffies; while (j == jiffies) { t++; gameport_read(gameport); }
t                 171 drivers/input/gameport/gameport.c 	return t * HZ / 1000;
t                 201 drivers/input/gameport/gameport.c static void gameport_run_poll_handler(struct timer_list *t)
t                 203 drivers/input/gameport/gameport.c 	struct gameport *gameport = from_timer(gameport, t, poll_timer);
t                  50 drivers/input/gameport/lightning.c 	unsigned int t = L4_TIMEOUT;
t                  52 drivers/input/gameport/lightning.c 	while ((inb(L4_PORT) & L4_BUSY) && t > 0) t--;
t                  53 drivers/input/gameport/lightning.c 	return -(t <= 0);
t                 182 drivers/input/gameport/lightning.c 	int i, t;
t                 190 drivers/input/gameport/lightning.c 		t = (max[i] * cal[i]) / 200;
t                 191 drivers/input/gameport/lightning.c 		t = (t < 1) ? 1 : ((t > 255) ? 255 : t);
t                 192 drivers/input/gameport/lightning.c 		axes[i] = (axes[i] < 0) ? -1 : (axes[i] * cal[i]) / t;
t                 194 drivers/input/gameport/lightning.c 		cal[i] = t;
t                 179 drivers/input/input.c static void input_repeat_key(struct timer_list *t)
t                 181 drivers/input/input.c 	struct input_dev *dev = from_timer(dev, t, timer);
t                 903 drivers/input/joydev.c 	int i, j, t, minor, dev_no;
t                 965 drivers/input/joydev.c 		t = (input_abs_get_max(dev, j) + input_abs_get_min(dev, j)) / 2;
t                 966 drivers/input/joydev.c 		joydev->corr[i].coef[0] = t - input_abs_get_flat(dev, j);
t                 967 drivers/input/joydev.c 		joydev->corr[i].coef[1] = t + input_abs_get_flat(dev, j);
t                 969 drivers/input/joydev.c 		t = (input_abs_get_max(dev, j) - input_abs_get_min(dev, j)) / 2
t                 971 drivers/input/joydev.c 		if (t) {
t                 972 drivers/input/joydev.c 			joydev->corr[i].coef[2] = (1 << 29) / t;
t                 973 drivers/input/joydev.c 			joydev->corr[i].coef[3] = (1 << 29) / t;
t                  59 drivers/input/joystick/a3d.c 	unsigned int t, s;
t                  63 drivers/input/joystick/a3d.c 	t = gameport_time(gameport, A3D_MAX_START);
t                  70 drivers/input/joystick/a3d.c 	while (t > 0 && i < length) {
t                  71 drivers/input/joystick/a3d.c 		t--;
t                  75 drivers/input/joystick/a3d.c 			t = s;
t                 127 drivers/input/joystick/adi.c 	int t[2], s[2], i;
t                 132 drivers/input/joystick/adi.c 		t[i] = gameport_time(gameport, ADI_MAX_START);
t                 145 drivers/input/joystick/adi.c 			t[i]--;
t                 147 drivers/input/joystick/adi.c 				if ((w & 0x30) < 0x30 && adi[i].ret < ADI_MAX_LENGTH && t[i] > 0) {
t                 149 drivers/input/joystick/adi.c 					t[i] = gameport_time(gameport, ADI_MAX_STROBE);
t                 150 drivers/input/joystick/adi.c 				} else t[i] = 0;
t                 153 drivers/input/joystick/adi.c 	} while (t[0] > 0 || t[1] > 0);
t                 205 drivers/input/joystick/adi.c 	int i, t;
t                 218 drivers/input/joystick/adi.c 			t = adi_get_bits(adi, 4);
t                 219 drivers/input/joystick/adi.c 			input_report_abs(dev, *abs++, ((t >> 2) & 1) - ( t       & 1));
t                 220 drivers/input/joystick/adi.c 			input_report_abs(dev, *abs++, ((t >> 1) & 1) - ((t >> 3) & 1));
t                 226 drivers/input/joystick/adi.c 		if ((t = adi_get_bits(adi, 4)) > 8) t = 0;
t                 227 drivers/input/joystick/adi.c 		input_report_abs(dev, *abs++, adi_hat_to_axis[t].x);
t                 228 drivers/input/joystick/adi.c 		input_report_abs(dev, *abs++, adi_hat_to_axis[t].y);
t                 316 drivers/input/joystick/adi.c 	int i, t;
t                 321 drivers/input/joystick/adi.c 	if (adi->ret < (t = adi_get_bits(adi, 10))) {
t                 322 drivers/input/joystick/adi.c 		printk(KERN_WARNING "adi: Short ID packet: reported: %d != read: %d\n", t, adi->ret);
t                 328 drivers/input/joystick/adi.c 	if ((t = adi_get_bits(adi, 4)) & ADI_FLAG_HAT) adi->hats++;
t                 352 drivers/input/joystick/adi.c 	if (t & ADI_FLAG_10BIT) {
t                 357 drivers/input/joystick/adi.c 	t = adi_get_bits(adi, 4);
t                 359 drivers/input/joystick/adi.c 	for (i = 0; i < t; i++)
t                 363 drivers/input/joystick/adi.c 	t = 8 + adi->buttons + adi->axes10 * 10 + adi->axes8 * 8 + adi->hats * 4;
t                 364 drivers/input/joystick/adi.c 	if (adi->length != t && adi->length != t + (t & 1)) {
t                 365 drivers/input/joystick/adi.c 		printk(KERN_WARNING "adi: Expected length %d != data length %d\n", t, adi->length);
t                 389 drivers/input/joystick/adi.c 	int i, t;
t                 395 drivers/input/joystick/adi.c 	t = adi->id < ADI_ID_MAX ? adi->id : ADI_ID_MAX;
t                 397 drivers/input/joystick/adi.c 	snprintf(buf, ADI_MAX_PHYS_LENGTH, adi_names[t], adi->id);
t                 401 drivers/input/joystick/adi.c 	adi->abs = adi_abs[t];
t                 402 drivers/input/joystick/adi.c 	adi->key = adi_key[t];
t                 430 drivers/input/joystick/adi.c 	int i, t, x;
t                 437 drivers/input/joystick/adi.c 		t = adi->abs[i];
t                 438 drivers/input/joystick/adi.c 		x = input_abs_get_val(adi->dev, t);
t                 440 drivers/input/joystick/adi.c 		if (t == ABS_THROTTLE || t == ABS_RUDDER || adi->id == ADI_ID_WGPE)
t                 444 drivers/input/joystick/adi.c 			input_set_abs_params(adi->dev, t, 64, x * 2 - 64, 2, 16);
t                 446 drivers/input/joystick/adi.c 			input_set_abs_params(adi->dev, t, 48, x * 2 - 48, 1, 16);
t                 448 drivers/input/joystick/adi.c 			input_set_abs_params(adi->dev, t, -1, 1, 0, 0);
t                 287 drivers/input/joystick/analog.c 	int t = 1, i = 0;
t                 299 drivers/input/joystick/analog.c 	while ((~u & 0xf0) && (i < 16) && t) {
t                 303 drivers/input/joystick/analog.c 		t = strobe;
t                 305 drivers/input/joystick/analog.c 		while (((u = gameport_read(port->gameport)) & port->mask) && t) t--;
t                 309 drivers/input/joystick/analog.c 	return -(!t || (i == 16));
t                 377 drivers/input/joystick/analog.c 	unsigned int i, t, tx;
t                 402 drivers/input/joystick/analog.c 		for (t = 0; t < 50; t++) {
t                 409 drivers/input/joystick/analog.c 		t = delta(t1, t2) - delta(t2, t3);
t                 410 drivers/input/joystick/analog.c 		if (t < tx) tx = t;
t                 448 drivers/input/joystick/analog.c 	int i, j, t, v, w, x, y, z;
t                 478 drivers/input/joystick/analog.c 			t = analog_axes[j];
t                 495 drivers/input/joystick/analog.c 			input_set_abs_params(input_dev, t, v, (x << 1) - v, port->fuzz, w);
t                 502 drivers/input/joystick/analog.c 				t = analog_hats[j++];
t                 503 drivers/input/joystick/analog.c 				input_set_abs_params(input_dev, t, -1, 1, 0, 0);
t                 600 drivers/input/joystick/analog.c 	int i, t, u, v;
t                 611 drivers/input/joystick/analog.c 		t = gameport_read(gameport);
t                 613 drivers/input/joystick/analog.c 		port->mask = (gameport_read(gameport) ^ t) & t & 0xf;
t                 625 drivers/input/joystick/analog.c 		t = gameport_time(gameport, ANALOG_MAX_TIME * 1000);
t                 627 drivers/input/joystick/analog.c 		while ((gameport_read(port->gameport) & port->mask) && (u < t))
t                 630 drivers/input/joystick/analog.c 		t = gameport_time(gameport, ANALOG_SAITEK_TIME);
t                 632 drivers/input/joystick/analog.c 		while ((gameport_read(port->gameport) & port->mask) && (v < t))
t                  45 drivers/input/joystick/cobra.c 	int r[2], t[2];
t                  52 drivers/input/joystick/cobra.c 		t[i] = COBRA_MAX_STROBE;
t                  60 drivers/input/joystick/cobra.c 		t[0]--; t[1]--;
t                  64 drivers/input/joystick/cobra.c 				if ((w & 0x30) < 0x30 && r[i] < COBRA_LENGTH && t[i] > 0) {
t                  66 drivers/input/joystick/cobra.c 					t[i] = strobe;
t                  68 drivers/input/joystick/cobra.c 				} else t[i] = 0;
t                  70 drivers/input/joystick/cobra.c 	} while (t[0] > 0 || t[1] > 0);
t                 355 drivers/input/joystick/db9.c static void db9_timer(struct timer_list *t)
t                 357 drivers/input/joystick/db9.c 	struct db9 *db9 = from_timer(db9, t, timer);
t                 730 drivers/input/joystick/gamecon.c static void gc_timer(struct timer_list *t)
t                 732 drivers/input/joystick/gamecon.c 	struct gc *gc = from_timer(gc, t, timer);
t                  82 drivers/input/joystick/gf2k.c 	unsigned int t, p;
t                  85 drivers/input/joystick/gf2k.c 	t = gameport_time(gameport, GF2K_START);
t                  95 drivers/input/joystick/gf2k.c 	while (t > 0 && i < length) {
t                  96 drivers/input/joystick/gf2k.c 		t--; u = v;
t                 100 drivers/input/joystick/gf2k.c 			t = p;
t                 118 drivers/input/joystick/gf2k.c 	int i, t;
t                 125 drivers/input/joystick/gf2k.c 		t = gameport_time(gameport, GF2K_TIMEOUT * 1000);
t                 126 drivers/input/joystick/gf2k.c 		while ((gameport_read(gameport) & 1) && t) t--;
t                 161 drivers/input/joystick/gf2k.c 	int i, t;
t                 169 drivers/input/joystick/gf2k.c 	t = GB(40,4,0);
t                 172 drivers/input/joystick/gf2k.c 		input_report_abs(dev, ABS_HAT0X + i, gf2k_hat_to_axis[t][i]);
t                 174 drivers/input/joystick/gf2k.c 	t = GB(44,2,0) | GB(32,8,2) | GB(78,2,10);
t                 177 drivers/input/joystick/gf2k.c 		input_report_key(dev, gf2k_btn_joy[i], (t >> i) & 1);
t                 180 drivers/input/joystick/gf2k.c 		input_report_key(dev, gf2k_btn_pad[i], (t >> i) & 1);
t                  72 drivers/input/joystick/grip.c 	unsigned int t;
t                  78 drivers/input/joystick/grip.c 	t = strobe;
t                  86 drivers/input/joystick/grip.c 		t--;
t                  90 drivers/input/joystick/grip.c 			t = strobe;
t                  92 drivers/input/joystick/grip.c 	} while (i < GRIP_LENGTH_GPP && t > 0);
t                 113 drivers/input/joystick/grip.c 	unsigned int t;
t                 120 drivers/input/joystick/grip.c 	t = strobe;
t                 127 drivers/input/joystick/grip.c 		t--;
t                 134 drivers/input/joystick/grip.c 				t = strobe;
t                 147 drivers/input/joystick/grip.c 				t = strobe;
t                 155 drivers/input/joystick/grip.c 	} while (status != 0xf && i < GRIP_MAX_BITS_XT && j < GRIP_MAX_CHUNKS_XT && t > 0);
t                 287 drivers/input/joystick/grip.c 	int i, j, t;
t                 356 drivers/input/joystick/grip.c 		for (j = 0; (t = grip_abs[grip->mode[i]][j]) >= 0; j++) {
t                 359 drivers/input/joystick/grip.c 				input_set_abs_params(input_dev, t, 14, 52, 1, 2);
t                 361 drivers/input/joystick/grip.c 				input_set_abs_params(input_dev, t, 3, 57, 1, 0);
t                 363 drivers/input/joystick/grip.c 				input_set_abs_params(input_dev, t, -1, 1, 0, 0);
t                 366 drivers/input/joystick/grip.c 		for (j = 0; (t = grip_btn[grip->mode[i]][j]) >= 0; j++)
t                 367 drivers/input/joystick/grip.c 			if (t > 0)
t                 368 drivers/input/joystick/grip.c 				set_bit(t, input_dev->keybit);
t                 588 drivers/input/joystick/grip_mp.c 	int j, t;
t                 609 drivers/input/joystick/grip_mp.c 	for (j = 0; (t = grip_abs[port->mode][j]) >= 0; j++)
t                 610 drivers/input/joystick/grip_mp.c 		input_set_abs_params(input_dev, t, -1, 1, 0, 0);
t                 612 drivers/input/joystick/grip_mp.c 	for (j = 0; (t = grip_btn[port->mode][j]) >= 0; j++)
t                 613 drivers/input/joystick/grip_mp.c 		if (t > 0)
t                 614 drivers/input/joystick/grip_mp.c 			set_bit(t, input_dev->keybit);
t                  72 drivers/input/joystick/guillemot.c 	unsigned int t, s;
t                  79 drivers/input/joystick/guillemot.c 	t = gameport_time(gameport, GUILLEMOT_MAX_START);
t                  86 drivers/input/joystick/guillemot.c 	while (t > 0 && i < GUILLEMOT_MAX_LENGTH * 8) {
t                  87 drivers/input/joystick/guillemot.c 		t--;
t                  92 drivers/input/joystick/guillemot.c 			t = s;
t                 166 drivers/input/joystick/guillemot.c 	int i, t;
t                 224 drivers/input/joystick/guillemot.c 	for (i = 0; (t = guillemot->type->abs[i]) >= 0; i++)
t                 225 drivers/input/joystick/guillemot.c 		input_set_abs_params(input_dev, t, 0, 255, 0, 0);
t                 232 drivers/input/joystick/guillemot.c 	for (i = 0; (t = guillemot->type->btn[i]) >= 0; i++)
t                 233 drivers/input/joystick/guillemot.c 		set_bit(t, input_dev->keybit);
t                 340 drivers/input/joystick/iforce/iforce-main.c 		signed short t = iforce->type->abs[i];
t                 342 drivers/input/joystick/iforce/iforce-main.c 		switch (t) {
t                 346 drivers/input/joystick/iforce/iforce-main.c 			input_set_abs_params(input_dev, t, -1920, 1920, 16, 128);
t                 347 drivers/input/joystick/iforce/iforce-main.c 			set_bit(t, input_dev->ffbit);
t                 353 drivers/input/joystick/iforce/iforce-main.c 			input_set_abs_params(input_dev, t, 0, 255, 0, 0);
t                 357 drivers/input/joystick/iforce/iforce-main.c 			input_set_abs_params(input_dev, t, -128, 127, 0, 0);
t                 364 drivers/input/joystick/iforce/iforce-main.c 			input_set_abs_params(input_dev, t, -1, 1, 0, 0);
t                  79 drivers/input/joystick/interact.c 	unsigned int t, s;
t                  84 drivers/input/joystick/interact.c 	t = gameport_time(gameport, INTERACT_MAX_START);
t                  91 drivers/input/joystick/interact.c 	while (t > 0 && i < length) {
t                  92 drivers/input/joystick/interact.c 		t--;
t                  99 drivers/input/joystick/interact.c 			t = s;
t                 195 drivers/input/joystick/interact.c 	int i, t;
t                 255 drivers/input/joystick/interact.c 	for (i = 0; (t = interact_type[interact->type].abs[i]) >= 0; i++) {
t                 257 drivers/input/joystick/interact.c 			input_set_abs_params(input_dev, t, 0, 255, 0, 0);
t                 259 drivers/input/joystick/interact.c 			input_set_abs_params(input_dev, t, -1, 1, 0, 0);
t                 262 drivers/input/joystick/interact.c 	for (i = 0; (t = interact_type[interact->type].btn[i]) >= 0; i++)
t                 263 drivers/input/joystick/interact.c 		__set_bit(t, input_dev->keybit);
t                  38 drivers/input/joystick/joydump.c 	int i, j, t, timeout;
t                  73 drivers/input/joystick/joydump.c 	t = 0;
t                  81 drivers/input/joystick/joydump.c 	dump->time = t;
t                  86 drivers/input/joystick/joydump.c 	while (i < BUF_SIZE && t < timeout) {
t                  92 drivers/input/joystick/joydump.c 			dump->time = t;
t                  96 drivers/input/joystick/joydump.c 		t++;
t                 105 drivers/input/joystick/joydump.c 	t = i;
t                 116 drivers/input/joystick/joydump.c 	for (i = 1; i < t; i++, dump++, prev++) {
t                  70 drivers/input/joystick/magellan.c 	int i, t;
t                  88 drivers/input/joystick/magellan.c 			t = (data[1] << 1) | (data[2] << 5) | data[3];
t                  89 drivers/input/joystick/magellan.c 			for (i = 0; i < 9; i++) input_report_key(dev, magellan_buttons[i], (t >> i) & 1);
t                 228 drivers/input/joystick/sidewinder.c 	int i, t;
t                 235 drivers/input/joystick/sidewinder.c 		t = gameport_time(gameport, SW_TIMEOUT * 1000);
t                 236 drivers/input/joystick/sidewinder.c 		while ((gameport_read(gameport) & 1) && t) t--;	/* Wait for axis to fall back to 0 */
t                 249 drivers/input/joystick/sidewinder.c static int sw_parity(__u64 t)
t                 251 drivers/input/joystick/sidewinder.c 	int x = t ^ (t >> 32);
t                 265 drivers/input/joystick/sidewinder.c static int sw_check(__u64 t)
t                 269 drivers/input/joystick/sidewinder.c 	if ((t & 0x8080808080808080ULL) ^ 0x80)			/* Sync */
t                 272 drivers/input/joystick/sidewinder.c 	while (t) {						/* Sum */
t                 273 drivers/input/joystick/sidewinder.c 		sum += t & 0xf;
t                 274 drivers/input/joystick/sidewinder.c 		t >>= 4;
t                 134 drivers/input/joystick/tmdc.c 	int i[2], j[2], t[2], p, k;
t                 139 drivers/input/joystick/tmdc.c 		t[k] = gameport_time(gameport, TMDC_MAX_START);
t                 154 drivers/input/joystick/tmdc.c 				if (t[k] <= 0 || i[k] >= TMDC_MAX_LENGTH) continue;
t                 155 drivers/input/joystick/tmdc.c 				t[k] = p;
t                 157 drivers/input/joystick/tmdc.c 					if (~v & 1) t[k] = 0;
t                 161 drivers/input/joystick/tmdc.c 					if (v & 1) t[k] = 0;
t                 166 drivers/input/joystick/tmdc.c 			t[k]--;
t                 168 drivers/input/joystick/tmdc.c 	} while (t[0] > 0 || t[1] > 0);
t                  76 drivers/input/joystick/turbografx.c static void tgfx_timer(struct timer_list *t)
t                  78 drivers/input/joystick/turbografx.c 	struct tgfx *tgfx = from_timer(tgfx, t, timer);
t                1011 drivers/input/keyboard/applespi.c 			    struct touchpad_protocol *t)
t                1025 drivers/input/keyboard/applespi.c 	for (i = 0; i < t->number_of_fingers; i++) {
t                1026 drivers/input/keyboard/applespi.c 		f = &t->fingers[i];
t                1042 drivers/input/keyboard/applespi.c 				   &applespi->pos[i], &t->fingers[i]);
t                1045 drivers/input/keyboard/applespi.c 	input_report_key(input, BTN_LEFT, t->clicked);
t                 418 drivers/input/keyboard/gpio_keys.c static void gpio_keys_irq_timer(struct timer_list *t)
t                 420 drivers/input/keyboard/gpio_keys.c 	struct gpio_button_data *bdata = from_timer(bdata, t, release_timer);
t                 183 drivers/input/keyboard/imx_keypad.c static void imx_keypad_check_for_events(struct timer_list *t)
t                 185 drivers/input/keyboard/imx_keypad.c 	struct imx_keypad *keypad = from_timer(keypad, t, check_matrix_timer);
t                 120 drivers/input/keyboard/jornada680_kbd.c 	}, *t = matrix_switch;
t                 149 drivers/input/keyboard/jornada680_kbd.c 		__raw_writeb(*t++, PDDR);
t                 150 drivers/input/keyboard/jornada680_kbd.c 		__raw_writeb(*t++, PEDR);
t                 102 drivers/input/keyboard/lm8323.c #define PWM_RAMP(s, t, n, u)		((!!(s) << 14) | ((t) & 0x3f) << 8 | \
t                 198 drivers/input/keyboard/locomokbd.c static void locomokbd_timer_callback(struct timer_list *t)
t                 200 drivers/input/keyboard/locomokbd.c 	struct locomokbd *locomokbd = from_timer(locomokbd, t, timer);
t                 168 drivers/input/keyboard/mpr121_touchkey.c 	int i, t, vdd, ret;
t                 172 drivers/input/keyboard/mpr121_touchkey.c 		t = ELE0_TOUCH_THRESHOLD_ADDR + (i * 2);
t                 173 drivers/input/keyboard/mpr121_touchkey.c 		ret = i2c_smbus_write_byte_data(client, t, TOUCH_THRESHOLD);
t                 176 drivers/input/keyboard/mpr121_touchkey.c 		ret = i2c_smbus_write_byte_data(client, t + 1,
t                  42 drivers/input/keyboard/snvs_pwrkey.c static void imx_imx_snvs_check_for_events(struct timer_list *t)
t                  44 drivers/input/keyboard/snvs_pwrkey.c 	struct pwrkey_drv_data *pdata = from_timer(pdata, t, check_timer);
t                 241 drivers/input/keyboard/tegra-kbc.c static void tegra_kbc_keypress_timer(struct timer_list *t)
t                 243 drivers/input/keyboard/tegra-kbc.c 	struct tegra_kbc *kbc = from_timer(kbc, t, timer);
t                 100 drivers/input/keyboard/twl4030_keypad.c #define KEYP_PERIOD_US(t, prescale)	((t) / (31 << ((prescale) + 1)) - 1)
t                  87 drivers/input/misc/hp_sdc_rtc.c 	hp_sdc_transaction t;
t                 103 drivers/input/misc/hp_sdc_rtc.c 	t.endidx =		91;
t                 104 drivers/input/misc/hp_sdc_rtc.c 	t.seq =			tseq;
t                 105 drivers/input/misc/hp_sdc_rtc.c 	t.act.semaphore =	&tsem;
t                 108 drivers/input/misc/hp_sdc_rtc.c 	if (hp_sdc_enqueue_transaction(&t)) return -1;
t                 156 drivers/input/misc/hp_sdc_rtc.c 	hp_sdc_transaction t;
t                 171 drivers/input/misc/hp_sdc_rtc.c 	t.endidx = numreg * 5;
t                 174 drivers/input/misc/hp_sdc_rtc.c 	tseq[t.endidx - 4] |= HP_SDC_ACT_SEMAPHORE; /* numreg assumed > 1 */
t                 176 drivers/input/misc/hp_sdc_rtc.c 	t.seq =			tseq;
t                 177 drivers/input/misc/hp_sdc_rtc.c 	t.act.semaphore =	&i8042tregs;
t                 183 drivers/input/misc/hp_sdc_rtc.c 	if (hp_sdc_enqueue_transaction(&t)) {
t                 293 drivers/input/misc/hp_sdc_rtc.c 	hp_sdc_transaction t;
t                 301 drivers/input/misc/hp_sdc_rtc.c 	t.endidx = 10;
t                 322 drivers/input/misc/hp_sdc_rtc.c 	t.seq =	tseq;
t                 324 drivers/input/misc/hp_sdc_rtc.c 	if (hp_sdc_enqueue_transaction(&t)) return -1;
t                 332 drivers/input/misc/hp_sdc_rtc.c 	hp_sdc_transaction t;
t                 338 drivers/input/misc/hp_sdc_rtc.c 	t.endidx = 4;
t                 349 drivers/input/misc/hp_sdc_rtc.c 	t.seq =	tseq;
t                 351 drivers/input/misc/hp_sdc_rtc.c 	if (hp_sdc_enqueue_transaction(&t)) return -1;
t                 372 drivers/input/misc/hp_sdc_rtc.c 	hp_sdc_transaction t;
t                 378 drivers/input/misc/hp_sdc_rtc.c 	t.endidx = 6;
t                 391 drivers/input/misc/hp_sdc_rtc.c 	t.seq =			tseq;
t                 393 drivers/input/misc/hp_sdc_rtc.c 	if (hp_sdc_enqueue_transaction(&t)) {
t                  61 drivers/input/misc/yealink.c #define _SEG(t, a, am, b, bm, c, cm, d, dm, e, em, f, fm, g, gm)	\
t                  62 drivers/input/misc/yealink.c 	{ .type	= (t),							\
t                  66 drivers/input/misc/yealink.c #define _PIC(t, h, hm, n)						\
t                  67 drivers/input/misc/yealink.c 	{ .type	= (t),							\
t                1577 drivers/input/mouse/alps.c static void alps_flush_packet(struct timer_list *t)
t                1579 drivers/input/mouse/alps.c 	struct alps_data *priv = from_timer(priv, t, timer);
t                 252 drivers/input/mouse/byd.c static void byd_clear_touch(struct timer_list *t)
t                 254 drivers/input/mouse/byd.c 	struct byd_data *priv = from_timer(priv, t, timer);
t                 241 drivers/input/mouse/cyapa.h #define PIP_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
t                 470 drivers/input/mouse/elantech.c 	u32 t;
t                 472 drivers/input/mouse/elantech.c 	t = get_unaligned_le32(&packet[0]);
t                 474 drivers/input/mouse/elantech.c 	switch (t & ~7U) {
t                 374 drivers/input/touchscreen/ad7877.c static void ad7877_timer(struct timer_list *t)
t                 376 drivers/input/touchscreen/ad7877.c 	struct ad7877 *ts = from_timer(ts, t, timer);
t                 238 drivers/input/touchscreen/ad7879.c static void ad7879_timer(struct timer_list *t)
t                 240 drivers/input/touchscreen/ad7879.c 	struct ad7879 *ts = from_timer(ts, t, timer);
t                 670 drivers/input/touchscreen/ads7846.c 	struct spi_transfer *t =
t                 674 drivers/input/touchscreen/ads7846.c 		value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
t                 680 drivers/input/touchscreen/ads7846.c 		value = be16_to_cpup((__be16 *)t->rx_buf);
t                 689 drivers/input/touchscreen/ads7846.c 	struct spi_transfer *t =
t                 692 drivers/input/touchscreen/ads7846.c 	*(u16 *)t->rx_buf = val;
t                 210 drivers/input/touchscreen/bu21029_ts.c static void bu21029_touch_release(struct timer_list *t)
t                 212 drivers/input/touchscreen/bu21029_ts.c 	struct bu21029_ts_data *bu21029 = from_timer(bu21029, t, timer);
t                 740 drivers/input/touchscreen/cyttsp4_core.c 	int t;
t                 745 drivers/input/touchscreen/cyttsp4_core.c 	for (t = 0; t < max_slots; t++) {
t                 746 drivers/input/touchscreen/cyttsp4_core.c 		input_mt_slot(md->input, t);
t                 842 drivers/input/touchscreen/cyttsp4_core.c 	int t;
t                 844 drivers/input/touchscreen/cyttsp4_core.c 	for (t = 0; t < max_slots; t++) {
t                 845 drivers/input/touchscreen/cyttsp4_core.c 		if (ids[t])
t                 847 drivers/input/touchscreen/cyttsp4_core.c 		input_mt_slot(input, t);
t                 860 drivers/input/touchscreen/cyttsp4_core.c 	int i, j, t = 0;
t                 882 drivers/input/touchscreen/cyttsp4_core.c 			t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs
t                 886 drivers/input/touchscreen/cyttsp4_core.c 					__func__, t, tch.abs[CY_TCH_E]);
t                 889 drivers/input/touchscreen/cyttsp4_core.c 			input_mt_slot(md->input, t);
t                 892 drivers/input/touchscreen/cyttsp4_core.c 			ids[t] = true;
t                 928 drivers/input/touchscreen/cyttsp4_core.c 				__func__, t,
t                 939 drivers/input/touchscreen/cyttsp4_core.c 				t,
t                1258 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_watchdog_timer(struct timer_list *t)
t                1260 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer);
t                1272 drivers/input/touchscreen/cyttsp4_core.c 	int t = msecs_to_jiffies(timeout_ms);
t                1285 drivers/input/touchscreen/cyttsp4_core.c 		t = wait_event_timeout(cd->wait_q, !cd->exclusive_dev, t);
t                1286 drivers/input/touchscreen/cyttsp4_core.c 		if (IS_TMO(t)) {
t                1329 drivers/input/touchscreen/cyttsp4_core.c 	long t;
t                1334 drivers/input/touchscreen/cyttsp4_core.c 	t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_BOOTLOADER,
t                1336 drivers/input/touchscreen/cyttsp4_core.c 	if (IS_TMO(t)) {
t                1347 drivers/input/touchscreen/cyttsp4_core.c 	long t;
t                1351 drivers/input/touchscreen/cyttsp4_core.c 	t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_SYSINFO,
t                1353 drivers/input/touchscreen/cyttsp4_core.c 	if (IS_TMO(t)) {
t                1391 drivers/input/touchscreen/cyttsp4_core.c 	long t;
t                1438 drivers/input/touchscreen/cyttsp4_core.c 	t = wait_event_timeout(cd->wait_q,
t                1442 drivers/input/touchscreen/cyttsp4_core.c 			__func__, t, cd->mode);
t                1444 drivers/input/touchscreen/cyttsp4_core.c 	if (IS_TMO(t)) {
t                1762 drivers/input/touchscreen/cyttsp4_core.c 	int t;
t                1792 drivers/input/touchscreen/cyttsp4_core.c 	t = wait_event_timeout(cd->wait_q,
t                1795 drivers/input/touchscreen/cyttsp4_core.c 	if (IS_TMO(t)) {
t                  55 drivers/input/touchscreen/cyttsp4_core.h #define IS_TMO(t)			((t) == 0)
t                  53 drivers/input/touchscreen/exc3000.c static void exc3000_timer(struct timer_list *t)
t                  55 drivers/input/touchscreen/exc3000.c 	struct exc3000_data *data = from_timer(data, t, timer);
t                  31 drivers/input/touchscreen/st1232.c 	u8 t;
t                  91 drivers/input/touchscreen/st1232.c 				finger[i].t = buf[i + 6];
t                 117 drivers/input/touchscreen/st1232.c 					 finger[i].t);
t                 117 drivers/input/touchscreen/sx8654.c static void sx865x_penrelease_timer_handler(struct timer_list *t)
t                 119 drivers/input/touchscreen/sx8654.c 	struct sx8654 *ts = from_timer(ts, t, timer);
t                 197 drivers/input/touchscreen/tsc200x-core.c static void tsc200x_penup_timer(struct timer_list *t)
t                 199 drivers/input/touchscreen/tsc200x-core.c 	struct tsc200x *ts = from_timer(ts, t, penup_timer);
t                  49 drivers/iommu/amd_iommu.c #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
t                 240 drivers/iommu/intel-svm.c 		struct intel_svm *t;
t                 242 drivers/iommu/intel-svm.c 		list_for_each_entry(t, &global_svm_list, list) {
t                 243 drivers/iommu/intel-svm.c 			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
t                 246 drivers/iommu/intel-svm.c 			svm = t;
t                  97 drivers/iommu/iommu.c static const char *iommu_domain_type_str(unsigned int t)
t                  99 drivers/iommu/iommu.c 	switch (t) {
t                  27 drivers/iommu/iova.c static void fq_flush_timeout(struct timer_list *t);
t                 530 drivers/iommu/iova.c static void fq_flush_timeout(struct timer_list *t)
t                 532 drivers/iommu/iova.c 	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
t                  37 drivers/iommu/msm_iommu_hw-8xxx.h 	int t = readl(addr); \
t                  38 drivers/iommu/msm_iommu_hw-8xxx.h 	writel((t & ~((mask) << (shift))) + (((v) & (mask)) << (shift)), addr);\
t                  64 drivers/irqchip/irq-ath79-misc.c 	u32 t;
t                  66 drivers/irqchip/irq-ath79-misc.c 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
t                  67 drivers/irqchip/irq-ath79-misc.c 	__raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
t                  77 drivers/irqchip/irq-ath79-misc.c 	u32 t;
t                  79 drivers/irqchip/irq-ath79-misc.c 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
t                  80 drivers/irqchip/irq-ath79-misc.c 	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
t                  90 drivers/irqchip/irq-ath79-misc.c 	u32 t;
t                  92 drivers/irqchip/irq-ath79-misc.c 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
t                  93 drivers/irqchip/irq-ath79-misc.c 	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
t                  19 drivers/irqchip/irq-imx-irqsteer.c #define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
t                  20 drivers/irqchip/irq-imx-irqsteer.c #define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
t                  21 drivers/irqchip/irq-imx-irqsteer.c #define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
t                  22 drivers/irqchip/irq-imx-irqsteer.c #define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
t                  23 drivers/irqchip/irq-imx-irqsteer.c #define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)
t                2844 drivers/isdn/hardware/mISDN/hfcmulti.c hfcmulti_dbusy_timer(struct timer_list *t)
t                 290 drivers/isdn/hardware/mISDN/hfcpci.c hfcpci_Timer(struct timer_list *t)
t                 292 drivers/isdn/hardware/mISDN/hfcpci.c 	struct hfc_pci *hc = from_timer(hc, t, hw.timer);
t                1229 drivers/isdn/hardware/mISDN/hfcpci.c hfcpci_dbusy_timer(struct timer_list *t)
t                 713 drivers/isdn/hardware/mISDN/mISDNipac.c dbusy_timer_handler(struct timer_list *t)
t                 715 drivers/isdn/hardware/mISDN/mISDNipac.c 	struct isac_hw *isac = from_timer(isac, t, dch.timer);
t                  40 drivers/isdn/hardware/mISDN/mISDNisar.c 	int t = timeout;
t                  43 drivers/isdn/hardware/mISDN/mISDNisar.c 	while ((val & 1) && t) {
t                  45 drivers/isdn/hardware/mISDN/mISDNisar.c 		t--;
t                  48 drivers/isdn/hardware/mISDN/mISDNisar.c 	pr_debug("%s: HIA after %dus\n", isar->name, timeout - t);
t                 131 drivers/isdn/hardware/mISDN/mISDNisar.c 	int t = maxdelay;
t                 135 drivers/isdn/hardware/mISDN/mISDNisar.c 	while (t && !(irq & ISAR_IRQSTA)) {
t                 137 drivers/isdn/hardware/mISDN/mISDNisar.c 		t--;
t                 139 drivers/isdn/hardware/mISDN/mISDNisar.c 	if (t)	{
t                 144 drivers/isdn/hardware/mISDN/mISDNisar.c 		 isar->name, isar->clsb, maxdelay - t);
t                 145 drivers/isdn/hardware/mISDN/mISDNisar.c 	return t;
t                1136 drivers/isdn/hardware/mISDN/mISDNisar.c ftimer_handler(struct timer_list *t)
t                1138 drivers/isdn/hardware/mISDN/mISDNisar.c 	struct isar_ch *ch = from_timer(ch, t, ftimer);
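The polling loops quoted above from mISDNisar.c are bounded busy-waits: poll a status bit while counting a timeout variable down, then report how long the wait took or that it expired. A small userspace model of the same shape; the status read is a stand-in for the chip register access.

#include <stdio.h>

/* Stand-in for reading the ISAR host interface status register. */
static int chip_busy(void)
{
    static int remaining = 3;

    return remaining-- > 0;         /* busy for a few polls, then ready */
}

/* Poll until the busy bit clears or the budget runs out; returns the
 * unused budget, so 0 means the wait timed out. */
static int wait_until_ready(int timeout)
{
    int t = timeout;

    while (chip_busy() && t) {
        /* the driver udelay(1)s here between polls */
        t--;
    }
    printf("ready after %d polls\n", timeout - t);
    return t;
}

int main(void)
{
    return wait_until_ready(10) ? 0 : 1;
}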
t                 802 drivers/isdn/hardware/mISDN/w6692.c dbusy_timer_handler(struct timer_list *t)
t                 804 drivers/isdn/hardware/mISDN/w6692.c 	struct dchannel *dch = from_timer(dch, t, timer);
t                 262 drivers/isdn/mISDN/dsp.h extern void dsp_tone_timeout(struct timer_list *t);
t                1316 drivers/isdn/mISDN/dsp_cmx.c 	int r, rr, t, tt, o_r, o_rr;
t                1371 drivers/isdn/mISDN/dsp_cmx.c 	t = dsp->tx_R; /* tx-pointers */
t                1391 drivers/isdn/mISDN/dsp_cmx.c 	if (!dsp->tx_mix && t != tt) {
t                1394 drivers/isdn/mISDN/dsp_cmx.c 		sprintf(debugbuf, "TX sending (%04x-%04x)%p: ", t, tt, p);
t                1396 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
t                1400 drivers/isdn/mISDN/dsp_cmx.c 					p[t]);
t                1402 drivers/isdn/mISDN/dsp_cmx.c 			*d++ = p[t]; /* write tx_buff */
t                1403 drivers/isdn/mISDN/dsp_cmx.c 			t = (t + 1) & CMX_BUFF_MASK;
t                1407 drivers/isdn/mISDN/dsp_cmx.c 			dsp->tx_R = t;
t                1423 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
t                1424 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = p[t]; /* write tx_buff */
t                1425 drivers/isdn/mISDN/dsp_cmx.c 				t = (t + 1) & CMX_BUFF_MASK;
t                1440 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
t                1441 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = dsp_audio_mix_law[(p[t] << 8) | q[r]];
t                1442 drivers/isdn/mISDN/dsp_cmx.c 				t = (t + 1) & CMX_BUFF_MASK;
t                1450 drivers/isdn/mISDN/dsp_cmx.c 		dsp->tx_R = t;
t                1476 drivers/isdn/mISDN/dsp_cmx.c 			while (o_r != o_rr && t != tt) {
t                1477 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]];
t                1478 drivers/isdn/mISDN/dsp_cmx.c 				t = (t + 1) & CMX_BUFF_MASK;
t                1491 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
t                1492 drivers/isdn/mISDN/dsp_cmx.c 				sample = dsp_audio_law_to_s32[p[t]] +
t                1501 drivers/isdn/mISDN/dsp_cmx.c 				t = (t + 1) & CMX_BUFF_MASK;
t                1511 drivers/isdn/mISDN/dsp_cmx.c 		dsp->tx_R = t;
t                1521 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
t                1522 drivers/isdn/mISDN/dsp_cmx.c 			sample = dsp_audio_law_to_s32[p[t]] + *c++ -
t                1531 drivers/isdn/mISDN/dsp_cmx.c 			t = (t + 1) & CMX_BUFF_MASK;
t                1549 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
t                1550 drivers/isdn/mISDN/dsp_cmx.c 			sample = dsp_audio_law_to_s32[p[t]] + *c++;
t                1557 drivers/isdn/mISDN/dsp_cmx.c 			t = (t + 1) & CMX_BUFF_MASK;
t                1571 drivers/isdn/mISDN/dsp_cmx.c 	dsp->tx_R = t;
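The dsp_cmx.c transmit path above walks power-of-two ring buffers, advancing each index with "& CMX_BUFF_MASK" instead of a modulo and storing the read pointer back (dsp->tx_R = t) when done. A reduced sketch of that masked-index copy; the buffer size here is an assumption, not the real CMX_BUFF_SIZE.

#include <stdint.h>
#include <stddef.h>

#define RING_SIZE   0x2000              /* power of two; illustrative only */
#define RING_MASK   (RING_SIZE - 1)

/* Copy out of a ring until either the destination is full or the read
 * index catches up with the write index; return the new read index so the
 * caller can store it back, as the excerpt does with dsp->tx_R = t. */
static unsigned int ring_copy_out(const uint8_t ring[RING_SIZE],
                                  unsigned int r, unsigned int w,
                                  uint8_t *dst, size_t len)
{
    while (len-- && r != w) {
        *dst++ = ring[r];
        r = (r + 1) & RING_MASK;        /* cheap wraparound */
    }
    return r;
}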
t                 460 drivers/isdn/mISDN/dsp_tones.c dsp_tone_timeout(struct timer_list *t)
t                 462 drivers/isdn/mISDN/dsp_tones.c 	struct dsp *dsp = from_timer(dsp, t, tone.tl);
t                  96 drivers/isdn/mISDN/fsm.c FsmExpireTimer(struct timer_list *t)
t                  98 drivers/isdn/mISDN/fsm.c 	struct FsmTimer *ft = from_timer(ft, t, tl);
t                 820 drivers/isdn/mISDN/l1oip_core.c l1oip_keepalive(struct timer_list *t)
t                 822 drivers/isdn/mISDN/l1oip_core.c 	struct l1oip *hc = from_timer(hc, t, keep_tl);
t                 828 drivers/isdn/mISDN/l1oip_core.c l1oip_timeout(struct timer_list *t)
t                 830 drivers/isdn/mISDN/l1oip_core.c 	struct l1oip			*hc = from_timer(hc, t,
t                 156 drivers/isdn/mISDN/timerdev.c dev_expire_timer(struct timer_list *t)
t                 158 drivers/isdn/mISDN/timerdev.c 	struct mISDNtimer *timer = from_timer(timer, t, tl);
t                  60 drivers/leds/led-core.c static void led_timer_function(struct timer_list *t)
t                  62 drivers/leds/led-core.c 	struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer);
t                  25 drivers/leds/leds-as3645a.c #define AS_TIMER_US_TO_CODE(t)			(((t) / 1000 - 100) / 50)
t                 182 drivers/leds/leds-lm3533.c static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
t                 187 drivers/leds/leds-lm3533.c 	val = (*t + t_step / 2 - t_min) / t_step + v_min;
t                 189 drivers/leds/leds-lm3533.c 	*t = t_step * (val - v_min) + t_min;
t                 209 drivers/leds/leds-lm3533.c 	unsigned t;
t                 212 drivers/leds/leds-lm3533.c 	t = *delay * 1000;
t                 214 drivers/leds/leds-lm3533.c 	if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
t                 215 drivers/leds/leds-lm3533.c 		t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
t                 216 drivers/leds/leds-lm3533.c 		val = time_to_val(&t,	LM3533_LED_DELAY3_TMIN,
t                 220 drivers/leds/leds-lm3533.c 	} else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
t                 221 drivers/leds/leds-lm3533.c 		t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
t                 222 drivers/leds/leds-lm3533.c 		val = time_to_val(&t,	LM3533_LED_DELAY2_TMIN,
t                 227 drivers/leds/leds-lm3533.c 		t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
t                 228 drivers/leds/leds-lm3533.c 		val = time_to_val(&t,	LM3533_LED_DELAY1_TMIN,
t                 234 drivers/leds/leds-lm3533.c 	*delay = (t + 500) / 1000;
t                 246 drivers/leds/leds-lm3533.c 	unsigned t;
t                 251 drivers/leds/leds-lm3533.c 	t = (unsigned)*delay;
t                 255 drivers/leds/leds-lm3533.c 		t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
t                 257 drivers/leds/leds-lm3533.c 	val = lm3533_led_get_hw_delay(&t);
t                 260 drivers/leds/leds-lm3533.c 							*delay, t, val);
t                 266 drivers/leds/leds-lm3533.c 	*delay = t;
t                 271 drivers/leds/leds-lm3533.c static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
t                 273 drivers/leds/leds-lm3533.c 	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
t                 276 drivers/leds/leds-lm3533.c static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
t                 278 drivers/leds/leds-lm3533.c 	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
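leds-lm3533.c converts a requested delay into a register code by rounding to the nearest hardware step within a range, then writes the realised time back through the pointer so callers can report what the LED will actually do. A sketch of that conversion; the constants in the example are illustrative, not datasheet values, and the input is assumed to be clamped to the range first, as the driver does.

#include <stdio.h>

/* Round *t to the nearest step above t_min and return the matching register
 * code; the realised time is written back through the pointer. */
static unsigned int time_to_code(unsigned int *t, unsigned int t_min,
                                 unsigned int t_step, unsigned int code_min)
{
    unsigned int code = (*t + t_step / 2 - t_min) / t_step + code_min;

    *t = t_step * (code - code_min) + t_min;    /* what the hardware will use */
    return code;
}

int main(void)
{
    unsigned int t = 1234;  /* ms, assumed already clamped to this range */
    unsigned int code = time_to_code(&t, 16, 16, 0);

    printf("code %u realises %u ms\n", code, t);
    return 0;
}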
t                 136 drivers/leds/leds-sc27xx-bltc.c 	u32 v, offset, t = *delta_t;
t                 138 drivers/leds/leds-sc27xx-bltc.c 	v = t + SC27XX_LEDS_STEP / 2;
t                 228 drivers/leds/leds-tca6507.c 		int t = time_codes[c1];
t                 229 drivers/leds/leds-tca6507.c 		if (t*2 < tmin)
t                 231 drivers/leds/leds-tca6507.c 		if (t > tmax)
t                 234 drivers/leds/leds-tca6507.c 			int tt = t + time_codes[c2];
t                  32 drivers/leds/trigger/ledtrig-activity.c static void led_activity_function(struct timer_list *t)
t                  34 drivers/leds/trigger/ledtrig-activity.c 	struct activity_data *activity_data = from_timer(activity_data, t,
t                  32 drivers/leds/trigger/ledtrig-heartbeat.c static void led_heartbeat_function(struct timer_list *t)
t                  35 drivers/leds/trigger/ledtrig-heartbeat.c 		from_timer(heartbeat_data, t, timer);
t                  74 drivers/leds/trigger/ledtrig-pattern.c static void pattern_trig_timer_function(struct timer_list *t)
t                  76 drivers/leds/trigger/ledtrig-pattern.c 	struct pattern_trig_data *data = from_timer(data, t, timer);
t                  32 drivers/leds/trigger/ledtrig-transient.c static void transient_timer_function(struct timer_list *t)
t                  35 drivers/leds/trigger/ledtrig-transient.c 		from_timer(transient_data, t, timer);
t                 315 drivers/lightnvm/core.c 	struct nvm_target *t;
t                 364 drivers/lightnvm/core.c 	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
t                 365 drivers/lightnvm/core.c 	if (!t) {
t                 421 drivers/lightnvm/core.c 	t->type = tt;
t                 422 drivers/lightnvm/core.c 	t->disk = tdisk;
t                 423 drivers/lightnvm/core.c 	t->dev = tgt_dev;
t                 426 drivers/lightnvm/core.c 	list_add_tail(&t->list, &dev->targets);
t                 443 drivers/lightnvm/core.c 	kfree(t);
t                 449 drivers/lightnvm/core.c static void __nvm_remove_target(struct nvm_target *t, bool graceful)
t                 451 drivers/lightnvm/core.c 	struct nvm_tgt_type *tt = t->type;
t                 452 drivers/lightnvm/core.c 	struct gendisk *tdisk = t->disk;
t                 464 drivers/lightnvm/core.c 	nvm_remove_tgt_dev(t->dev, 1);
t                 466 drivers/lightnvm/core.c 	module_put(t->type->owner);
t                 468 drivers/lightnvm/core.c 	list_del(&t->list);
t                 469 drivers/lightnvm/core.c 	kfree(t);
t                 483 drivers/lightnvm/core.c 	struct nvm_target *t = NULL;
t                 489 drivers/lightnvm/core.c 		t = nvm_find_target(dev, remove->tgtname);
t                 490 drivers/lightnvm/core.c 		if (t) {
t                 498 drivers/lightnvm/core.c 	if (!t) {
t                 504 drivers/lightnvm/core.c 	__nvm_remove_target(t, true);
t                1217 drivers/lightnvm/core.c 	struct nvm_target *t, *tmp;
t                1220 drivers/lightnvm/core.c 	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
t                1221 drivers/lightnvm/core.c 		if (t->dev->parent != dev)
t                1223 drivers/lightnvm/core.c 		__nvm_remove_target(t, false);
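The lightnvm core above walks dev->targets with list_for_each_entry_safe() precisely because the loop body calls list_del() and kfree() on the current entry. A userspace model of an intrusive list showing why the safe variant caches the next pointer before the entry is freed; the types and helpers are simplified stand-ins for <linux/list.h>.

#include <stddef.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

struct target {                         /* stand-in for struct nvm_target */
    int id;
    struct list_head list;
};

static void list_add_tail(struct list_head *e, struct list_head *head)
{
    e->prev = head->prev;
    e->next = head;
    head->prev->next = e;
    head->prev = e;
}

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct list_head targets = { &targets, &targets };
    struct list_head *pos, *tmp;

    for (int i = 0; i < 3; i++) {
        struct target *t = malloc(sizeof(*t));

        t->id = i;
        list_add_tail(&t->list, &targets);
    }

    /* The "safe" walk: tmp remembers the next node before pos is freed,
     * mirroring list_for_each_entry_safe() in the excerpt above. */
    for (pos = targets.next; pos != &targets; pos = tmp) {
        tmp = pos->next;
        list_del(pos);
        free(list_entry(pos, struct target, list));
    }
    return 0;
}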
t                 369 drivers/lightnvm/pblk-core.c void pblk_write_timer_fn(struct timer_list *t)
t                 371 drivers/lightnvm/pblk-core.c 	struct pblk *pblk = from_timer(pblk, t, wtimer);
t                 501 drivers/lightnvm/pblk-gc.c static void pblk_gc_timer(struct timer_list *t)
t                 503 drivers/lightnvm/pblk-gc.c 	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
t                  28 drivers/lightnvm/pblk-rb.c 	struct pblk_rb_pages *p, *t;
t                  31 drivers/lightnvm/pblk-rb.c 	list_for_each_entry_safe(p, t, &rb->pages, list) {
t                 534 drivers/lightnvm/pblk-recovery.c 	struct pblk_line *t = NULL;
t                 536 drivers/lightnvm/pblk-recovery.c 	list_for_each_entry(t, head, list)
t                 537 drivers/lightnvm/pblk-recovery.c 		if (t->seq_nr > line->seq_nr)
t                 540 drivers/lightnvm/pblk-recovery.c 	__list_add(&line->list, t->list.prev, &t->list);
t                 197 drivers/lightnvm/pblk-rl.c static void pblk_rl_u_timer(struct timer_list *t)
t                 199 drivers/lightnvm/pblk-rl.c 	struct pblk_rl *rl = from_timer(rl, t, u_timer);
t                 864 drivers/lightnvm/pblk.h void pblk_write_timer_fn(struct timer_list *t);
t                 104 drivers/macintosh/smu.c static void smu_i2c_retry(struct timer_list *t);
t                 266 drivers/macintosh/windfarm_pm112.c 	int i, t, target = 0;
t                 302 drivers/macintosh/windfarm_pm112.c 		t = wf_cpu_pid_run(sp, power, temp);
t                 306 drivers/macintosh/windfarm_pm112.c 			target = t;
t                  76 drivers/mailbox/bcm-pdc-mailbox.c #define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
t                  77 drivers/mailbox/bcm-pdc-mailbox.c #define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))
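The NTXDACTIVE/NRXDACTIVE macros above compute ring occupancy from producer and consumer indices with a mask, which stays correct across index wraparound thanks to unsigned subtraction. A short check of that arithmetic:

#include <assert.h>

/* Occupied slots in a power-of-two ring given head (consumer) and
 * tail (producer) indices; (tail - head) wraps correctly when unsigned. */
static unsigned int ring_active(unsigned int head, unsigned int tail,
                                unsigned int mask)
{
    return (tail - head) & mask;
}

int main(void)
{
    assert(ring_active(3, 7, 0xff) == 4);           /* plain case */
    assert(ring_active(0xfe, 0x02, 0xff) == 4);     /* producer has wrapped */
    return 0;
}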
t                 131 drivers/mailbox/mailbox-altera.c static void altera_mbox_poll_rx(struct timer_list *t)
t                 133 drivers/mailbox/mailbox-altera.c 	struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
t                 250 drivers/mailbox/mailbox.c 	int t;
t                 255 drivers/mailbox/mailbox.c 	t = add_to_rbuf(chan, mssg);
t                 256 drivers/mailbox/mailbox.c 	if (t < 0) {
t                 258 drivers/mailbox/mailbox.c 		return t;
t                 274 drivers/mailbox/mailbox.c 			t = -ETIME;
t                 275 drivers/mailbox/mailbox.c 			tx_tick(chan, t);
t                 279 drivers/mailbox/mailbox.c 	return t;
t                  21 drivers/mailbox/mtk-cmdq-mailbox.c #define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)
t                 292 drivers/md/bcache/bset.c 	struct bset_tree *t = b->set;
t                 295 drivers/md/bcache/bset.c 		kfree(t->prev);
t                 297 drivers/md/bcache/bset.c 		free_pages((unsigned long) t->prev,
t                 301 drivers/md/bcache/bset.c 		kfree(t->tree);
t                 303 drivers/md/bcache/bset.c 		free_pages((unsigned long) t->tree,
t                 306 drivers/md/bcache/bset.c 	free_pages((unsigned long) t->data, b->page_order);
t                 308 drivers/md/bcache/bset.c 	t->prev = NULL;
t                 309 drivers/md/bcache/bset.c 	t->tree = NULL;
t                 310 drivers/md/bcache/bset.c 	t->data = NULL;
t                 318 drivers/md/bcache/bset.c 	struct bset_tree *t = b->set;
t                 320 drivers/md/bcache/bset.c 	BUG_ON(t->data);
t                 324 drivers/md/bcache/bset.c 	t->data = (void *) __get_free_pages(gfp, b->page_order);
t                 325 drivers/md/bcache/bset.c 	if (!t->data)
t                 328 drivers/md/bcache/bset.c 	t->tree = bset_tree_bytes(b) < PAGE_SIZE
t                 331 drivers/md/bcache/bset.c 	if (!t->tree)
t                 334 drivers/md/bcache/bset.c 	t->prev = bset_prev_bytes(b) < PAGE_SIZE
t                 337 drivers/md/bcache/bset.c 	if (!t->prev)
t                 438 drivers/md/bcache/bset.c static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
t                 440 drivers/md/bcache/bset.c 	return __to_inorder(j, t->size, t->extra);
t                 464 drivers/md/bcache/bset.c static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
t                 466 drivers/md/bcache/bset.c 	return __inorder_to_tree(j, t->size, t->extra);
t                 526 drivers/md/bcache/bset.c static struct bkey *cacheline_to_bkey(struct bset_tree *t,
t                 530 drivers/md/bcache/bset.c 	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
t                 533 drivers/md/bcache/bset.c static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
t                 535 drivers/md/bcache/bset.c 	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
t                 538 drivers/md/bcache/bset.c static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
t                 542 drivers/md/bcache/bset.c 	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
t                 545 drivers/md/bcache/bset.c static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
t                 547 drivers/md/bcache/bset.c 	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
t                 550 drivers/md/bcache/bset.c static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
t                 552 drivers/md/bcache/bset.c 	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
t                 559 drivers/md/bcache/bset.c static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
t                 561 drivers/md/bcache/bset.c 	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
t                 593 drivers/md/bcache/bset.c static void make_bfloat(struct bset_tree *t, unsigned int j)
t                 595 drivers/md/bcache/bset.c 	struct bkey_float *f = &t->tree[j];
t                 596 drivers/md/bcache/bset.c 	struct bkey *m = tree_to_bkey(t, j);
t                 597 drivers/md/bcache/bset.c 	struct bkey *p = tree_to_prev_bkey(t, j);
t                 600 drivers/md/bcache/bset.c 		? t->data->start
t                 601 drivers/md/bcache/bset.c 		: tree_to_prev_bkey(t, j >> ffs(j));
t                 604 drivers/md/bcache/bset.c 		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
t                 605 drivers/md/bcache/bset.c 		: tree_to_bkey(t, j >> (ffz(j) + 1));
t                 638 drivers/md/bcache/bset.c static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
t                 640 drivers/md/bcache/bset.c 	if (t != b->set) {
t                 641 drivers/md/bcache/bset.c 		unsigned int j = roundup(t[-1].size,
t                 644 drivers/md/bcache/bset.c 		t->tree = t[-1].tree + j;
t                 645 drivers/md/bcache/bset.c 		t->prev = t[-1].prev + j;
t                 648 drivers/md/bcache/bset.c 	while (t < b->set + MAX_BSETS)
t                 649 drivers/md/bcache/bset.c 		t++->size = 0;
t                 654 drivers/md/bcache/bset.c 	struct bset_tree *t = bset_tree_last(b);
t                 659 drivers/md/bcache/bset.c 	bset_alloc_tree(b, t);
t                 661 drivers/md/bcache/bset.c 	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
t                 662 drivers/md/bcache/bset.c 		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
t                 663 drivers/md/bcache/bset.c 		t->size = 1;
t                 694 drivers/md/bcache/bset.c 	struct bset_tree *t = bset_tree_last(b);
t                 695 drivers/md/bcache/bset.c 	struct bkey *prev = NULL, *k = t->data->start;
t                 700 drivers/md/bcache/bset.c 	bset_alloc_tree(b, t);
t                 702 drivers/md/bcache/bset.c 	t->size = min_t(unsigned int,
t                 703 drivers/md/bcache/bset.c 			bkey_to_cacheline(t, bset_bkey_last(t->data)),
t                 704 drivers/md/bcache/bset.c 			b->set->tree + btree_keys_cachelines(b) - t->tree);
t                 706 drivers/md/bcache/bset.c 	if (t->size < 2) {
t                 707 drivers/md/bcache/bset.c 		t->size = 0;
t                 711 drivers/md/bcache/bset.c 	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
t                 714 drivers/md/bcache/bset.c 	for (j = inorder_next(0, t->size);
t                 716 drivers/md/bcache/bset.c 	     j = inorder_next(j, t->size)) {
t                 717 drivers/md/bcache/bset.c 		while (bkey_to_cacheline(t, k) < cacheline)
t                 720 drivers/md/bcache/bset.c 		t->prev[j] = bkey_u64s(prev);
t                 721 drivers/md/bcache/bset.c 		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
t                 724 drivers/md/bcache/bset.c 	while (bkey_next(k) != bset_bkey_last(t->data))
t                 727 drivers/md/bcache/bset.c 	t->end = *k;
t                 730 drivers/md/bcache/bset.c 	for (j = inorder_next(0, t->size);
t                 732 drivers/md/bcache/bset.c 	     j = inorder_next(j, t->size))
t                 733 drivers/md/bcache/bset.c 		make_bfloat(t, j);
t                 741 drivers/md/bcache/bset.c 	struct bset_tree *t;
t                 744 drivers/md/bcache/bset.c 	for (t = b->set; t <= bset_tree_last(b); t++)
t                 745 drivers/md/bcache/bset.c 		if (k < bset_bkey_last(t->data))
t                 750 drivers/md/bcache/bset.c 	if (!t->size || !bset_written(b, t))
t                 753 drivers/md/bcache/bset.c 	inorder = bkey_to_cacheline(t, k);
t                 755 drivers/md/bcache/bset.c 	if (k == t->data->start)
t                 758 drivers/md/bcache/bset.c 	if (bkey_next(k) == bset_bkey_last(t->data)) {
t                 759 drivers/md/bcache/bset.c 		t->end = *k;
t                 763 drivers/md/bcache/bset.c 	j = inorder_to_tree(inorder, t);
t                 766 drivers/md/bcache/bset.c 	    j < t->size &&
t                 767 drivers/md/bcache/bset.c 	    k == tree_to_bkey(t, j))
t                 769 drivers/md/bcache/bset.c 			make_bfloat(t, j);
t                 771 drivers/md/bcache/bset.c 		} while (j < t->size);
t                 773 drivers/md/bcache/bset.c 	j = inorder_to_tree(inorder + 1, t);
t                 776 drivers/md/bcache/bset.c 	    j < t->size &&
t                 777 drivers/md/bcache/bset.c 	    k == tree_to_prev_bkey(t, j))
t                 779 drivers/md/bcache/bset.c 			make_bfloat(t, j);
t                 781 drivers/md/bcache/bset.c 		} while (j < t->size);
t                 786 drivers/md/bcache/bset.c 				      struct bset_tree *t,
t                 790 drivers/md/bcache/bset.c 	unsigned int j = bkey_to_cacheline(t, k);
t                 793 drivers/md/bcache/bset.c 	if (!t->size)
t                 801 drivers/md/bcache/bset.c 	while (j < t->size &&
t                 802 drivers/md/bcache/bset.c 	       table_to_bkey(t, j) <= k)
t                 809 drivers/md/bcache/bset.c 	for (; j < t->size; j++) {
t                 810 drivers/md/bcache/bset.c 		t->prev[j] += shift;
t                 812 drivers/md/bcache/bset.c 		if (t->prev[j] > 7) {
t                 813 drivers/md/bcache/bset.c 			k = table_to_bkey(t, j - 1);
t                 815 drivers/md/bcache/bset.c 			while (k < cacheline_to_bkey(t, j, 0))
t                 818 drivers/md/bcache/bset.c 			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
t                 822 drivers/md/bcache/bset.c 	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
t                 827 drivers/md/bcache/bset.c 	for (k = table_to_bkey(t, t->size - 1);
t                 828 drivers/md/bcache/bset.c 	     k != bset_bkey_last(t->data);
t                 830 drivers/md/bcache/bset.c 		if (t->size == bkey_to_cacheline(t, k)) {
t                 831 drivers/md/bcache/bset.c 			t->prev[t->size] =
t                 832 drivers/md/bcache/bset.c 				bkey_to_cacheline_offset(t, t->size, k);
t                 833 drivers/md/bcache/bset.c 			t->size++;
t                 863 drivers/md/bcache/bset.c 	struct bset_tree *t = bset_tree_last(b);
t                 866 drivers/md/bcache/bset.c 	BUG_ON(bset_byte_offset(b, t->data) +
t                 867 drivers/md/bcache/bset.c 	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
t                 872 drivers/md/bcache/bset.c 		(void *) bset_bkey_last(t->data) - (void *) where);
t                 874 drivers/md/bcache/bset.c 	t->data->keys += bkey_u64s(insert);
t                 876 drivers/md/bcache/bset.c 	bch_bset_fix_lookup_table(b, t, where);
t                 942 drivers/md/bcache/bset.c static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
t                 945 drivers/md/bcache/bset.c 	unsigned int li = 0, ri = t->size;
t                 950 drivers/md/bcache/bset.c 		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
t                 957 drivers/md/bcache/bset.c 		table_to_bkey(t, li),
t                 958 drivers/md/bcache/bset.c 		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
t                 962 drivers/md/bcache/bset.c static struct bset_search_iter bset_search_tree(struct bset_tree *t,
t                 972 drivers/md/bcache/bset.c 		if (p < t->size)
t                 973 drivers/md/bcache/bset.c 			prefetch(&t->tree[p]);
t                 976 drivers/md/bcache/bset.c 		f = &t->tree[j];
t                 984 drivers/md/bcache/bset.c 			if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
t                 989 drivers/md/bcache/bset.c 	} while (n < t->size);
t                 991 drivers/md/bcache/bset.c 	inorder = to_inorder(j, t);
t                 998 drivers/md/bcache/bset.c 		l = cacheline_to_bkey(t, inorder, f->m);
t                1000 drivers/md/bcache/bset.c 		if (++inorder != t->size) {
t                1001 drivers/md/bcache/bset.c 			f = &t->tree[inorder_next(j, t->size)];
t                1002 drivers/md/bcache/bset.c 			r = cacheline_to_bkey(t, inorder, f->m);
t                1004 drivers/md/bcache/bset.c 			r = bset_bkey_last(t->data);
t                1006 drivers/md/bcache/bset.c 		r = cacheline_to_bkey(t, inorder, f->m);
t                1009 drivers/md/bcache/bset.c 			f = &t->tree[inorder_prev(j, t->size)];
t                1010 drivers/md/bcache/bset.c 			l = cacheline_to_bkey(t, inorder, f->m);
t                1012 drivers/md/bcache/bset.c 			l = t->data->start;
t                1018 drivers/md/bcache/bset.c struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
t                1038 drivers/md/bcache/bset.c 	if (unlikely(!t->size)) {
t                1039 drivers/md/bcache/bset.c 		i.l = t->data->start;
t                1040 drivers/md/bcache/bset.c 		i.r = bset_bkey_last(t->data);
t                1041 drivers/md/bcache/bset.c 	} else if (bset_written(b, t)) {
t                1049 drivers/md/bcache/bset.c 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
t                1050 drivers/md/bcache/bset.c 			return bset_bkey_last(t->data);
t                1052 drivers/md/bcache/bset.c 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
t                1053 drivers/md/bcache/bset.c 			return t->data->start;
t                1055 drivers/md/bcache/bset.c 		i = bset_search_tree(t, search);
t                1058 drivers/md/bcache/bset.c 		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
t                1060 drivers/md/bcache/bset.c 		i = bset_search_write_set(t, search);
t                1064 drivers/md/bcache/bset.c 		BUG_ON(bset_written(b, t) &&
t                1065 drivers/md/bcache/bset.c 		       i.l != t->data->start &&
t                1066 drivers/md/bcache/bset.c 		       bkey_cmp(tree_to_prev_bkey(t,
t                1067 drivers/md/bcache/bset.c 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
t                1070 drivers/md/bcache/bset.c 		BUG_ON(i.r != bset_bkey_last(t->data) &&
t                1376 drivers/md/bcache/bset.c 		struct bset_tree *t = &b->set[i];
t                1377 drivers/md/bcache/bset.c 		size_t bytes = t->data->keys * sizeof(uint64_t);
t                1380 drivers/md/bcache/bset.c 		if (bset_written(b, t)) {
t                1384 drivers/md/bcache/bset.c 			stats->floats += t->size - 1;
t                1386 drivers/md/bcache/bset.c 			for (j = 1; j < t->size; j++)
t                1387 drivers/md/bcache/bset.c 				if (t->tree[j].exponent == 127)
t                 239 drivers/md/bcache/bset.h static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
t                 241 drivers/md/bcache/bset.h 	return t <= b->set + b->nsets - b->last_set_unwritten;
t                 271 drivers/md/bcache/bset.h 	struct bset_tree *t = bset_tree_last(b);
t                 274 drivers/md/bcache/bset.h 	       (bset_byte_offset(b, t->data) + set_bytes(t->data)));
t                 280 drivers/md/bcache/bset.h 		(bset_byte_offset(b, t->data) + set_bytes(t->data))) /
t                 340 drivers/md/bcache/bset.h struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
t                 347 drivers/md/bcache/bset.h 					   struct bset_tree *t,
t                 350 drivers/md/bcache/bset.h 	return search ? __bch_bset_search(b, t, search) : t->data->start;
t                 701 drivers/md/bcache/btree.c 	struct btree *b, *t;
t                 732 drivers/md/bcache/btree.c 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
t                1321 drivers/md/bcache/btree.c 	struct bset_tree *t;
t                1339 drivers/md/bcache/btree.c 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
t                1340 drivers/md/bcache/btree.c 		btree_bug_on(t->size &&
t                1341 drivers/md/bcache/btree.c 			     bset_written(&b->keys, t) &&
t                1342 drivers/md/bcache/btree.c 			     bkey_cmp(&b->key, &t->end) < 0,
t                  65 drivers/md/bcache/closure.c 	struct closure *cl, *t;
t                  74 drivers/md/bcache/closure.c 	llist_for_each_entry_safe(cl, t, reverse, list) {
t                 148 drivers/md/bcache/io.c 		unsigned int t = local_clock_us();
t                 149 drivers/md/bcache/io.c 		int us = t - b->submit_time_us;
t                 155 drivers/md/bcache/io.c 			c->congested_last_us = t;
t                 424 drivers/md/bcache/journal.c 	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
t                 461 drivers/md/bcache/journal.c 	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
t                 362 drivers/md/bcache/request.c static void add_sequential(struct task_struct *t)
t                 364 drivers/md/bcache/request.c 	ewma_add(t->sequential_io_avg,
t                 365 drivers/md/bcache/request.c 		 t->sequential_io, 8, 0);
t                 367 drivers/md/bcache/request.c 	t->sequential_io = 0;
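add_sequential() above folds each completed sequential I/O run into a running average with ewma_add(..., 8, 0), an exponentially weighted moving average with weight 8. The sketch below models the effect of that update (keep 7/8 of the old value, add 1/8 of the sample); the kernel macro achieves this with fixed-point shifts, so treat this as an approximation of the behaviour, not the exact formula.

#include <stdio.h>

/* One step of an exponentially weighted moving average with the given
 * weight: keep (weight-1)/weight of the old average plus 1/weight of the
 * new sample. */
static unsigned long long ewma_step(unsigned long long avg,
                                    unsigned long long sample,
                                    unsigned int weight)
{
    return avg - avg / weight + sample / weight;
}

int main(void)
{
    unsigned long long avg = 0;
    unsigned long long runs[] = { 4096, 8192, 8192, 131072 };

    for (unsigned int i = 0; i < sizeof(runs) / sizeof(runs[0]); i++) {
        avg = ewma_step(avg, runs[i], 8);
        printf("after a %llu-byte run: average %llu\n", runs[i], avg);
    }
    return 0;
}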
t                 154 drivers/md/bcache/stats.c static void scale_accounting(struct timer_list *t)
t                 156 drivers/md/bcache/stats.c 	struct cache_accounting *acc = from_timer(acc, t, timer);
t                 159 drivers/md/bcache/stats.c 	unsigned int t = atomic_xchg(&acc->collector.name, 0);		\
t                 160 drivers/md/bcache/stats.c 	t <<= 16;							\
t                 161 drivers/md/bcache/stats.c 	acc->five_minute.name += t;					\
t                 162 drivers/md/bcache/stats.c 	acc->hour.name += t;						\
t                 163 drivers/md/bcache/stats.c 	acc->day.name += t;						\
t                 164 drivers/md/bcache/stats.c 	acc->total.name += t;						\
t                1105 drivers/md/bcache/super.c 	struct cached_dev *exist_dc, *t;
t                1132 drivers/md/bcache/super.c 	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
t                1849 drivers/md/bcache/super.c 	struct cached_dev *dc, *t;
t                2022 drivers/md/bcache/super.c 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
t                2342 drivers/md/bcache/super.c 	struct cached_dev *dc, *t;
t                2345 drivers/md/bcache/super.c 		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
t                2348 drivers/md/bcache/super.c 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
t                  95 drivers/md/bcache/util.c 	int u = 0, t;
t                 111 drivers/md/bcache/util.c 		t = q & ~(~0 << 10);
t                 119 drivers/md/bcache/util.c 		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
t                 121 drivers/md/bcache/util.c 		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
t                 287 drivers/md/bcache/util.h #define ANYSINT_MAX(t)							\
t                 288 drivers/md/bcache/util.h 	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
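ANYSINT_MAX(t) above builds the largest value representable in any signed integer type without overflowing an intermediate: ((1 << (bits - 2)) - 1) * 2 + 1. A quick compile-and-assert check of that arithmetic:

#include <assert.h>
#include <limits.h>

#define ANYSINT_MAX(t) \
    ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int main(void)
{
    /* For int: ((1 << 30) - 1) * 2 + 1 = 0x7fffffff, with every
     * intermediate value still representable in the type itself. */
    assert(ANYSINT_MAX(int) == INT_MAX);
    assert(ANYSINT_MAX(long long) == LLONG_MAX);
    return 0;
}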
t                  22 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t;
t                  24 drivers/md/dm-cache-policy.c 	list_for_each_entry(t, &register_list, list)
t                  25 drivers/md/dm-cache-policy.c 		if (!strcmp(t->name, name))
t                  26 drivers/md/dm-cache-policy.c 			return t;
t                  33 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = __find_policy(name);
t                  35 drivers/md/dm-cache-policy.c 	if (t && !try_module_get(t->owner)) {
t                  37 drivers/md/dm-cache-policy.c 		t = ERR_PTR(-EINVAL);
t                  40 drivers/md/dm-cache-policy.c 	return t;
t                  45 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t;
t                  48 drivers/md/dm-cache-policy.c 	t = __get_policy_once(name);
t                  51 drivers/md/dm-cache-policy.c 	return t;
t                  56 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t;
t                  58 drivers/md/dm-cache-policy.c 	t = get_policy_once(name);
t                  59 drivers/md/dm-cache-policy.c 	if (IS_ERR(t))
t                  62 drivers/md/dm-cache-policy.c 	if (t)
t                  63 drivers/md/dm-cache-policy.c 		return t;
t                  67 drivers/md/dm-cache-policy.c 	t = get_policy_once(name);
t                  68 drivers/md/dm-cache-policy.c 	if (IS_ERR(t))
t                  71 drivers/md/dm-cache-policy.c 	return t;
t                  74 drivers/md/dm-cache-policy.c static void put_policy(struct dm_cache_policy_type *t)
t                  76 drivers/md/dm-cache-policy.c 	module_put(t->owner);
t                 138 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
t                 141 drivers/md/dm-cache-policy.c 	put_policy(t);
t                 147 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
t                 150 drivers/md/dm-cache-policy.c 	if (t->real)
t                 151 drivers/md/dm-cache-policy.c 		return t->real->name;
t                 153 drivers/md/dm-cache-policy.c 	return t->name;
t                 159 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
t                 161 drivers/md/dm-cache-policy.c 	return t->version;
t                 167 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
t                 169 drivers/md/dm-cache-policy.c 	return t->hint_size;
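dm-cache-policy.c resolves a policy by name, takes a module reference, and, if nothing is registered yet, asks for the module to be loaded and looks again; that is why get_policy() calls get_policy_once() on either side of a request_module(). A condensed sketch of that try/load/retry shape; the types are stand-ins and the module and locking calls are only indicated in comments.

#include <stddef.h>
#include <string.h>

struct policy_type {                    /* stand-in for dm_cache_policy_type */
    const char *name;
    struct policy_type *next;
};

static struct policy_type *registered;  /* guarded by a mutex in the driver */

static struct policy_type *find_policy(const char *name)
{
    for (struct policy_type *t = registered; t; t = t->next)
        if (!strcmp(t->name, name))
            return t;
    return NULL;
}

static struct policy_type *get_policy(const char *name)
{
    struct policy_type *t = find_policy(name);  /* try_module_get() would follow */

    if (t)
        return t;
    /* request_module("dm-cache-%s", name) would run here, then retry: */
    return find_policy(name);
}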
t                  51 drivers/md/dm-delay.c static void handle_delayed_timer(struct timer_list *t)
t                  53 drivers/md/dm-delay.c 	struct delay_c *dc = from_timer(dc, t, delay_timer);
t                1365 drivers/md/dm-integrity.c static void autocommit_fn(struct timer_list *t)
t                1367 drivers/md/dm-integrity.c 	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
t                2351 drivers/md/dm-integrity.c 	__u8 *t;
t                2431 drivers/md/dm-integrity.c 	t = ic->recalc_tags;
t                2433 drivers/md/dm-integrity.c 		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
t                2434 drivers/md/dm-integrity.c 		t += ic->tag_size;
t                2439 drivers/md/dm-integrity.c 	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
t                 285 drivers/md/dm-ioctl.c 	struct dm_table *t;
t                 304 drivers/md/dm-ioctl.c 			t = __hash_remove(hc);
t                 308 drivers/md/dm-ioctl.c 			if (t) {
t                 310 drivers/md/dm-ioctl.c 				dm_table_destroy(t);
t                 868 drivers/md/dm-ioctl.c 	struct dm_table *t;
t                 897 drivers/md/dm-ioctl.c 	t = __hash_remove(hc);
t                 900 drivers/md/dm-ioctl.c 	if (t) {
t                 902 drivers/md/dm-ioctl.c 		dm_table_destroy(t);
t                1333 drivers/md/dm-ioctl.c 	struct dm_table *t, *old_map = NULL;
t                1341 drivers/md/dm-ioctl.c 	r = dm_table_create(&t, get_mode(param), param->target_count, md);
t                1347 drivers/md/dm-ioctl.c 	r = populate_table(t, param, param_size);
t                1353 drivers/md/dm-ioctl.c 	    (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
t                1354 drivers/md/dm-ioctl.c 	    !dm_table_get_wildcard_target(t)) {
t                1363 drivers/md/dm-ioctl.c 		dm_set_md_type(md, dm_table_get_type(t));
t                1366 drivers/md/dm-ioctl.c 		r = dm_setup_md_queue(md, t);
t                1371 drivers/md/dm-ioctl.c 	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
t                1373 drivers/md/dm-ioctl.c 		       dm_get_md_type(md), dm_table_get_type(t));
t                1392 drivers/md/dm-ioctl.c 	hc->new_map = t;
t                1410 drivers/md/dm-ioctl.c 	dm_table_destroy(t);
t                2071 drivers/md/dm-ioctl.c 	struct dm_table *t, *old_map;
t                2096 drivers/md/dm-ioctl.c 	r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
t                2102 drivers/md/dm-ioctl.c 		r = dm_table_add_target(t, spec_array[i]->target_type,
t                2113 drivers/md/dm-ioctl.c 	r = dm_table_complete(t);
t                2117 drivers/md/dm-ioctl.c 	md->type = dm_table_get_type(t);
t                2119 drivers/md/dm-ioctl.c 	r = dm_setup_md_queue(md, t);
t                2127 drivers/md/dm-ioctl.c 	old_map = dm_swap_table(md, t);
t                2144 drivers/md/dm-ioctl.c 	dm_table_destroy(t);
t                 120 drivers/md/dm-kcopyd.c static void io_job_start(struct dm_kcopyd_throttle *t)
t                 125 drivers/md/dm-kcopyd.c 	if (unlikely(!t))
t                 131 drivers/md/dm-kcopyd.c 	throttle = READ_ONCE(t->throttle);
t                 137 drivers/md/dm-kcopyd.c 	difference = now - t->last_jiffies;
t                 138 drivers/md/dm-kcopyd.c 	t->last_jiffies = now;
t                 139 drivers/md/dm-kcopyd.c 	if (t->num_io_jobs)
t                 140 drivers/md/dm-kcopyd.c 		t->io_period += difference;
t                 141 drivers/md/dm-kcopyd.c 	t->total_period += difference;
t                 146 drivers/md/dm-kcopyd.c 	if (unlikely(t->io_period > t->total_period))
t                 147 drivers/md/dm-kcopyd.c 		t->io_period = t->total_period;
t                 149 drivers/md/dm-kcopyd.c 	if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
t                 150 drivers/md/dm-kcopyd.c 		int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
t                 151 drivers/md/dm-kcopyd.c 		t->total_period >>= shift;
t                 152 drivers/md/dm-kcopyd.c 		t->io_period >>= shift;
t                 155 drivers/md/dm-kcopyd.c 	skew = t->io_period - throttle * t->total_period / 100;
t                 165 drivers/md/dm-kcopyd.c 	t->num_io_jobs++;
t                 170 drivers/md/dm-kcopyd.c static void io_job_finish(struct dm_kcopyd_throttle *t)
t                 174 drivers/md/dm-kcopyd.c 	if (unlikely(!t))
t                 179 drivers/md/dm-kcopyd.c 	t->num_io_jobs--;
t                 181 drivers/md/dm-kcopyd.c 	if (likely(READ_ONCE(t->throttle) >= 100))
t                 184 drivers/md/dm-kcopyd.c 	if (!t->num_io_jobs) {
t                 188 drivers/md/dm-kcopyd.c 		difference = now - t->last_jiffies;
t                 189 drivers/md/dm-kcopyd.c 		t->last_jiffies = now;
t                 191 drivers/md/dm-kcopyd.c 		t->io_period += difference;
t                 192 drivers/md/dm-kcopyd.c 		t->total_period += difference;
t                 197 drivers/md/dm-kcopyd.c 		if (unlikely(t->io_period > t->total_period))
t                 198 drivers/md/dm-kcopyd.c 			t->io_period = t->total_period;
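The kcopyd throttle above accounts two clocks per interval, total_period and io_period, and makes new copy jobs wait whenever the I/O share exceeds the configured percentage; the decision reduces to the sign of io_period - throttle * total_period / 100. A small check of that skew test, with the jiffies bookkeeping and sleeping left out:

#include <assert.h>

/* Return nonzero when I/O time already exceeds its percentage budget. */
static int throttle_should_wait(unsigned int throttle,
                                unsigned long io_period,
                                unsigned long total_period)
{
    long skew;

    if (throttle >= 100)
        return 0;                               /* unthrottled */
    skew = (long)io_period - (long)(throttle * total_period / 100);
    return skew > 0;                            /* over budget: sleep */
}

int main(void)
{
    assert(!throttle_should_wait(100, 50, 50)); /* throttling disabled */
    assert(throttle_should_wait(10, 30, 100));  /* 30% of time in I/O vs 10% budget */
    assert(!throttle_should_wait(50, 30, 100)); /* under budget */
    return 0;
}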
t                  99 drivers/md/dm-raid1.c static void delayed_wake_fn(struct timer_list *t)
t                 101 drivers/md/dm-raid1.c 	struct mirror_set *ms = from_timer(ms, t, timer);
t                 537 drivers/md/dm-rq.c int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
t                 555 drivers/md/dm-rq.c 	immutable_tgt = dm_table_get_immutable_target(t);
t                  33 drivers/md/dm-rq.h int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
t                  97 drivers/md/dm-table.c static inline sector_t *get_node(struct dm_table *t,
t                 100 drivers/md/dm-table.c 	return t->index[l] + (n * KEYS_PER_NODE);
t                 107 drivers/md/dm-table.c static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
t                 109 drivers/md/dm-table.c 	for (; l < t->depth - 1; l++)
t                 112 drivers/md/dm-table.c 	if (n >= t->counts[l])
t                 115 drivers/md/dm-table.c 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
t                 122 drivers/md/dm-table.c static int setup_btree_index(unsigned int l, struct dm_table *t)
t                 127 drivers/md/dm-table.c 	for (n = 0U; n < t->counts[l]; n++) {
t                 128 drivers/md/dm-table.c 		node = get_node(t, l, n);
t                 131 drivers/md/dm-table.c 			node[k] = high(t, l + 1, get_child(n, k));
t                 159 drivers/md/dm-table.c static int alloc_targets(struct dm_table *t, unsigned int num)
t                 175 drivers/md/dm-table.c 	vfree(t->highs);
t                 177 drivers/md/dm-table.c 	t->num_allocated = num;
t                 178 drivers/md/dm-table.c 	t->highs = n_highs;
t                 179 drivers/md/dm-table.c 	t->targets = n_targets;
t                 187 drivers/md/dm-table.c 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
t                 189 drivers/md/dm-table.c 	if (!t)
t                 192 drivers/md/dm-table.c 	INIT_LIST_HEAD(&t->devices);
t                 193 drivers/md/dm-table.c 	INIT_LIST_HEAD(&t->target_callbacks);
t                 201 drivers/md/dm-table.c 		kfree(t);
t                 205 drivers/md/dm-table.c 	if (alloc_targets(t, num_targets)) {
t                 206 drivers/md/dm-table.c 		kfree(t);
t                 210 drivers/md/dm-table.c 	t->type = DM_TYPE_NONE;
t                 211 drivers/md/dm-table.c 	t->mode = mode;
t                 212 drivers/md/dm-table.c 	t->md = md;
t                 213 drivers/md/dm-table.c 	*result = t;
t                 231 drivers/md/dm-table.c void dm_table_destroy(struct dm_table *t)
t                 235 drivers/md/dm-table.c 	if (!t)
t                 239 drivers/md/dm-table.c 	if (t->depth >= 2)
t                 240 drivers/md/dm-table.c 		vfree(t->index[t->depth - 2]);
t                 243 drivers/md/dm-table.c 	for (i = 0; i < t->num_targets; i++) {
t                 244 drivers/md/dm-table.c 		struct dm_target *tgt = t->targets + i;
t                 252 drivers/md/dm-table.c 	vfree(t->highs);
t                 255 drivers/md/dm-table.c 	free_devices(&t->devices, t->md);
t                 257 drivers/md/dm-table.c 	dm_free_md_mempools(t->mempools);
t                 259 drivers/md/dm-table.c 	kfree(t);
t                 432 drivers/md/dm-table.c 	struct dm_table *t = ti->table;
t                 434 drivers/md/dm-table.c 	BUG_ON(!t);
t                 440 drivers/md/dm-table.c 	dd = find_device(&t->devices, dev);
t                 446 drivers/md/dm-table.c 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
t                 452 drivers/md/dm-table.c 		list_add(&dd->list, &t->devices);
t                 456 drivers/md/dm-table.c 		r = upgrade_mode(dd, mode, t->md);
t                 708 drivers/md/dm-table.c int dm_table_add_target(struct dm_table *t, const char *type,
t                 715 drivers/md/dm-table.c 	if (t->singleton) {
t                 717 drivers/md/dm-table.c 		      dm_device_name(t->md), t->targets->type->name);
t                 721 drivers/md/dm-table.c 	BUG_ON(t->num_targets >= t->num_allocated);
t                 723 drivers/md/dm-table.c 	tgt = t->targets + t->num_targets;
t                 727 drivers/md/dm-table.c 		DMERR("%s: zero-length target", dm_device_name(t->md));
t                 733 drivers/md/dm-table.c 		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
t                 738 drivers/md/dm-table.c 		if (t->num_targets) {
t                 742 drivers/md/dm-table.c 		t->singleton = true;
t                 745 drivers/md/dm-table.c 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
t                 750 drivers/md/dm-table.c 	if (t->immutable_target_type) {
t                 751 drivers/md/dm-table.c 		if (t->immutable_target_type != tgt->type) {
t                 756 drivers/md/dm-table.c 		if (t->num_targets) {
t                 760 drivers/md/dm-table.c 		t->immutable_target_type = tgt->type;
t                 764 drivers/md/dm-table.c 		t->integrity_added = 1;
t                 766 drivers/md/dm-table.c 	tgt->table = t;
t                 774 drivers/md/dm-table.c 	if (!adjoin(t, tgt)) {
t                 790 drivers/md/dm-table.c 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
t                 794 drivers/md/dm-table.c 		       dm_device_name(t->md), type);
t                 799 drivers/md/dm-table.c 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
t                 875 drivers/md/dm-table.c void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
t                 877 drivers/md/dm-table.c 	t->type = type;
t                 898 drivers/md/dm-table.c bool dm_table_supports_dax(struct dm_table *t,
t                 905 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                 906 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                 919 drivers/md/dm-table.c static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
t                 940 drivers/md/dm-table.c static int dm_table_determine_type(struct dm_table *t)
t                 946 drivers/md/dm-table.c 	struct list_head *devices = dm_table_get_devices(t);
t                 947 drivers/md/dm-table.c 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
t                 950 drivers/md/dm-table.c 	if (t->type != DM_TYPE_NONE) {
t                 952 drivers/md/dm-table.c 		if (t->type == DM_TYPE_BIO_BASED) {
t                 956 drivers/md/dm-table.c 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
t                 957 drivers/md/dm-table.c 		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
t                 961 drivers/md/dm-table.c 	for (i = 0; i < t->num_targets; i++) {
t                 962 drivers/md/dm-table.c 		tgt = t->targets + i;
t                 992 drivers/md/dm-table.c 		t->type = DM_TYPE_BIO_BASED;
t                 993 drivers/md/dm-table.c 		if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
t                 995 drivers/md/dm-table.c 			t->type = DM_TYPE_DAX_BIO_BASED;
t                 998 drivers/md/dm-table.c 			tgt = dm_table_get_immutable_target(t);
t                 999 drivers/md/dm-table.c 			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
t                1000 drivers/md/dm-table.c 				t->type = DM_TYPE_NVME_BIO_BASED;
t                1003 drivers/md/dm-table.c 				t->type = DM_TYPE_NVME_BIO_BASED;
t                1011 drivers/md/dm-table.c 	t->type = DM_TYPE_REQUEST_BASED;
t                1020 drivers/md/dm-table.c 	if (t->num_targets > 1) {
t                1022 drivers/md/dm-table.c 		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
t                1028 drivers/md/dm-table.c 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
t                1032 drivers/md/dm-table.c 			t->type = live_table->type;
t                1033 drivers/md/dm-table.c 		dm_put_live_table(t->md, srcu_idx);
t                1037 drivers/md/dm-table.c 	tgt = dm_table_get_immutable_target(t);
t                1060 drivers/md/dm-table.c enum dm_queue_mode dm_table_get_type(struct dm_table *t)
t                1062 drivers/md/dm-table.c 	return t->type;
t                1065 drivers/md/dm-table.c struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
t                1067 drivers/md/dm-table.c 	return t->immutable_target_type;
t                1070 drivers/md/dm-table.c struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
t                1073 drivers/md/dm-table.c 	if (t->num_targets > 1 ||
t                1074 drivers/md/dm-table.c 	    !dm_target_is_immutable(t->targets[0].type))
t                1077 drivers/md/dm-table.c 	return t->targets;
t                1080 drivers/md/dm-table.c struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
t                1085 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1086 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1094 drivers/md/dm-table.c bool dm_table_bio_based(struct dm_table *t)
t                1096 drivers/md/dm-table.c 	return __table_type_bio_based(dm_table_get_type(t));
t                1099 drivers/md/dm-table.c bool dm_table_request_based(struct dm_table *t)
t                1101 drivers/md/dm-table.c 	return __table_type_request_based(dm_table_get_type(t));
t                1104 drivers/md/dm-table.c static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
t                1106 drivers/md/dm-table.c 	enum dm_queue_mode type = dm_table_get_type(t);
t                1118 drivers/md/dm-table.c 		for (i = 0; i < t->num_targets; i++) {
t                1119 drivers/md/dm-table.c 			ti = t->targets + i;
t                1124 drivers/md/dm-table.c 	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
t                1126 drivers/md/dm-table.c 	if (!t->mempools)
t                1132 drivers/md/dm-table.c void dm_table_free_md_mempools(struct dm_table *t)
t                1134 drivers/md/dm-table.c 	dm_free_md_mempools(t->mempools);
t                1135 drivers/md/dm-table.c 	t->mempools = NULL;
t                1138 drivers/md/dm-table.c struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
t                1140 drivers/md/dm-table.c 	return t->mempools;
t                1143 drivers/md/dm-table.c static int setup_indexes(struct dm_table *t)
t                1150 drivers/md/dm-table.c 	for (i = t->depth - 2; i >= 0; i--) {
t                1151 drivers/md/dm-table.c 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
t                1152 drivers/md/dm-table.c 		total += t->counts[i];
t                1160 drivers/md/dm-table.c 	for (i = t->depth - 2; i >= 0; i--) {
t                1161 drivers/md/dm-table.c 		t->index[i] = indexes;
t                1162 drivers/md/dm-table.c 		indexes += (KEYS_PER_NODE * t->counts[i]);
t                1163 drivers/md/dm-table.c 		setup_btree_index(i, t);
t                1172 drivers/md/dm-table.c static int dm_table_build_index(struct dm_table *t)
t                1178 drivers/md/dm-table.c 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t                1179 drivers/md/dm-table.c 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
t                1182 drivers/md/dm-table.c 	t->counts[t->depth - 1] = leaf_nodes;
t                1183 drivers/md/dm-table.c 	t->index[t->depth - 1] = t->highs;
t                1185 drivers/md/dm-table.c 	if (t->depth >= 2)
t                1186 drivers/md/dm-table.c 		r = setup_indexes(t);
t                1200 drivers/md/dm-table.c static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
t                1202 drivers/md/dm-table.c 	struct list_head *devices = dm_table_get_devices(t);
t                1207 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1208 drivers/md/dm-table.c 		struct dm_target *ti = dm_table_get_target(t, i);
t                1228 drivers/md/dm-table.c 		       dm_device_name(t->md),
t                1244 drivers/md/dm-table.c static int dm_table_register_integrity(struct dm_table *t)
t                1246 drivers/md/dm-table.c 	struct mapped_device *md = t->md;
t                1250 drivers/md/dm-table.c 	if (t->integrity_added)
t                1253 drivers/md/dm-table.c 	template_disk = dm_table_get_integrity_disk(t);
t                1258 drivers/md/dm-table.c 		t->integrity_supported = true;
t                1275 drivers/md/dm-table.c 		       dm_device_name(t->md),
t                1281 drivers/md/dm-table.c 	t->integrity_supported = true;
t                1289 drivers/md/dm-table.c int dm_table_complete(struct dm_table *t)
t                1293 drivers/md/dm-table.c 	r = dm_table_determine_type(t);
t                1299 drivers/md/dm-table.c 	r = dm_table_build_index(t);
t                1305 drivers/md/dm-table.c 	r = dm_table_register_integrity(t);
t                1311 drivers/md/dm-table.c 	r = dm_table_alloc_md_mempools(t, t->md);
t                1319 drivers/md/dm-table.c void dm_table_event_callback(struct dm_table *t,
t                1323 drivers/md/dm-table.c 	t->event_fn = fn;
t                1324 drivers/md/dm-table.c 	t->event_context = context;
t                1328 drivers/md/dm-table.c void dm_table_event(struct dm_table *t)
t                1337 drivers/md/dm-table.c 	if (t->event_fn)
t                1338 drivers/md/dm-table.c 		t->event_fn(t->event_context);
t                1343 drivers/md/dm-table.c inline sector_t dm_table_get_size(struct dm_table *t)
t                1345 drivers/md/dm-table.c 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
t                1349 drivers/md/dm-table.c struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
t                1351 drivers/md/dm-table.c 	if (index >= t->num_targets)
t                1354 drivers/md/dm-table.c 	return t->targets + index;
t                1363 drivers/md/dm-table.c struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
t                1368 drivers/md/dm-table.c 	if (unlikely(sector >= dm_table_get_size(t)))
t                1371 drivers/md/dm-table.c 	for (l = 0; l < t->depth; l++) {
t                1373 drivers/md/dm-table.c 		node = get_node(t, l, n);
t                1380 drivers/md/dm-table.c 	return &t->targets[(KEYS_PER_NODE * n) + k];
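dm_table_find_target() above walks a flattened n-ary index: each node stores the highest sector handled by each child (filled in by high() and setup_btree_index()), so at every level the first key that is >= the search sector selects the child, and at the leaves KEYS_PER_NODE * n + k picks the target. The inner per-node scan is not quoted in this index, so the sketch below is an assumed reconstruction of one level of that lookup, with KEYS_PER_NODE fixed purely for illustration.

typedef unsigned long long sector_t;    /* stand-in */

#define KEYS_PER_NODE   8               /* illustrative; the driver derives it */

/* One level of the lookup: return the index of the first key that is
 * >= the search sector, i.e. the child (or leaf target slot) to use. */
static unsigned int node_lookup(const sector_t node[KEYS_PER_NODE],
                                sector_t sector)
{
    unsigned int k;

    for (k = 0; k < KEYS_PER_NODE; k++)
        if (node[k] >= sector)
            break;
    return k;
}

int main(void)
{
    sector_t node[KEYS_PER_NODE] = { 99, 199, 299, 399, 499, 599, 699, 799 };

    return node_lookup(node, 250) == 2 ? 0 : 1; /* sector 250 lives under child 2 */
}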
t                1428 drivers/md/dm-table.c static bool dm_table_supports_zoned_model(struct dm_table *t,
t                1434 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1435 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1458 drivers/md/dm-table.c static bool dm_table_matches_zone_sectors(struct dm_table *t,
t                1464 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1465 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1606 drivers/md/dm-table.c static void dm_table_verify_integrity(struct dm_table *t)
t                1610 drivers/md/dm-table.c 	if (t->integrity_added)
t                1613 drivers/md/dm-table.c 	if (t->integrity_supported) {
t                1618 drivers/md/dm-table.c 		template_disk = dm_table_get_integrity_disk(t);
t                1620 drivers/md/dm-table.c 		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
t                1624 drivers/md/dm-table.c 	if (integrity_profile_exists(dm_disk(t->md))) {
t                1626 drivers/md/dm-table.c 		       dm_device_name(t->md));
t                1627 drivers/md/dm-table.c 		blk_integrity_unregister(dm_disk(t->md));
t                1640 drivers/md/dm-table.c static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
t                1651 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1652 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1682 drivers/md/dm-table.c static int dm_table_supports_dax_write_cache(struct dm_table *t)
t                1687 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1688 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1715 drivers/md/dm-table.c static bool dm_table_all_devices_attribute(struct dm_table *t,
t                1721 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1722 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1741 drivers/md/dm-table.c static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
t                1743 drivers/md/dm-table.c 	return dm_table_all_devices_attribute(t, device_no_partial_completion);
t                1754 drivers/md/dm-table.c static bool dm_table_supports_write_same(struct dm_table *t)
t                1759 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1760 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1781 drivers/md/dm-table.c static bool dm_table_supports_write_zeroes(struct dm_table *t)
t                1786 drivers/md/dm-table.c 	while (i < dm_table_get_num_targets(t)) {
t                1787 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i++);
t                1808 drivers/md/dm-table.c static bool dm_table_supports_discards(struct dm_table *t)
t                1813 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1814 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1842 drivers/md/dm-table.c static bool dm_table_supports_secure_erase(struct dm_table *t)
t                1847 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1848 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
t                1875 drivers/md/dm-table.c static bool dm_table_requires_stable_pages(struct dm_table *t)
t                1880 drivers/md/dm-table.c 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
t                1881 drivers/md/dm-table.c 		ti = dm_table_get_target(t, i);
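All of the dm_table_supports_*()/dm_table_requires_*() helpers above share one predicate shape: iterate over every target in the table and require its iterate_devices callback to confirm the property for the underlying devices, so a single unsupported device disqualifies the whole table. A condensed stand-in for that loop (the real callbacks take more arguments and are invoked once per device):

#include <stdbool.h>

struct dm_target_model {                /* stand-ins for dm_target and its ops */
    bool (*all_devices_have)(const struct dm_target_model *ti, int capability);
};

struct dm_table_model {
    unsigned int num_targets;
    struct dm_target_model *targets;
};

/* The table has the capability only if every target reports that all of
 * its devices have it. */
static bool table_supports(const struct dm_table_model *t, int capability)
{
    for (unsigned int i = 0; i < t->num_targets; i++) {
        const struct dm_target_model *ti = &t->targets[i];

        if (!ti->all_devices_have || !ti->all_devices_have(ti, capability))
            return false;
    }
    return true;
}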
t                1891 drivers/md/dm-table.c void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
t                1902 drivers/md/dm-table.c 	if (!dm_table_supports_discards(t)) {
t                1913 drivers/md/dm-table.c 	if (dm_table_supports_secure_erase(t))
t                1916 drivers/md/dm-table.c 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
t                1918 drivers/md/dm-table.c 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
t                1923 drivers/md/dm-table.c 	if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
t                1925 drivers/md/dm-table.c 		if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
t                1926 drivers/md/dm-table.c 			set_dax_synchronous(t->md->dax_dev);
t                1931 drivers/md/dm-table.c 	if (dm_table_supports_dax_write_cache(t))
t                1932 drivers/md/dm-table.c 		dax_write_cache(t->md->dax_dev, true);
t                1935 drivers/md/dm-table.c 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
t                1940 drivers/md/dm-table.c 	if (!dm_table_supports_write_same(t))
t                1942 drivers/md/dm-table.c 	if (!dm_table_supports_write_zeroes(t))
t                1945 drivers/md/dm-table.c 	dm_table_verify_integrity(t);
t                1951 drivers/md/dm-table.c 	if (dm_table_requires_stable_pages(t))
t                1962 drivers/md/dm-table.c 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
t                1973 drivers/md/dm-table.c 		blk_revalidate_disk_zones(t->md->disk);
t                1979 drivers/md/dm-table.c unsigned int dm_table_get_num_targets(struct dm_table *t)
t                1981 drivers/md/dm-table.c 	return t->num_targets;
t                1984 drivers/md/dm-table.c struct list_head *dm_table_get_devices(struct dm_table *t)
t                1986 drivers/md/dm-table.c 	return &t->devices;
t                1989 drivers/md/dm-table.c fmode_t dm_table_get_mode(struct dm_table *t)
t                1991 drivers/md/dm-table.c 	return t->mode;
t                2001 drivers/md/dm-table.c static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
t                2003 drivers/md/dm-table.c 	int i = t->num_targets;
t                2004 drivers/md/dm-table.c 	struct dm_target *ti = t->targets;
t                2006 drivers/md/dm-table.c 	lockdep_assert_held(&t->md->suspend_lock);
t                2027 drivers/md/dm-table.c void dm_table_presuspend_targets(struct dm_table *t)
t                2029 drivers/md/dm-table.c 	if (!t)
t                2032 drivers/md/dm-table.c 	suspend_targets(t, PRESUSPEND);
t                2035 drivers/md/dm-table.c void dm_table_presuspend_undo_targets(struct dm_table *t)
t                2037 drivers/md/dm-table.c 	if (!t)
t                2040 drivers/md/dm-table.c 	suspend_targets(t, PRESUSPEND_UNDO);
t                2043 drivers/md/dm-table.c void dm_table_postsuspend_targets(struct dm_table *t)
t                2045 drivers/md/dm-table.c 	if (!t)
t                2048 drivers/md/dm-table.c 	suspend_targets(t, POSTSUSPEND);
t                2051 drivers/md/dm-table.c int dm_table_resume_targets(struct dm_table *t)
t                2055 drivers/md/dm-table.c 	lockdep_assert_held(&t->md->suspend_lock);
t                2057 drivers/md/dm-table.c 	for (i = 0; i < t->num_targets; i++) {
t                2058 drivers/md/dm-table.c 		struct dm_target *ti = t->targets + i;
t                2066 drivers/md/dm-table.c 			      dm_device_name(t->md), ti->type->name, r);
t                2071 drivers/md/dm-table.c 	for (i = 0; i < t->num_targets; i++) {
t                2072 drivers/md/dm-table.c 		struct dm_target *ti = t->targets + i;
t                2081 drivers/md/dm-table.c void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
t                2083 drivers/md/dm-table.c 	list_add(&cb->list, &t->target_callbacks);
t                2087 drivers/md/dm-table.c int dm_table_any_congested(struct dm_table *t, int bdi_bits)
t                2090 drivers/md/dm-table.c 	struct list_head *devices = dm_table_get_devices(t);
t                2102 drivers/md/dm-table.c 				     dm_device_name(t->md),
t                2106 drivers/md/dm-table.c 	list_for_each_entry(cb, &t->target_callbacks, list)
t                2113 drivers/md/dm-table.c struct mapped_device *dm_table_get_md(struct dm_table *t)
t                2115 drivers/md/dm-table.c 	return t->md;
t                2119 drivers/md/dm-table.c const char *dm_table_device_name(struct dm_table *t)
t                2121 drivers/md/dm-table.c 	return dm_device_name(t->md);
t                2125 drivers/md/dm-table.c void dm_table_run_md_queue_async(struct dm_table *t)
t                2130 drivers/md/dm-table.c 	if (!dm_table_request_based(t))
t                2133 drivers/md/dm-table.c 	md = dm_table_get_md(t);
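The dm-table.c helpers excerpted above (supports_flush, supports_write_same, supports_discards, requires_stable_pages, ...) all follow one shape: iterate every target in the table and ask the target type, or its underlying devices, whether a capability holds. A minimal sketch of that shape as it would sit next to them in drivers/md/dm-table.c; my_table_supports_foo() and device_supports_foo() are hypothetical names, and the iterate_devices callout signature follows include/linux/device-mapper.h.

    /* Hypothetical capability check in the style of the helpers above. */
    static int device_supports_foo(struct dm_target *ti, struct dm_dev *dev,
                                   sector_t start, sector_t len, void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            /* Replace with the real per-device query; non-zero means "yes". */
            return q && blk_queue_nonrot(q);
    }

    static bool my_table_supports_foo(struct dm_table *t)
    {
            struct dm_target *ti;
            unsigned int i;

            for (i = 0; i < dm_table_get_num_targets(t); i++) {
                    ti = dm_table_get_target(t, i);

                    /* A target that cannot enumerate its devices cannot
                     * vouch for them, so treat it as unsupported. */
                    if (!ti->type->iterate_devices ||
                        !ti->type->iterate_devices(ti, device_supports_foo, NULL))
                            return false;
            }

            return true;
    }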
t                 303 drivers/md/dm-thin-metadata.c static uint64_t pack_block_time(dm_block_t b, uint32_t t)
t                 305 drivers/md/dm-thin-metadata.c 	return (b << 24) | t;
t                 308 drivers/md/dm-thin-metadata.c static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
t                 311 drivers/md/dm-thin-metadata.c 	*t = v & ((1 << 24) - 1);
t                 319 drivers/md/dm-thin-metadata.c 	uint32_t t;
t                 322 drivers/md/dm-thin-metadata.c 	unpack_block_time(le64_to_cpu(v_le), &b, &t);
t                 331 drivers/md/dm-thin-metadata.c 	uint32_t t;
t                 334 drivers/md/dm-thin-metadata.c 	unpack_block_time(le64_to_cpu(v_le), &b, &t);
t                 342 drivers/md/dm-thin-metadata.c 	uint32_t t;
t                 346 drivers/md/dm-thin-metadata.c 	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
t                 347 drivers/md/dm-thin-metadata.c 	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
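pack_block_time()/unpack_block_time() in dm-thin-metadata.c squeeze a data-block number and a 24-bit transaction time into a single 64-bit on-disk value. The pair is short enough to show whole; the `*b = v >> 24` half of the unpack is not in the excerpt above, so it is reconstructed here (dm_block_t is the 64-bit block type from the dm persistent-data headers).

    static uint64_t pack_block_time(dm_block_t b, uint32_t t)
    {
            /* low 24 bits: time stamp, remaining high bits: block number */
            return (b << 24) | t;
    }

    static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
    {
            *b = v >> 24;
            *t = v & ((1 << 24) - 1);
    }

The comparison helpers above unpack packed values only to compare the block halves; the unpacked time is thrown away, which is why a single throwaway t can hold it for both operands.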
t                 151 drivers/md/dm-thin.c static void throttle_init(struct throttle *t)
t                 153 drivers/md/dm-thin.c 	init_rwsem(&t->lock);
t                 154 drivers/md/dm-thin.c 	t->throttle_applied = false;
t                 157 drivers/md/dm-thin.c static void throttle_work_start(struct throttle *t)
t                 159 drivers/md/dm-thin.c 	t->threshold = jiffies + THROTTLE_THRESHOLD;
t                 162 drivers/md/dm-thin.c static void throttle_work_update(struct throttle *t)
t                 164 drivers/md/dm-thin.c 	if (!t->throttle_applied && jiffies > t->threshold) {
t                 165 drivers/md/dm-thin.c 		down_write(&t->lock);
t                 166 drivers/md/dm-thin.c 		t->throttle_applied = true;
t                 170 drivers/md/dm-thin.c static void throttle_work_complete(struct throttle *t)
t                 172 drivers/md/dm-thin.c 	if (t->throttle_applied) {
t                 173 drivers/md/dm-thin.c 		t->throttle_applied = false;
t                 174 drivers/md/dm-thin.c 		up_write(&t->lock);
t                 178 drivers/md/dm-thin.c static void throttle_lock(struct throttle *t)
t                 180 drivers/md/dm-thin.c 	down_read(&t->lock);
t                 183 drivers/md/dm-thin.c static void throttle_unlock(struct throttle *t)
t                 185 drivers/md/dm-thin.c 	up_read(&t->lock);
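The throttle_*() helpers above implement a small back-pressure gadget around an rwsem: the IO paths take the lock for read, and the worker escalates to the write side only when a single pass has run past its deadline, which stalls new readers until throttle_work_complete() drops it again. A sketch of the structure they operate on; the real THROTTLE_THRESHOLD constant is not in the excerpt, so the value below is a placeholder.

    #include <linux/rwsem.h>
    #include <linux/jiffies.h>

    #define THROTTLE_THRESHOLD (2 * HZ)        /* placeholder value */

    struct throttle {
            struct rw_semaphore lock;
            unsigned long threshold;           /* jiffies deadline for this pass */
            bool throttle_applied;             /* write side currently held?     */
    };

Note that throttle_work_update() takes the write lock and leaves it held; the matching up_write() only happens in throttle_work_complete(), so both must run from the same worker context.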
t                 777 drivers/md/dm-writecache.c static void writecache_autocommit_timer(struct timer_list *t)
t                 779 drivers/md/dm-writecache.c 	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
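writecache_autocommit_timer() above, like the many other timer callbacks in this listing (md_safemode_timeout, bttv_input_timer, saa7134_buffer_timeout, ...), uses the timer_setup()/from_timer() pattern: the struct timer_list is embedded in the driver's own state, and the callback recovers the container with from_timer(), which is container_of() specialised for timers. A self-contained sketch with hypothetical names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* Hypothetical container; only the embedded timer matters for the pattern. */
    struct my_dev {
            struct timer_list autocommit_timer;
            /* ... driver state ... */
    };

    static void my_autocommit_timeout(struct timer_list *t)
    {
            struct my_dev *d = from_timer(d, t, autocommit_timer);

            /* do the periodic work, then re-arm for one second from now */
            mod_timer(&d->autocommit_timer, jiffies + HZ);
    }

    static void my_dev_init_timer(struct my_dev *d)
    {
            timer_setup(&d->autocommit_timer, my_autocommit_timeout, 0);
            mod_timer(&d->autocommit_timer, jiffies + HZ);
    }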
t                2048 drivers/md/dm.c static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
t                2050 drivers/md/dm.c 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
t                2053 drivers/md/dm.c 	if (dm_table_bio_based(t)) {
t                2086 drivers/md/dm.c 	dm_table_free_md_mempools(t);
t                2125 drivers/md/dm.c static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
t                2130 drivers/md/dm.c 	bool request_based = dm_table_request_based(t);
t                2136 drivers/md/dm.c 	size = dm_table_get_size(t);
t                2146 drivers/md/dm.c 	dm_table_event_callback(t, event_callback, md);
t                2165 drivers/md/dm.c 		md->immutable_target = dm_table_get_immutable_target(t);
t                2168 drivers/md/dm.c 	ret = __bind_mempools(md, t);
t                2175 drivers/md/dm.c 	rcu_assign_pointer(md->map, (void *)t);
t                2176 drivers/md/dm.c 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
t                2178 drivers/md/dm.c 	dm_table_set_restrictions(t, q, limits);
t                2275 drivers/md/dm.c int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
t                2283 drivers/md/dm.c 		r = dm_mq_init_request_queue(md, t);
t                2300 drivers/md/dm.c 	r = dm_calculate_queue_limits(t, &limits);
t                2305 drivers/md/dm.c 	dm_table_set_restrictions(t, md->queue, &limits);
t                  52 drivers/md/dm.h void dm_table_event_callback(struct dm_table *t,
t                  54 drivers/md/dm.h struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
t                  55 drivers/md/dm.h struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
t                  59 drivers/md/dm.h void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
t                  61 drivers/md/dm.h struct list_head *dm_table_get_devices(struct dm_table *t);
t                  62 drivers/md/dm.h void dm_table_presuspend_targets(struct dm_table *t);
t                  63 drivers/md/dm.h void dm_table_presuspend_undo_targets(struct dm_table *t);
t                  64 drivers/md/dm.h void dm_table_postsuspend_targets(struct dm_table *t);
t                  65 drivers/md/dm.h int dm_table_resume_targets(struct dm_table *t);
t                  66 drivers/md/dm.h int dm_table_any_congested(struct dm_table *t, int bdi_bits);
t                  67 drivers/md/dm.h enum dm_queue_mode dm_table_get_type(struct dm_table *t);
t                  68 drivers/md/dm.h struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
t                  69 drivers/md/dm.h struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
t                  70 drivers/md/dm.h struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
t                  71 drivers/md/dm.h bool dm_table_bio_based(struct dm_table *t);
t                  72 drivers/md/dm.h bool dm_table_request_based(struct dm_table *t);
t                  73 drivers/md/dm.h void dm_table_free_md_mempools(struct dm_table *t);
t                  74 drivers/md/dm.h struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
t                  75 drivers/md/dm.h bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
t                  86 drivers/md/dm.h int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
t                  91 drivers/md/dm.h #define dm_target_bio_based(t) ((t)->type->map != NULL)
t                  96 drivers/md/dm.h #define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
t                 102 drivers/md/dm.h #define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
t                 620 drivers/md/md.c static void md_safemode_timeout(struct timer_list *t);
t                5578 drivers/md/md.c static void md_safemode_timeout(struct timer_list *t)
t                5580 drivers/md/md.c 	struct mddev *mddev = from_timer(mddev, t, safemode_timer);
t                  78 drivers/md/persistent-data/dm-block-manager.c 	struct stack_store *t;
t                  85 drivers/md/persistent-data/dm-block-manager.c 	t = lock->traces + h;
t                  86 drivers/md/persistent-data/dm-block-manager.c 	t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
t                  32 drivers/md/persistent-data/dm-space-map-metadata.c static void threshold_init(struct threshold *t)
t                  34 drivers/md/persistent-data/dm-space-map-metadata.c 	t->threshold_set = false;
t                  35 drivers/md/persistent-data/dm-space-map-metadata.c 	t->value_set = false;
t                  38 drivers/md/persistent-data/dm-space-map-metadata.c static void set_threshold(struct threshold *t, dm_block_t value,
t                  41 drivers/md/persistent-data/dm-space-map-metadata.c 	t->threshold_set = true;
t                  42 drivers/md/persistent-data/dm-space-map-metadata.c 	t->threshold = value;
t                  43 drivers/md/persistent-data/dm-space-map-metadata.c 	t->fn = fn;
t                  44 drivers/md/persistent-data/dm-space-map-metadata.c 	t->context = context;
t                  47 drivers/md/persistent-data/dm-space-map-metadata.c static bool below_threshold(struct threshold *t, dm_block_t value)
t                  49 drivers/md/persistent-data/dm-space-map-metadata.c 	return t->threshold_set && value <= t->threshold;
t                  52 drivers/md/persistent-data/dm-space-map-metadata.c static bool threshold_already_triggered(struct threshold *t)
t                  54 drivers/md/persistent-data/dm-space-map-metadata.c 	return t->value_set && below_threshold(t, t->current_value);
t                  57 drivers/md/persistent-data/dm-space-map-metadata.c static void check_threshold(struct threshold *t, dm_block_t value)
t                  59 drivers/md/persistent-data/dm-space-map-metadata.c 	if (below_threshold(t, value) &&
t                  60 drivers/md/persistent-data/dm-space-map-metadata.c 	    !threshold_already_triggered(t))
t                  61 drivers/md/persistent-data/dm-space-map-metadata.c 		t->fn(t->context);
t                  63 drivers/md/persistent-data/dm-space-map-metadata.c 	t->value_set = true;
t                  64 drivers/md/persistent-data/dm-space-map-metadata.c 	t->current_value = value;
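The threshold helpers in dm-space-map-metadata.c above fire a low-space callback exactly once per crossing: check_threshold() calls t->fn() only when the new value is at or below the threshold and the previously recorded value was not, then records the value for the next call. The fields they touch imply this layout (the callback typedef name follows the dm persistent-data space-map API and is an assumption here):

    struct threshold {
            bool threshold_set;          /* set_threshold() has been called  */
            bool value_set;              /* current_value below is valid     */
            dm_block_t threshold;        /* trigger at or below this count   */
            dm_block_t current_value;    /* value seen on the previous check */
            dm_sm_threshold_fn fn;       /* callback, takes only the context */
            void *context;
    };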
t                 357 drivers/md/raid5.c 	struct stripe_head *sh, *t;
t                 363 drivers/md/raid5.c 	llist_for_each_entry_safe(sh, t, head, release_list) {
t                 167 drivers/media/common/saa7146/saa7146_fops.c void saa7146_buffer_timeout(struct timer_list *t)
t                 169 drivers/media/common/saa7146/saa7146_fops.c 	struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
t                 364 drivers/media/common/saa7146/saa7146_hlp.c 		int l = 0, r = 0, t = 0, b = 0;
t                 389 drivers/media/common/saa7146/saa7146_hlp.c 		t = y[i];
t                 396 drivers/media/common/saa7146/saa7146_hlp.c 		line_list[ 2*i   ] = min_t(int, t, height);
t                 352 drivers/media/common/saa7146/saa7146_vbi.c static void vbi_read_timeout(struct timer_list *t)
t                 354 drivers/media/common/saa7146/saa7146_vbi.c 	struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
t                 363 drivers/media/dvb-core/dmxdev.c static void dvb_dmxdev_filter_timeout(struct timer_list *t)
t                 365 drivers/media/dvb-core/dmxdev.c 	struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);
t                  91 drivers/media/dvb-frontends/atbm8830.c 	u64 t;
t                  94 drivers/media/dvb-frontends/atbm8830.c 	t = (u64)0x100000 * freq;
t                  95 drivers/media/dvb-frontends/atbm8830.c 	do_div(t, 30400);
t                  96 drivers/media/dvb-frontends/atbm8830.c 	val = t;
t                 109 drivers/media/dvb-frontends/atbm8830.c 	u64 t;
t                 115 drivers/media/dvb-frontends/atbm8830.c 		t = (u64) 2 * 31416 * (freq - fs);
t                 116 drivers/media/dvb-frontends/atbm8830.c 		t <<= 22;
t                 117 drivers/media/dvb-frontends/atbm8830.c 		do_div(t, fs);
t                 118 drivers/media/dvb-frontends/atbm8830.c 		do_div(t, 1000);
t                 119 drivers/media/dvb-frontends/atbm8830.c 		val = t;
t                 352 drivers/media/dvb-frontends/atbm8830.c 	u8 t;
t                 358 drivers/media/dvb-frontends/atbm8830.c 	atbm8830_read_reg(priv, REG_FRAME_ERR_CNT + 1, &t);
t                 359 drivers/media/dvb-frontends/atbm8830.c 	frame_err = t & 0x7F;
t                 361 drivers/media/dvb-frontends/atbm8830.c 	atbm8830_read_reg(priv, REG_FRAME_ERR_CNT, &t);
t                 362 drivers/media/dvb-frontends/atbm8830.c 	frame_err |= t;
t                 376 drivers/media/dvb-frontends/atbm8830.c 	u8 t;
t                 381 drivers/media/dvb-frontends/atbm8830.c 	atbm8830_read_reg(priv, REG_AGC_PWM_VAL + 1, &t);
t                 382 drivers/media/dvb-frontends/atbm8830.c 	pwm = t & 0x03;
t                 384 drivers/media/dvb-frontends/atbm8830.c 	atbm8830_read_reg(priv, REG_AGC_PWM_VAL, &t);
t                 385 drivers/media/dvb-frontends/atbm8830.c 	pwm |= t;
t                 158 drivers/media/dvb-frontends/bcm3510.c 	unsigned long t;
t                 190 drivers/media/dvb-frontends/bcm3510.c 	t = jiffies + 1*HZ;
t                 191 drivers/media/dvb-frontends/bcm3510.c 	while (time_before(jiffies, t)) {
t                 336 drivers/media/dvb-frontends/bcm3510.c 	s32 t;
t                 339 drivers/media/dvb-frontends/bcm3510.c 	t = st->status2.SIGNAL;
t                 341 drivers/media/dvb-frontends/bcm3510.c 	if (t > 190)
t                 342 drivers/media/dvb-frontends/bcm3510.c 		t = 190;
t                 343 drivers/media/dvb-frontends/bcm3510.c 	if (t < 90)
t                 344 drivers/media/dvb-frontends/bcm3510.c 		t = 90;
t                 346 drivers/media/dvb-frontends/bcm3510.c 	t -= 90;
t                 347 drivers/media/dvb-frontends/bcm3510.c 	t = t * 0xff / 100;
t                 349 drivers/media/dvb-frontends/bcm3510.c 	*strength = (t << 8) | t;
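The bcm3510 strength mapping above clamps the raw reading to its usable 90..190 window, rescales that span to 0..255, then replicates the byte into both halves of the 16-bit value the DVB API reports. Condensed into one hypothetical helper (clamp_t() replaces the open-coded if chain):

    #include <linux/kernel.h>        /* clamp_t() */
    #include <linux/types.h>

    static u16 scale_strength(s32 raw)
    {
            s32 t = clamp_t(s32, raw, 90, 190) - 90;   /* 0..100 */

            t = t * 0xff / 100;                        /* 0..255 */
            return (t << 8) | t;                       /* fill both bytes */
    }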
t                 682 drivers/media/dvb-frontends/bcm3510.c 	unsigned long  t;
t                 689 drivers/media/dvb-frontends/bcm3510.c 	t = jiffies + 3*HZ;
t                 690 drivers/media/dvb-frontends/bcm3510.c 	while (time_before(jiffies, t)) {
t                 706 drivers/media/dvb-frontends/bcm3510.c 	unsigned long t;
t                 712 drivers/media/dvb-frontends/bcm3510.c 	t = jiffies + 3*HZ;
t                 713 drivers/media/dvb-frontends/bcm3510.c 	while (time_before(jiffies, t)) {
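bcm3510 polls hardware completion with a bounded loop on jiffies: record a deadline, then loop until either the condition holds or time_before() says the deadline has passed (time_before() copes with jiffies wrap-around). A minimal sketch of the idiom; my_condition_ready() is a hypothetical stand-in for the register poll:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    static bool my_condition_ready(void)
    {
            return false;                /* hypothetical hardware poll */
    }

    static int my_wait_ready(void)
    {
            unsigned long t = jiffies + 3 * HZ;   /* ~3 second budget */

            while (time_before(jiffies, t)) {
                    if (my_condition_ready())
                            return 0;
                    msleep(10);          /* don't busy-spin between polls */
            }
            return -ETIMEDOUT;
    }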
t                3861 drivers/media/dvb-frontends/dib8000.c 		u32 t = (s/n) << 16;
t                3862 drivers/media/dvb-frontends/dib8000.c 		return t + ((s << 16) - n*t) / n;
t                1588 drivers/media/dvb-frontends/dib9000.c 	u16 i, len, t, index_msg;
t                1611 drivers/media/dvb-frontends/dib9000.c 				t = dib9000_read_word(state, 785);
t                1612 drivers/media/dvb-frontends/dib9000.c 				msg[index_msg].buf[i] = (t >> 8) & 0xff;
t                1613 drivers/media/dvb-frontends/dib9000.c 				msg[index_msg].buf[i + 1] = (t) & 0xff;
t                2314 drivers/media/dvb-frontends/dib9000.c 		u32 t = (s / n) << 16;
t                2315 drivers/media/dvb-frontends/dib9000.c 		return t + ((s << 16) - n * t) / n;
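The two-line helper repeated in dib8000 and dib9000 above is a 16.16 fixed-point division that avoids a 64-bit intermediate: writing s = q*n + r with q = s/n, the wanted value (s << 16)/n equals (q << 16) + (r << 16)/n, and the subtraction (s << 16) - n*t recovers r << 16 even when s << 16 wraps around 32 bits, as long as the remainder r itself fits in 16 bits. A sketch under that assumption:

    #include <linux/types.h>

    /* 16.16 fixed-point s/n without a u64; valid while (s % n) < 0x10000. */
    static u32 fix16_div(u32 s, u32 n)
    {
            u32 t = (s / n) << 16;

            return t + ((s << 16) - n * t) / n;
    }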
t                 104 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 108 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, reg, &t);
t                 110 drivers/media/dvb-frontends/lgs8gxx.c 		if ((t & mask) == val)
t                 182 drivers/media/dvb-frontends/lgs8gxx.c 	u8 reg_addr, t;
t                 191 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, reg_addr, &t);
t                 193 drivers/media/dvb-frontends/lgs8gxx.c 		v32 |= t;
t                 206 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 213 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x0C, &t);
t                 214 drivers/media/dvb-frontends/lgs8gxx.c 		t &= (~0x04);
t                 215 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x0C, t | 0x80);
t                 223 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x7E, &t);
t                 224 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x7E, t | 0x01);
t                 227 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0xC5, &t);
t                 228 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0xC5, t & 0xE0);
t                 235 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x7C, &t);
t                 236 drivers/media/dvb-frontends/lgs8gxx.c 		t = (t & 0x8C) | 0x03;
t                 237 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x7C, t);
t                 240 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0xC3, &t);
t                 241 drivers/media/dvb-frontends/lgs8gxx.c 		t = (t & 0xEF) |  0x10;
t                 242 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0xC3, t);
t                 253 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 257 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x0C, &t);
t                 258 drivers/media/dvb-frontends/lgs8gxx.c 		t &= (~0x80);
t                 259 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x0C, t);
t                 261 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x0C, &t);
t                 264 drivers/media/dvb-frontends/lgs8gxx.c 		if (((t&0x03) == 0x01) && (t2&0x01)) {
t                 286 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0xC5, &t);
t                 287 drivers/media/dvb-frontends/lgs8gxx.c 	t = (t & 0xE0) | 0x06;
t                 288 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_write_reg(priv, 0xC5, t);
t                 298 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 301 drivers/media/dvb-frontends/lgs8gxx.c 		ret = lgs8gxx_read_reg(priv, 0x13, &t);
t                 303 drivers/media/dvb-frontends/lgs8gxx.c 		ret = lgs8gxx_read_reg(priv, 0x4B, &t);
t                 308 drivers/media/dvb-frontends/lgs8gxx.c 		*locked = ((t & 0x80) == 0x80) ? 1 : 0;
t                 310 drivers/media/dvb-frontends/lgs8gxx.c 		*locked = ((t & 0xC0) == 0xC0) ? 1 : 0;
t                 443 drivers/media/dvb-frontends/lgs8gxx.c 		u8 t;
t                 446 drivers/media/dvb-frontends/lgs8gxx.c 			lgs8gxx_read_reg(priv, 0xA2, &t);
t                 447 drivers/media/dvb-frontends/lgs8gxx.c 			*detected_param = t;
t                 449 drivers/media/dvb-frontends/lgs8gxx.c 			lgs8gxx_read_reg(priv, 0x1F, &t);
t                 450 drivers/media/dvb-frontends/lgs8gxx.c 			*detected_param = t & 0x3F;
t                 490 drivers/media/dvb-frontends/lgs8gxx.c 		u8 t;
t                 491 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x19, &t);
t                 492 drivers/media/dvb-frontends/lgs8gxx.c 		t &= 0x81;
t                 493 drivers/media/dvb-frontends/lgs8gxx.c 		t |= detected_param << 1;
t                 494 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x19, t);
t                 521 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t, reg_addr;
t                 524 drivers/media/dvb-frontends/lgs8gxx.c 	ret = lgs8gxx_read_reg(priv, reg_addr, &t);
t                 528 drivers/media/dvb-frontends/lgs8gxx.c 	t &= 0xF8;
t                 529 drivers/media/dvb-frontends/lgs8gxx.c 	t |= serial ? TS_SERIAL : TS_PARALLEL;
t                 530 drivers/media/dvb-frontends/lgs8gxx.c 	t |= clk_pol ? TS_CLK_INVERTED : TS_CLK_NORMAL;
t                 531 drivers/media/dvb-frontends/lgs8gxx.c 	t |= clk_gated ? TS_CLK_GATED : TS_CLK_FREERUN;
t                 533 drivers/media/dvb-frontends/lgs8gxx.c 	ret = lgs8gxx_write_reg(priv, reg_addr, t);
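lgs8gxx configures its TS interface with the classic read-modify-write sequence: read the register, clear the field with a mask, OR in the new bits, write it back. The same shape repeats throughout the driver excerpts above; a hypothetical helper wrapping the real lgs8gxx_read_reg()/lgs8gxx_write_reg() calls could look like this (the private-state type name is assumed from the driver):

    static int lgs8gxx_update_bits(struct lgs8gxx_priv *priv, u8 reg,
                                   u8 mask, u8 val)
    {
            u8 t;
            int ret;

            ret = lgs8gxx_read_reg(priv, reg, &t);
            if (ret != 0)
                    return ret;

            t = (t & ~mask) | (val & mask);
            return lgs8gxx_write_reg(priv, reg, t);
    }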
t                 561 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 566 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0x7c, &t);
t                 567 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_write_reg(priv, 0x7c, (t&0x8c) | 0x3);
t                 570 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0xc3, &t);
t                 571 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_write_reg(priv, 0xc3, t&0x10);
t                 716 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t, locked = 0;
t                 730 drivers/media/dvb-frontends/lgs8gxx.c 	ret = lgs8gxx_read_reg(priv, 0x4B, &t);
t                 734 drivers/media/dvb-frontends/lgs8gxx.c 	dprintk("Reg 0x4B: 0x%02X\n", t);
t                 738 drivers/media/dvb-frontends/lgs8gxx.c 		if ((t & 0x40) == 0x40)
t                 740 drivers/media/dvb-frontends/lgs8gxx.c 		if ((t & 0x80) == 0x80)
t                 744 drivers/media/dvb-frontends/lgs8gxx.c 		if ((t & 0x80) == 0x80)
t                 791 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t; s8 ret;
t                 798 drivers/media/dvb-frontends/lgs8gxx.c 	ret = lgs8gxx_read_reg(priv, 0x4B, &t);
t                 803 drivers/media/dvb-frontends/lgs8gxx.c 		if ((t & 0xC0) == 0xC0) {
t                 826 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0x95, &t);
t                 827 drivers/media/dvb-frontends/lgs8gxx.c 	dprintk("%s: AVG Noise=0x%02X\n", __func__, t);
t                 834 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 839 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0xB1, &t);
t                 840 drivers/media/dvb-frontends/lgs8gxx.c 	v |= t;
t                 842 drivers/media/dvb-frontends/lgs8gxx.c 	lgs8gxx_read_reg(priv, 0xB0, &t);
t                 843 drivers/media/dvb-frontends/lgs8gxx.c 	v |= t;
t                 866 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 870 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x34, &t);
t                 872 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x95, &t);
t                 873 drivers/media/dvb-frontends/lgs8gxx.c 	dprintk("AVG Noise=0x%02X\n", t);
t                 874 drivers/media/dvb-frontends/lgs8gxx.c 	*snr = 256 - t;
t                 890 drivers/media/dvb-frontends/lgs8gxx.c 	u8 orig, t;
t                 895 drivers/media/dvb-frontends/lgs8gxx.c 		t = orig | 0x10;
t                 896 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x30, t);
t                 897 drivers/media/dvb-frontends/lgs8gxx.c 		t = orig | 0x18;
t                 898 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x30, t);
t                 899 drivers/media/dvb-frontends/lgs8gxx.c 		t = orig | 0x10;
t                 900 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x30, t);
t                 910 drivers/media/dvb-frontends/lgs8gxx.c 	u8 t;
t                 913 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, 0x30, &t);
t                 914 drivers/media/dvb-frontends/lgs8gxx.c 		t &= 0xE7;
t                 915 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_write_reg(priv, 0x30, t);
t                 924 drivers/media/dvb-frontends/lgs8gxx.c 	u8 reg_err, reg_total, t;
t                 942 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, reg_total+3-i, &t);
t                 943 drivers/media/dvb-frontends/lgs8gxx.c 		total_cnt |= t;
t                 947 drivers/media/dvb-frontends/lgs8gxx.c 		lgs8gxx_read_reg(priv, reg_err+3-i, &t);
t                 948 drivers/media/dvb-frontends/lgs8gxx.c 		err_cnt |= t;
t                 398 drivers/media/dvb-frontends/mt312.c 			  const enum fe_sec_tone_mode t)
t                 406 drivers/media/dvb-frontends/mt312.c 	if (t > SEC_TONE_OFF)
t                 414 drivers/media/dvb-frontends/mt312.c 			     (diseqc_mode & 0x40) | tone_tab[t]);
t                 295 drivers/media/dvb-frontends/tda10048.c 	u64 t;
t                 304 drivers/media/dvb-frontends/tda10048.c 		t = if_hz;
t                 305 drivers/media/dvb-frontends/tda10048.c 		t *= 10;
t                 306 drivers/media/dvb-frontends/tda10048.c 		t *= 32768;
t                 307 drivers/media/dvb-frontends/tda10048.c 		do_div(t, sample_freq_hz);
t                 308 drivers/media/dvb-frontends/tda10048.c 		t += 5;
t                 309 drivers/media/dvb-frontends/tda10048.c 		do_div(t, 10);
t                 312 drivers/media/dvb-frontends/tda10048.c 		t = sample_freq_hz - if_hz;
t                 313 drivers/media/dvb-frontends/tda10048.c 		t *= 10;
t                 314 drivers/media/dvb-frontends/tda10048.c 		t *= 32768;
t                 315 drivers/media/dvb-frontends/tda10048.c 		do_div(t, sample_freq_hz);
t                 316 drivers/media/dvb-frontends/tda10048.c 		t += 5;
t                 317 drivers/media/dvb-frontends/tda10048.c 		do_div(t, 10);
t                 318 drivers/media/dvb-frontends/tda10048.c 		t = ~t + 1;
t                 321 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_FREQ_PHY2_LSB, (u8)t);
t                 322 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_FREQ_PHY2_MSB, (u8)(t >> 8));
t                 331 drivers/media/dvb-frontends/tda10048.c 	u64 t, z;
t                 339 drivers/media/dvb-frontends/tda10048.c 	t = bw * 10;
t                 342 drivers/media/dvb-frontends/tda10048.c 	t *= (2048 * 1024);
t                 343 drivers/media/dvb-frontends/tda10048.c 	t *= 1024;
t                 345 drivers/media/dvb-frontends/tda10048.c 	do_div(t, z);
t                 346 drivers/media/dvb-frontends/tda10048.c 	t += 5;
t                 347 drivers/media/dvb-frontends/tda10048.c 	do_div(t, 10);
t                 349 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_WREF_LSB, (u8)t);
t                 350 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_WREF_MID1, (u8)(t >> 8));
t                 351 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_WREF_MID2, (u8)(t >> 16));
t                 352 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_WREF_MSB, (u8)(t >> 24));
t                 361 drivers/media/dvb-frontends/tda10048.c 	u64 t;
t                 369 drivers/media/dvb-frontends/tda10048.c 	t = sample_freq_hz;
t                 370 drivers/media/dvb-frontends/tda10048.c 	t *= 7;
t                 371 drivers/media/dvb-frontends/tda10048.c 	t *= 32;
t                 372 drivers/media/dvb-frontends/tda10048.c 	t *= 10;
t                 373 drivers/media/dvb-frontends/tda10048.c 	do_div(t, bw);
t                 374 drivers/media/dvb-frontends/tda10048.c 	t += 5;
t                 375 drivers/media/dvb-frontends/tda10048.c 	do_div(t, 10);
t                 377 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_INVWREF_LSB, (u8)t);
t                 378 drivers/media/dvb-frontends/tda10048.c 	tda10048_writereg(state, TDA10048_TIME_INVWREF_MSB, (u8)(t >> 8));
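The tda10048 (and atbm8830) arithmetic above leans on do_div() for 64-bit division on 32-bit machines, plus a rounding trick: scale the numerator by an extra factor of 10 before dividing, then add 5 and divide by 10, so the final quotient is rounded to the nearest integer instead of truncated. A sketch of the idiom with a hypothetical wrapper:

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Hypothetical helper: (value * mul) / div, rounded to nearest. */
    static u32 scale_round(u32 value, u32 mul, u32 div)
    {
            u64 t = value;

            t *= mul;
            t *= 10;                /* keep one extra decimal digit        */
            do_div(t, div);         /* do_div() writes the quotient into t */
            t += 5;                 /* round half up ...                   */
            do_div(t, 10);          /* ... then drop the extra digit       */

            return (u32)t;
    }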
t                  33 drivers/media/i2c/adp1653.c #define TIMEOUT_US_TO_CODE(t)	((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \
t                 414 drivers/media/i2c/adv748x/adv748x.h #define tx_read(t, r) adv748x_read(t->state, t->page, r)
t                 415 drivers/media/i2c/adv748x/adv748x.h #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
t                 220 drivers/media/i2c/adv7604.c static bool adv76xx_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl)
t                 225 drivers/media/i2c/adv7604.c 		if (v4l2_match_dv_timings(t, adv76xx_timings_exceptions + i, 0, false))
t                 325 drivers/media/i2c/adv7604.c static inline unsigned htotal(const struct v4l2_bt_timings *t)
t                 327 drivers/media/i2c/adv7604.c 	return V4L2_DV_BT_FRAME_WIDTH(t);
t                 330 drivers/media/i2c/adv7604.c static inline unsigned vtotal(const struct v4l2_bt_timings *t)
t                 332 drivers/media/i2c/adv7604.c 	return V4L2_DV_BT_FRAME_HEIGHT(t);
t                 147 drivers/media/i2c/adv7842.c static bool adv7842_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl)
t                 152 drivers/media/i2c/adv7842.c 		if (v4l2_match_dv_timings(t, adv7842_timings_exceptions + i, 0, false))
t                 257 drivers/media/i2c/adv7842.c static inline unsigned hblanking(const struct v4l2_bt_timings *t)
t                 259 drivers/media/i2c/adv7842.c 	return V4L2_DV_BT_BLANKING_WIDTH(t);
t                 262 drivers/media/i2c/adv7842.c static inline unsigned htotal(const struct v4l2_bt_timings *t)
t                 264 drivers/media/i2c/adv7842.c 	return V4L2_DV_BT_FRAME_WIDTH(t);
t                 267 drivers/media/i2c/adv7842.c static inline unsigned vblanking(const struct v4l2_bt_timings *t)
t                 269 drivers/media/i2c/adv7842.c 	return V4L2_DV_BT_BLANKING_HEIGHT(t);
t                 272 drivers/media/i2c/adv7842.c static inline unsigned vtotal(const struct v4l2_bt_timings *t)
t                 274 drivers/media/i2c/adv7842.c 	return V4L2_DV_BT_FRAME_HEIGHT(t);
t                 453 drivers/media/i2c/et8ek8/et8ek8_driver.c #define TIMEPERFRAME_AVG_FPS(t)						\
t                 454 drivers/media/i2c/et8ek8/et8ek8_driver.c 	(((t).denominator + ((t).numerator >> 1)) / (t).numerator)
t                 995 drivers/media/i2c/ov7251.c static inline u32 avg_fps(const struct v4l2_fract *t)
t                 997 drivers/media/i2c/ov7251.c 	return (t->denominator + (t->numerator >> 1)) / t->numerator;
t                 157 drivers/media/i2c/sony-btf-mpx.c static int mpx_setup(struct sony_btf_mpx *t)
t                 159 drivers/media/i2c/sony-btf-mpx.c 	struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
t                 163 drivers/media/i2c/sony-btf-mpx.c 	int mode = t->mpxmode;
t                 177 drivers/media/i2c/sony-btf-mpx.c 	if (t->audmode != V4L2_TUNER_MODE_MONO)
t                 181 drivers/media/i2c/sony-btf-mpx.c 		switch (t->audmode) {
t                 224 drivers/media/i2c/sony-btf-mpx.c 			t->audmode == V4L2_TUNER_MODE_MONO ? 0x07f0 : 0x0190);
t                 269 drivers/media/i2c/sony-btf-mpx.c 	struct sony_btf_mpx *t = to_state(sd);
t                 281 drivers/media/i2c/sony-btf-mpx.c 	if (default_mpx_mode != t->mpxmode) {
t                 282 drivers/media/i2c/sony-btf-mpx.c 		t->mpxmode = default_mpx_mode;
t                 283 drivers/media/i2c/sony-btf-mpx.c 		mpx_setup(t);
t                 290 drivers/media/i2c/sony-btf-mpx.c 	struct sony_btf_mpx *t = to_state(sd);
t                 298 drivers/media/i2c/sony-btf-mpx.c 	vt->audmode = t->audmode;
t                 304 drivers/media/i2c/sony-btf-mpx.c 	struct sony_btf_mpx *t = to_state(sd);
t                 309 drivers/media/i2c/sony-btf-mpx.c 	if (vt->audmode != t->audmode) {
t                 310 drivers/media/i2c/sony-btf-mpx.c 		t->audmode = vt->audmode;
t                 311 drivers/media/i2c/sony-btf-mpx.c 		mpx_setup(t);
t                 337 drivers/media/i2c/sony-btf-mpx.c 	struct sony_btf_mpx *t;
t                 346 drivers/media/i2c/sony-btf-mpx.c 	t = devm_kzalloc(&client->dev, sizeof(*t), GFP_KERNEL);
t                 347 drivers/media/i2c/sony-btf-mpx.c 	if (t == NULL)
t                 350 drivers/media/i2c/sony-btf-mpx.c 	sd = &t->sd;
t                 354 drivers/media/i2c/sony-btf-mpx.c 	t->mpxmode = 0;
t                 355 drivers/media/i2c/sony-btf-mpx.c 	t->audmode = V4L2_TUNER_MODE_STEREO;
t                 295 drivers/media/i2c/tc358743.c static inline unsigned fps(const struct v4l2_bt_timings *t)
t                 297 drivers/media/i2c/tc358743.c 	if (!V4L2_DV_BT_FRAME_HEIGHT(t) || !V4L2_DV_BT_FRAME_WIDTH(t))
t                 300 drivers/media/i2c/tc358743.c 	return DIV_ROUND_CLOSEST((unsigned)t->pixelclock,
t                 301 drivers/media/i2c/tc358743.c 			V4L2_DV_BT_FRAME_HEIGHT(t) * V4L2_DV_BT_FRAME_WIDTH(t));
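fps() in tc358743 above is the whole frame-rate formula: pixel clock divided by total frame size (active plus blanking in both directions), rounded to the nearest integer with DIV_ROUND_CLOSEST(); the hand-written (denominator + numerator/2) / numerator in ov7251 and et8ek8 earlier is the same rounding spelled out. A minimal standalone sketch:

    #include <linux/kernel.h>        /* DIV_ROUND_CLOSEST() */

    static unsigned int fps_from_timings(unsigned int pixelclock,
                                         unsigned int frame_width,
                                         unsigned int frame_height)
    {
            if (!frame_width || !frame_height)
                    return 0;

            return DIV_ROUND_CLOSEST(pixelclock, frame_width * frame_height);
    }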
t                1471 drivers/media/i2c/tc358743.c static void tc358743_irq_poll_timer(struct timer_list *t)
t                1473 drivers/media/i2c/tc358743.c 	struct tc358743_state *state = from_timer(state, t, timer);
t                 274 drivers/media/i2c/tda7432.c 	struct tda7432 *t = to_state(sd);
t                 280 drivers/media/i2c/tda7432.c 		if (t->balance->val < 0) {
t                 282 drivers/media/i2c/tda7432.c 			rr = rf = -t->balance->val;
t                 284 drivers/media/i2c/tda7432.c 		} else if (t->balance->val > 0) {
t                 287 drivers/media/i2c/tda7432.c 			lr = lf = t->balance->val;
t                 293 drivers/media/i2c/tda7432.c 		if (t->mute->val) {
t                 313 drivers/media/i2c/tda7432.c 		bass = t->bass->val;
t                 314 drivers/media/i2c/tda7432.c 		treble = t->treble->val;
t                 349 drivers/media/i2c/tda7432.c 	struct tda7432 *t;
t                 355 drivers/media/i2c/tda7432.c 	t = devm_kzalloc(&client->dev, sizeof(*t), GFP_KERNEL);
t                 356 drivers/media/i2c/tda7432.c 	if (!t)
t                 358 drivers/media/i2c/tda7432.c 	sd = &t->sd;
t                 360 drivers/media/i2c/tda7432.c 	v4l2_ctrl_handler_init(&t->hdl, 5);
t                 361 drivers/media/i2c/tda7432.c 	v4l2_ctrl_new_std(&t->hdl, &tda7432_ctrl_ops,
t                 363 drivers/media/i2c/tda7432.c 	t->mute = v4l2_ctrl_new_std(&t->hdl, &tda7432_ctrl_ops,
t                 365 drivers/media/i2c/tda7432.c 	t->balance = v4l2_ctrl_new_std(&t->hdl, &tda7432_ctrl_ops,
t                 367 drivers/media/i2c/tda7432.c 	t->bass = v4l2_ctrl_new_std(&t->hdl, &tda7432_ctrl_ops,
t                 369 drivers/media/i2c/tda7432.c 	t->treble = v4l2_ctrl_new_std(&t->hdl, &tda7432_ctrl_ops,
t                 371 drivers/media/i2c/tda7432.c 	sd->ctrl_handler = &t->hdl;
t                 372 drivers/media/i2c/tda7432.c 	if (t->hdl.error) {
t                 373 drivers/media/i2c/tda7432.c 		int err = t->hdl.error;
t                 375 drivers/media/i2c/tda7432.c 		v4l2_ctrl_handler_free(&t->hdl);
t                 378 drivers/media/i2c/tda7432.c 	v4l2_ctrl_cluster(2, &t->bass);
t                 379 drivers/media/i2c/tda7432.c 	v4l2_ctrl_cluster(2, &t->mute);
t                 380 drivers/media/i2c/tda7432.c 	v4l2_ctrl_handler_setup(&t->hdl);
t                 396 drivers/media/i2c/tda7432.c 	struct tda7432 *t = to_state(sd);
t                 400 drivers/media/i2c/tda7432.c 	v4l2_ctrl_handler_free(&t->hdl);
t                  81 drivers/media/i2c/tda9840.c static int tda9840_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *t)
t                  86 drivers/media/i2c/tda9840.c 	if (t->index)
t                  93 drivers/media/i2c/tda9840.c 		byte = (t->audmode == V4L2_TUNER_MODE_MONO) ?
t                  96 drivers/media/i2c/tda9840.c 		switch (t->audmode) {
t                 113 drivers/media/i2c/tda9840.c static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
t                 120 drivers/media/i2c/tda9840.c 	t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                 124 drivers/media/i2c/tda9840.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                 127 drivers/media/i2c/tda9840.c 		t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
t                 130 drivers/media/i2c/tda9840.c 		t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
t                 133 drivers/media/i2c/tda9840.c 		t->rxsubchans = V4L2_TUNER_MODE_MONO;
t                  61 drivers/media/i2c/ths8200.c static inline unsigned htotal(const struct v4l2_bt_timings *t)
t                  63 drivers/media/i2c/ths8200.c 	return V4L2_DV_BT_FRAME_WIDTH(t);
t                  66 drivers/media/i2c/ths8200.c static inline unsigned vtotal(const struct v4l2_bt_timings *t)
t                  68 drivers/media/i2c/ths8200.c 	return V4L2_DV_BT_FRAME_HEIGHT(t);
t                 319 drivers/media/i2c/tvaudio.c static void chip_thread_wake(struct timer_list *t)
t                 321 drivers/media/i2c/tvaudio.c 	struct CHIPSTATE *chip = from_timer(chip, t, wt);
t                 441 drivers/media/i2c/tvaudio.c 	int t = chip->shadow.bytes[TDA9840_SW + 1] & ~0x7e;
t                 445 drivers/media/i2c/tvaudio.c 		t |= TDA9840_MONO;
t                 448 drivers/media/i2c/tvaudio.c 		t |= TDA9840_STEREO;
t                 451 drivers/media/i2c/tvaudio.c 		t |= TDA9840_DUALA;
t                 454 drivers/media/i2c/tvaudio.c 		t |= TDA9840_DUALB;
t                 457 drivers/media/i2c/tvaudio.c 		t |= TDA9840_DUALAB;
t                 464 drivers/media/i2c/tvaudio.c 		chip_write(chip, TDA9840_SW, t);
t                1451 drivers/media/i2c/tvaudio.c 	audiocmd *t = NULL;
t                1457 drivers/media/i2c/tvaudio.c 		t = &ta8874z_mono;
t                1460 drivers/media/i2c/tvaudio.c 		t = &ta8874z_stereo;
t                1463 drivers/media/i2c/tvaudio.c 		t = &ta8874z_main;
t                1466 drivers/media/i2c/tvaudio.c 		t = &ta8874z_sub;
t                1469 drivers/media/i2c/tvaudio.c 		t = &ta8874z_both;
t                1476 drivers/media/i2c/tvaudio.c 		chip_cmd(chip, "TA8874Z", t);
t                 559 drivers/media/i2c/tvp7002.c 		const struct v4l2_bt_timings *t = &tvp7002_timings[i].timings.bt;
t                 561 drivers/media/i2c/tvp7002.c 		if (!memcmp(bt, t, &bt->standards - &bt->width)) {
t                  55 drivers/media/pci/bt8xx/bttv-audio-hook.c void gvbctv3pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                  61 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                  62 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                  71 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                  86 drivers/media/pci/bt8xx/bttv-audio-hook.c void gvbctv5pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                  95 drivers/media/pci/bt8xx/bttv-audio-hook.c 		switch (t->audmode) {
t                 114 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_LANG1 |  V4L2_TUNER_SUB_LANG2;
t                 115 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_LANG1_LANG2;
t                 118 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_LANG2;
t                 119 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_LANG1_LANG2;
t                 122 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_LANG1;
t                 123 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_LANG1_LANG2;
t                 126 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_STEREO;
t                 127 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_STEREO;
t                 130 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                 131 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_MONO;
t                 134 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 138 drivers/media/pci/bt8xx/bttv-audio-hook.c 			t->audmode = V4L2_TUNER_MODE_LANG1;
t                 157 drivers/media/pci/bt8xx/bttv-audio-hook.c void avermedia_tvphone_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 163 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 164 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 172 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 188 drivers/media/pci/bt8xx/bttv-audio-hook.c void avermedia_tv_stereo_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 194 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 195 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 203 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 221 drivers/media/pci/bt8xx/bttv-audio-hook.c void lt9415_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 226 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_MONO;
t                 232 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 233 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 241 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 259 drivers/media/pci/bt8xx/bttv-audio-hook.c void terratv_audio(struct bttv *btv,  struct v4l2_tuner *t, int set)
t                 265 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 266 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 275 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 292 drivers/media/pci/bt8xx/bttv-audio-hook.c void winfast2000_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 298 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 299 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 308 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 336 drivers/media/pci/bt8xx/bttv-audio-hook.c void pvbt878p9b_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 345 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 346 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 354 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 377 drivers/media/pci/bt8xx/bttv-audio-hook.c void fv2000s_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 386 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 387 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 395 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 416 drivers/media/pci/bt8xx/bttv-audio-hook.c void windvr_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 422 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 423 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 431 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                 451 drivers/media/pci/bt8xx/bttv-audio-hook.c void adtvk503_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
t                 459 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 460 drivers/media/pci/bt8xx/bttv-audio-hook.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO |
t                 469 drivers/media/pci/bt8xx/bttv-audio-hook.c 	switch (t->audmode) {
t                1787 drivers/media/pci/bt8xx/bttv-driver.c 					const struct v4l2_tuner *t)
t                1792 drivers/media/pci/bt8xx/bttv-driver.c 	if (t->index)
t                1795 drivers/media/pci/bt8xx/bttv-driver.c 	bttv_call_all(btv, tuner, s_tuner, t);
t                1798 drivers/media/pci/bt8xx/bttv-driver.c 		struct v4l2_tuner copy = *t;
t                2720 drivers/media/pci/bt8xx/bttv-driver.c 				struct v4l2_tuner *t)
t                2725 drivers/media/pci/bt8xx/bttv-driver.c 	if (0 != t->index)
t                2728 drivers/media/pci/bt8xx/bttv-driver.c 	t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                2729 drivers/media/pci/bt8xx/bttv-driver.c 	t->capability = V4L2_TUNER_CAP_NORM;
t                2730 drivers/media/pci/bt8xx/bttv-driver.c 	bttv_call_all(btv, tuner, g_tuner, t);
t                2731 drivers/media/pci/bt8xx/bttv-driver.c 	strscpy(t->name, "Television", sizeof(t->name));
t                2732 drivers/media/pci/bt8xx/bttv-driver.c 	t->type       = V4L2_TUNER_ANALOG_TV;
t                2734 drivers/media/pci/bt8xx/bttv-driver.c 		t->signal = 0xffff;
t                2737 drivers/media/pci/bt8xx/bttv-driver.c 		btv->audio_mode_gpio(btv, t, 0);
t                3197 drivers/media/pci/bt8xx/bttv-driver.c static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
t                3202 drivers/media/pci/bt8xx/bttv-driver.c 	if (0 != t->index)
t                3204 drivers/media/pci/bt8xx/bttv-driver.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                3205 drivers/media/pci/bt8xx/bttv-driver.c 	t->type = V4L2_TUNER_RADIO;
t                3208 drivers/media/pci/bt8xx/bttv-driver.c 	bttv_call_all(btv, tuner, g_tuner, t);
t                3211 drivers/media/pci/bt8xx/bttv-driver.c 		btv->audio_mode_gpio(btv, t, 0);
t                3214 drivers/media/pci/bt8xx/bttv-driver.c 		return snd_tea575x_g_tuner(&btv->tea, t);
t                3220 drivers/media/pci/bt8xx/bttv-driver.c 					const struct v4l2_tuner *t)
t                3225 drivers/media/pci/bt8xx/bttv-driver.c 	if (0 != t->index)
t                3229 drivers/media/pci/bt8xx/bttv-driver.c 	bttv_call_all(btv, tuner, s_tuner, t);
t                3597 drivers/media/pci/bt8xx/bttv-driver.c static void bttv_irq_timeout(struct timer_list *t)
t                3599 drivers/media/pci/bt8xx/bttv-driver.c 	struct bttv *btv = from_timer(btv, t, timeout);
t                 127 drivers/media/pci/bt8xx/bttv-input.c static void bttv_input_timer(struct timer_list *t)
t                 129 drivers/media/pci/bt8xx/bttv-input.c 	struct bttv_ir *ir = from_timer(ir, t, timer);
t                 183 drivers/media/pci/bt8xx/bttv-input.c static void bttv_rc5_timer_end(struct timer_list *t)
t                 185 drivers/media/pci/bt8xx/bttv-input.c 	struct bttv_ir *ir = from_timer(ir, t, timer);
t                 678 drivers/media/pci/cx18/cx18-fileops.c void cx18_vb_timeout(struct timer_list *t)
t                 680 drivers/media/pci/cx18/cx18-fileops.c 	struct cx18_stream *s = from_timer(s, t, vb_timeout);
t                  23 drivers/media/pci/cx18/cx18-fileops.h void cx18_vb_timeout(struct timer_list *t);
t                1266 drivers/media/pci/cx23885/cx23885-417.c 				struct v4l2_tuner *t)
t                1272 drivers/media/pci/cx23885/cx23885-417.c 	if (0 != t->index)
t                1274 drivers/media/pci/cx23885/cx23885-417.c 	strscpy(t->name, "Television", sizeof(t->name));
t                1275 drivers/media/pci/cx23885/cx23885-417.c 	call_all(dev, tuner, g_tuner, t);
t                1277 drivers/media/pci/cx23885/cx23885-417.c 	dprintk(1, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
t                1283 drivers/media/pci/cx23885/cx23885-417.c 				const struct v4l2_tuner *t)
t                1291 drivers/media/pci/cx23885/cx23885-417.c 	call_all(dev, tuner, s_tuner, t);
t                 882 drivers/media/pci/cx23885/cx23885-video.c 				struct v4l2_tuner *t)
t                 888 drivers/media/pci/cx23885/cx23885-video.c 	if (0 != t->index)
t                 891 drivers/media/pci/cx23885/cx23885-video.c 	strscpy(t->name, "Television", sizeof(t->name));
t                 893 drivers/media/pci/cx23885/cx23885-video.c 	call_all(dev, tuner, g_tuner, t);
t                 898 drivers/media/pci/cx23885/cx23885-video.c 				const struct v4l2_tuner *t)
t                 904 drivers/media/pci/cx23885/cx23885-video.c 	if (0 != t->index)
t                 907 drivers/media/pci/cx23885/cx23885-video.c 	call_all(dev, tuner, s_tuner, t);
t                 976 drivers/media/pci/cx88/cx88-blackbird.c 			  struct v4l2_tuner *t)
t                 984 drivers/media/pci/cx88/cx88-blackbird.c 	if (t->index != 0)
t                 987 drivers/media/pci/cx88/cx88-blackbird.c 	strscpy(t->name, "Television", sizeof(t->name));
t                 988 drivers/media/pci/cx88/cx88-blackbird.c 	t->capability = V4L2_TUNER_CAP_NORM;
t                 989 drivers/media/pci/cx88/cx88-blackbird.c 	t->rangehigh  = 0xffffffffUL;
t                 990 drivers/media/pci/cx88/cx88-blackbird.c 	call_all(core, tuner, g_tuner, t);
t                 992 drivers/media/pci/cx88/cx88-blackbird.c 	cx88_get_stereo(core, t);
t                 994 drivers/media/pci/cx88/cx88-blackbird.c 	t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
t                 999 drivers/media/pci/cx88/cx88-blackbird.c 			  const struct v4l2_tuner *t)
t                1006 drivers/media/pci/cx88/cx88-blackbird.c 	if (t->index != 0)
t                1009 drivers/media/pci/cx88/cx88-blackbird.c 	cx88_set_stereo(core, t->audmode, 1);
t                 808 drivers/media/pci/cx88/cx88-tvaudio.c void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
t                 826 drivers/media/pci/cx88/cx88-tvaudio.c 	t->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_SAP |
t                 828 drivers/media/pci/cx88/cx88-tvaudio.c 	t->rxsubchans = UNSET;
t                 829 drivers/media/pci/cx88/cx88-tvaudio.c 	t->audmode = V4L2_TUNER_MODE_MONO;
t                 833 drivers/media/pci/cx88/cx88-tvaudio.c 		t->audmode = V4L2_TUNER_MODE_STEREO;
t                 836 drivers/media/pci/cx88/cx88-tvaudio.c 		t->audmode = V4L2_TUNER_MODE_LANG2;
t                 839 drivers/media/pci/cx88/cx88-tvaudio.c 		t->audmode = V4L2_TUNER_MODE_MONO;
t                 842 drivers/media/pci/cx88/cx88-tvaudio.c 		t->audmode = V4L2_TUNER_MODE_SAP;
t                 853 drivers/media/pci/cx88/cx88-tvaudio.c 			t->rxsubchans = cx88_dsp_detect_stereo_sap(core);
t                 868 drivers/media/pci/cx88/cx88-tvaudio.c 	if (t->rxsubchans == UNSET) {
t                 869 drivers/media/pci/cx88/cx88-tvaudio.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                 874 drivers/media/pci/cx88/cx88-tvaudio.c 		if (t->audmode == V4L2_TUNER_MODE_STEREO)
t                 875 drivers/media/pci/cx88/cx88-tvaudio.c 			t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
t                 990 drivers/media/pci/cx88/cx88-tvaudio.c 	struct v4l2_tuner t;
t                1011 drivers/media/pci/cx88/cx88-tvaudio.c 			memset(&t, 0, sizeof(t));
t                1012 drivers/media/pci/cx88/cx88-tvaudio.c 			cx88_get_stereo(core, &t);
t                1019 drivers/media/pci/cx88/cx88-tvaudio.c 			if (t.rxsubchans & V4L2_TUNER_SUB_STEREO)
t                 909 drivers/media/pci/cx88/cx88-video.c 			  struct v4l2_tuner *t)
t                 917 drivers/media/pci/cx88/cx88-video.c 	if (t->index != 0)
t                 920 drivers/media/pci/cx88/cx88-video.c 	strscpy(t->name, "Television", sizeof(t->name));
t                 921 drivers/media/pci/cx88/cx88-video.c 	t->capability = V4L2_TUNER_CAP_NORM;
t                 922 drivers/media/pci/cx88/cx88-video.c 	t->rangehigh  = 0xffffffffUL;
t                 923 drivers/media/pci/cx88/cx88-video.c 	call_all(core, tuner, g_tuner, t);
t                 925 drivers/media/pci/cx88/cx88-video.c 	cx88_get_stereo(core, t);
t                 927 drivers/media/pci/cx88/cx88-video.c 	t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
t                 932 drivers/media/pci/cx88/cx88-video.c 			  const struct v4l2_tuner *t)
t                 939 drivers/media/pci/cx88/cx88-video.c 	if (t->index != 0)
t                 942 drivers/media/pci/cx88/cx88-video.c 	cx88_set_stereo(core, t->audmode, 1);
t                1025 drivers/media/pci/cx88/cx88-video.c 			 struct v4l2_tuner *t)
t                1030 drivers/media/pci/cx88/cx88-video.c 	if (unlikely(t->index > 0))
t                1033 drivers/media/pci/cx88/cx88-video.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                1035 drivers/media/pci/cx88/cx88-video.c 	call_all(core, tuner, g_tuner, t);
t                1040 drivers/media/pci/cx88/cx88-video.c 			 const struct v4l2_tuner *t)
t                1045 drivers/media/pci/cx88/cx88-video.c 	if (t->index != 0)
t                1048 drivers/media/pci/cx88/cx88-video.c 	call_all(core, tuner, s_tuner, t);
t                 685 drivers/media/pci/cx88/cx88.h void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
t                1065 drivers/media/pci/ivtv/ivtv-irq.c void ivtv_unfinished_dma(struct timer_list *t)
t                1067 drivers/media/pci/ivtv/ivtv-irq.c 	struct ivtv *itv = from_timer(itv, t, dma_timer);
t                  39 drivers/media/pci/ivtv/ivtv-irq.h void ivtv_unfinished_dma(struct timer_list *t);
t                 632 drivers/media/pci/netup_unidvb/netup_unidvb_core.c static void netup_unidvb_dma_timeout(struct timer_list *t)
t                 634 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 	struct netup_dma *dma = from_timer(dma, t, timeout);
t                 100 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 	struct spi_transfer *t;
t                 107 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 	list_for_each_entry(t, &msg->transfers, transfer_list) {
t                 108 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 		tr_size = t->len;
t                 110 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 			u32 frag_offset = t->len - tr_size;
t                 115 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 			if (list_is_last(&t->transfer_list,
t                 117 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 					frag_offset + frag_size == t->len) {
t                 120 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 			if (t->tx_buf) {
t                 122 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 					t->tx_buf + frag_offset,
t                 141 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 				if (t->rx_buf) {
t                 142 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c 					memcpy_fromio(t->rx_buf + frag_offset,
t                 332 drivers/media/pci/saa7134/saa7134-core.c void saa7134_buffer_timeout(struct timer_list *t)
t                 334 drivers/media/pci/saa7134/saa7134-core.c 	struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
t                 432 drivers/media/pci/saa7134/saa7134-input.c static void saa7134_input_timer(struct timer_list *t)
t                 434 drivers/media/pci/saa7134/saa7134-input.c 	struct saa7134_card_ir *ir = from_timer(ir, t, timer);
t                 210 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_TASK_CONDITIONS(t)              (0x000 +t)
t                 211 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_FIELD_HANDLING(t)               (0x001 +t)
t                 212 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_DATA_PATH(t)                    (0x002 +t)
t                 213 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_START1(t)                 (0x004 +t)
t                 214 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_START2(t)                 (0x005 +t)
t                 215 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_STOP1(t)                  (0x006 +t)
t                 216 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_STOP2(t)                  (0x007 +t)
t                 217 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_START1(t)                 (0x008 +t)
t                 218 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_START2(t)                 (0x009 +t)
t                 219 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_STOP1(t)                  (0x00a +t)
t                 220 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_STOP2(t)                  (0x00b +t)
t                 221 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_LEN1(t)                   (0x00c +t)
t                 222 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_LEN2(t)                   (0x00d +t)
t                 223 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_LEN1(t)                   (0x00e +t)
t                 224 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_V_LEN2(t)                   (0x00f +t)
t                 226 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_H_START1(t)               (0x014 +t)
t                 227 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_H_START2(t)               (0x015 +t)
t                 228 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_H_STOP1(t)                (0x016 +t)
t                 229 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_H_STOP2(t)                (0x017 +t)
t                 230 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_V_START1(t)               (0x018 +t)
t                 231 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_V_START2(t)               (0x019 +t)
t                 232 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_V_STOP1(t)                (0x01a +t)
t                 233 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_V_STOP2(t)                (0x01b +t)
t                 234 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_PIXELS1(t)                (0x01c +t)
t                 235 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_PIXELS2(t)                (0x01d +t)
t                 236 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_LINES1(t)                 (0x01e +t)
t                 237 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VIDEO_LINES2(t)                 (0x01f +t)
t                 239 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_H_PRESCALE(t)                   (0x020 +t)
t                 240 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_ACC_LENGTH(t)                   (0x021 +t)
t                 241 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_LEVEL_CTRL(t)                   (0x022 +t)
t                 242 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_FIR_PREFILTER_CTRL(t)           (0x023 +t)
t                 243 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_LUMA_BRIGHT(t)                  (0x024 +t)
t                 244 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_LUMA_CONTRAST(t)                (0x025 +t)
t                 245 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_CHROMA_SATURATION(t)            (0x026 +t)
t                 246 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_SCALE_INC1(t)             (0x028 +t)
t                 247 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_H_SCALE_INC2(t)             (0x029 +t)
t                 248 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_PHASE_OFFSET_LUMA(t)        (0x02a +t)
t                 249 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_VBI_PHASE_OFFSET_CHROMA(t)      (0x02b +t)
t                 250 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_H_SCALE_INC1(t)                 (0x02c +t)
t                 251 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_H_SCALE_INC2(t)                 (0x02d +t)
t                 252 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_H_PHASE_OFF_LUMA(t)             (0x02e +t)
t                 253 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_H_PHASE_OFF_CHROMA(t)           (0x02f +t)
t                 254 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_SCALE_RATIO1(t)               (0x030 +t)
t                 255 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_SCALE_RATIO2(t)               (0x031 +t)
t                 256 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_FILTER(t)                     (0x032 +t)
t                 257 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_PHASE_OFFSET0(t)              (0x034 +t)
t                 258 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_PHASE_OFFSET1(t)              (0x035 +t)
t                 259 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_PHASE_OFFSET2(t)              (0x036 +t)
t                 260 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_V_PHASE_OFFSET3(t)              (0x037 +t)
t                1681 drivers/media/pci/saa7134/saa7134-video.c 					struct v4l2_tuner *t)
t                1686 drivers/media/pci/saa7134/saa7134-video.c 	if (0 != t->index)
t                1688 drivers/media/pci/saa7134/saa7134-video.c 	memset(t, 0, sizeof(*t));
t                1697 drivers/media/pci/saa7134/saa7134-video.c 		strscpy(t->name, "Television", sizeof(t->name));
t                1698 drivers/media/pci/saa7134/saa7134-video.c 		t->type = V4L2_TUNER_ANALOG_TV;
t                1699 drivers/media/pci/saa7134/saa7134-video.c 		saa_call_all(dev, tuner, g_tuner, t);
t                1700 drivers/media/pci/saa7134/saa7134-video.c 		t->capability = V4L2_TUNER_CAP_NORM |
t                1704 drivers/media/pci/saa7134/saa7134-video.c 		t->rxsubchans = saa7134_tvaudio_getstereo(dev);
t                1705 drivers/media/pci/saa7134/saa7134-video.c 		t->audmode = saa7134_tvaudio_rx2mode(t->rxsubchans);
t                1708 drivers/media/pci/saa7134/saa7134-video.c 		t->signal = 0xffff;
t                1714 drivers/media/pci/saa7134/saa7134-video.c 					const struct v4l2_tuner *t)
t                1719 drivers/media/pci/saa7134/saa7134-video.c 	if (0 != t->index)
t                1727 drivers/media/pci/saa7134/saa7134-video.c 	if (mode != t->audmode)
t                1728 drivers/media/pci/saa7134/saa7134-video.c 		dev->thread.mode = t->audmode;
t                1876 drivers/media/pci/saa7134/saa7134-video.c 					struct v4l2_tuner *t)
t                1880 drivers/media/pci/saa7134/saa7134-video.c 	if (0 != t->index)
t                1883 drivers/media/pci/saa7134/saa7134-video.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                1885 drivers/media/pci/saa7134/saa7134-video.c 	saa_call_all(dev, tuner, g_tuner, t);
t                1886 drivers/media/pci/saa7134/saa7134-video.c 	t->audmode &= V4L2_TUNER_MODE_MONO | V4L2_TUNER_MODE_STEREO;
t                1888 drivers/media/pci/saa7134/saa7134-video.c 		t->signal = 0xf800 - ((saa_readb(0x581) & 0x1f) << 11);
t                1889 drivers/media/pci/saa7134/saa7134-video.c 		t->rxsubchans = (saa_readb(0x529) & 0x08) ?
t                1895 drivers/media/pci/saa7134/saa7134-video.c 					const struct v4l2_tuner *t)
t                1899 drivers/media/pci/saa7134/saa7134-video.c 	if (0 != t->index)
t                1902 drivers/media/pci/saa7134/saa7134-video.c 	saa_call_all(dev, tuner, s_tuner, t);
t                 770 drivers/media/pci/saa7134/saa7134.h void saa7134_buffer_timeout(struct timer_list *t);
t                 819 drivers/media/pci/saa7134/saa7134.h 					struct v4l2_tuner *t);
t                 821 drivers/media/pci/saa7134/saa7134.h 					const struct v4l2_tuner *t);
t                 175 drivers/media/pci/saa7146/mxb.c 	struct v4l2_tuner t = {
t                 179 drivers/media/pci/saa7146/mxb.c 	tda9840_call(mxb, tuner, s_tuner, &t);
t                 530 drivers/media/pci/saa7146/mxb.c static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
t                 535 drivers/media/pci/saa7146/mxb.c 	if (t->index) {
t                 537 drivers/media/pci/saa7146/mxb.c 		      t->index);
t                 541 drivers/media/pci/saa7146/mxb.c 	DEB_EE("VIDIOC_G_TUNER: %d\n", t->index);
t                 543 drivers/media/pci/saa7146/mxb.c 	memset(t, 0, sizeof(*t));
t                 544 drivers/media/pci/saa7146/mxb.c 	strscpy(t->name, "TV Tuner", sizeof(t->name));
t                 545 drivers/media/pci/saa7146/mxb.c 	t->type = V4L2_TUNER_ANALOG_TV;
t                 546 drivers/media/pci/saa7146/mxb.c 	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
t                 548 drivers/media/pci/saa7146/mxb.c 	t->audmode = mxb->cur_mode;
t                 549 drivers/media/pci/saa7146/mxb.c 	return call_all(dev, tuner, g_tuner, t);
t                 552 drivers/media/pci/saa7146/mxb.c static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *t)
t                 557 drivers/media/pci/saa7146/mxb.c 	if (t->index) {
t                 559 drivers/media/pci/saa7146/mxb.c 		      t->index);
t                 563 drivers/media/pci/saa7146/mxb.c 	mxb->cur_mode = t->audmode;
t                 564 drivers/media/pci/saa7146/mxb.c 	return call_all(dev, tuner, s_tuner, t);
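The tuner ioctls excerpted above from cx88, saa7134 and mxb share one shape: reject any tuner index other than 0, fill in a name and capability, then let the attached tuner sub-device complete the rest through the driver's call-all helper. A composite sketch of a g_tuner handler in that style, assuming a hypothetical driver whose state holds a struct v4l2_device named v4l2_dev; the per-driver call_all()/saa_call_all() wrappers are stood in for by the generic v4l2_device_call_all() seen in the USB drivers further down.

#include <linux/string.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>

/* Hypothetical driver state; only what the handler below needs. */
struct my_video_dev {
        struct v4l2_device v4l2_dev;
};

static int my_vidioc_g_tuner(struct file *file, void *priv,
                             struct v4l2_tuner *t)
{
        struct my_video_dev *dev = video_drvdata(file);

        if (t->index != 0)
                return -EINVAL;

        strscpy(t->name, "Television", sizeof(t->name));
        t->type = V4L2_TUNER_ANALOG_TV;
        t->capability = V4L2_TUNER_CAP_NORM;

        /* Let the tuner sub-device fill in signal, rxsubchans, audmode, ... */
        v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
        return 0;
}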
t                 922 drivers/media/pci/saa7164/saa7164-api.c 	struct tmComResDescrHeader *hdr, *t;
t                1060 drivers/media/pci/saa7164/saa7164-api.c 			t = (struct tmComResDescrHeader *)
t                1064 drivers/media/pci/saa7164/saa7164-api.c 				t = (struct tmComResDescrHeader *)
t                1066 drivers/media/pci/saa7164/saa7164-api.c 				switch (t->subtype) {
t                1069 drivers/media/pci/saa7164/saa7164-api.c 					(struct tmComResTSFormatDescrHeader *)t;
t                1081 drivers/media/pci/saa7164/saa7164-api.c 					(struct tmComResPSFormatDescrHeader *)t;
t                1093 drivers/media/pci/saa7164/saa7164-api.c 					(struct tmComResVBIFormatDescrHeader *)t;
t                1120 drivers/media/pci/saa7164/saa7164-api.c 						t->subtype);
t                1122 drivers/media/pci/saa7164/saa7164-api.c 				next_offset += t->len;
t                1483 drivers/media/pci/saa7164/saa7164-api.c 	struct tmComResGPIO t;
t                1491 drivers/media/pci/saa7164/saa7164-api.c 	t.pin = pin;
t                1492 drivers/media/pci/saa7164/saa7164-api.c 	t.state = state;
t                1495 drivers/media/pci/saa7164/saa7164-api.c 		EXU_GPIO_CONTROL, sizeof(t), &t);
t                 309 drivers/media/pci/saa7164/saa7164-encoder.c int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
t                 315 drivers/media/pci/saa7164/saa7164-encoder.c 	if (0 != t->index)
t                 318 drivers/media/pci/saa7164/saa7164-encoder.c 	strscpy(t->name, "tuner", sizeof(t->name));
t                 319 drivers/media/pci/saa7164/saa7164-encoder.c 	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
t                 320 drivers/media/pci/saa7164/saa7164-encoder.c 	t->rangelow = SAA7164_TV_MIN_FREQ;
t                 321 drivers/media/pci/saa7164/saa7164-encoder.c 	t->rangehigh = SAA7164_TV_MAX_FREQ;
t                 323 drivers/media/pci/saa7164/saa7164-encoder.c 	dprintk(DBGLVL_ENC, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
t                 329 drivers/media/pci/saa7164/saa7164-encoder.c 			   const struct v4l2_tuner *t)
t                 331 drivers/media/pci/saa7164/saa7164-encoder.c 	if (0 != t->index)
t                 590 drivers/media/pci/saa7164/saa7164.h int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
t                 591 drivers/media/pci/saa7164/saa7164.h int saa7164_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t);
t                1046 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c 	struct v4l2_fract *t = &sp->parm.capture.timeperframe;
t                1052 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c 	solo_enc->interval = calc_interval(fps, t->numerator, t->denominator);
t                 311 drivers/media/pci/ttpci/av7110_v4l.c static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
t                 318 drivers/media/pci/ttpci/av7110_v4l.c 	dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index);
t                 320 drivers/media/pci/ttpci/av7110_v4l.c 	if (!av7110->analog_tuner_flags || t->index != 0)
t                 323 drivers/media/pci/ttpci/av7110_v4l.c 	memset(t, 0, sizeof(*t));
t                 324 drivers/media/pci/ttpci/av7110_v4l.c 	strscpy((char *)t->name, "Television", sizeof(t->name));
t                 326 drivers/media/pci/ttpci/av7110_v4l.c 	t->type = V4L2_TUNER_ANALOG_TV;
t                 327 drivers/media/pci/ttpci/av7110_v4l.c 	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
t                 329 drivers/media/pci/ttpci/av7110_v4l.c 	t->rangelow = 772;	/* 48.25 MHZ / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */
t                 330 drivers/media/pci/ttpci/av7110_v4l.c 	t->rangehigh = 13684;	/* 855.25 MHz / 62.5 kHz = 13684 */
t                 332 drivers/media/pci/ttpci/av7110_v4l.c 	t->signal = 0xffff;
t                 333 drivers/media/pci/ttpci/av7110_v4l.c 	t->afc = 0;
t                 343 drivers/media/pci/ttpci/av7110_v4l.c 		t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
t                 344 drivers/media/pci/ttpci/av7110_v4l.c 		t->audmode = V4L2_TUNER_MODE_STEREO;
t                 347 drivers/media/pci/ttpci/av7110_v4l.c 		t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
t                 348 drivers/media/pci/ttpci/av7110_v4l.c 		t->audmode = V4L2_TUNER_MODE_LANG1;
t                 350 drivers/media/pci/ttpci/av7110_v4l.c 		t->rxsubchans = V4L2_TUNER_SUB_MONO;
t                 355 drivers/media/pci/ttpci/av7110_v4l.c static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *t)
t                 360 drivers/media/pci/ttpci/av7110_v4l.c 	dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index);
t                 365 drivers/media/pci/ttpci/av7110_v4l.c 	switch (t->audmode) {
t                 799 drivers/media/pci/tw5864/tw5864-video.c 	struct v4l2_fract *t = &sp->parm.capture.timeperframe;
t                 807 drivers/media/pci/tw5864/tw5864-video.c 	if (!t->numerator || !t->denominator) {
t                 808 drivers/media/pci/tw5864/tw5864-video.c 		t->numerator = time_base.numerator * input->frame_interval;
t                 809 drivers/media/pci/tw5864/tw5864-video.c 		t->denominator = time_base.denominator;
t                 810 drivers/media/pci/tw5864/tw5864-video.c 	} else if (t->denominator != time_base.denominator) {
t                 811 drivers/media/pci/tw5864/tw5864-video.c 		t->numerator = t->numerator * time_base.denominator /
t                 812 drivers/media/pci/tw5864/tw5864-video.c 			t->denominator;
t                 813 drivers/media/pci/tw5864/tw5864-video.c 		t->denominator = time_base.denominator;
t                 816 drivers/media/pci/tw5864/tw5864-video.c 	input->frame_interval = t->numerator / time_base.numerator;
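The tw5864 fragment above normalises a user-supplied timeperframe against the driver's fixed time base and then derives an integer frame_interval from it. A stand-alone restatement of that integer arithmetic (plain userspace C, with a hypothetical 1/25 time base; the real driver's time base depends on the video standard):

#include <stdio.h>

int main(void)
{
        unsigned int tb_num = 1, tb_den = 25;   /* hypothetical time_base: 25 fps source */
        unsigned int num = 1, den = 12;         /* user asks for roughly 12 fps          */

        if (den != tb_den) {
                num = num * tb_den / den;       /* 1 * 25 / 12 = 2 (integer division)    */
                den = tb_den;                   /* effective timeperframe 2/25 = 12.5fps */
        }
        printf("frame_interval = %u\n", num / tb_num);  /* prints 2: keep every 2nd frame */
        return 0;
}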
t                 126 drivers/media/pci/tw686x/tw686x-core.c static void tw686x_dma_delay(struct timer_list *t)
t                 128 drivers/media/pci/tw686x/tw686x-core.c 	struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
t                 405 drivers/media/platform/aspeed-video.c 	u32 t = readl(video->base + reg);
t                 406 drivers/media/platform/aspeed-video.c 	u32 before = t;
t                 408 drivers/media/platform/aspeed-video.c 	t &= ~clear;
t                 409 drivers/media/platform/aspeed-video.c 	t |= bits;
t                 410 drivers/media/platform/aspeed-video.c 	writel(t, video->base + reg);
t                 417 drivers/media/platform/aspeed-video.c 	u32 t = readl(video->base + reg);
t                 419 drivers/media/platform/aspeed-video.c 	dev_dbg(video->dev, "read %03x[%08x]\n", reg, t);
t                 420 drivers/media/platform/aspeed-video.c 	return t;
t                 343 drivers/media/platform/fsl-viu.c static void viu_vid_timeout(struct timer_list *t)
t                 345 drivers/media/platform/fsl-viu.c 	struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
t                  67 drivers/media/platform/omap/omap_vout_vrfb.c 	struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
t                  69 drivers/media/platform/omap/omap_vout_vrfb.c 	t->tx_status = 1;
t                  70 drivers/media/platform/omap/omap_vout_vrfb.c 	wake_up_interruptible(&t->wait);
t                 489 drivers/media/platform/qcom/camss/camss-vfe.c 	struct camss_buffer *t;
t                 491 drivers/media/platform/qcom/camss/camss-vfe.c 	list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
t                 126 drivers/media/platform/rcar_jpu.c #define JCQTN_SHIFT(t)		(((t) - 1) << 1)
t                 130 drivers/media/platform/rcar_jpu.c #define JCHTN_AC_SHIFT(t)	(((t) << 1) - 1)
t                 131 drivers/media/platform/rcar_jpu.c #define JCHTN_DC_SHIFT(t)	(((t) - 1) << 1)
t                 182 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
t                 187 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg &= ~EXYNOS3250_QT_NUM_MASK(t);
t                 188 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg |= (n << EXYNOS3250_QT_NUM_SHIFT(t)) &
t                 189 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 					EXYNOS3250_QT_NUM_MASK(t);
t                 193 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c void exynos3250_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
t                 198 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg &= ~EXYNOS3250_HT_NUM_AC_MASK(t);
t                 200 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg |= (0 << EXYNOS3250_HT_NUM_AC_SHIFT(t)) &
t                 201 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 					EXYNOS3250_HT_NUM_AC_MASK(t);
t                 205 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c void exynos3250_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
t                 210 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg &= ~EXYNOS3250_HT_NUM_DC_MASK(t);
t                 212 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 	reg |= (0 << EXYNOS3250_HT_NUM_DC_SHIFT(t)) &
t                 213 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c 					EXYNOS3250_HT_NUM_DC_MASK(t);
t                  28 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
t                  29 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h void exynos3250_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
t                  30 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h void exynos3250_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
t                 101 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
t                 106 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg &= ~S5P_QT_NUMt_MASK(t);
t                 107 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t);
t                 111 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
t                 116 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg &= ~S5P_HT_NUMt_AC_MASK(t);
t                 118 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg |= (0 << S5P_HT_NUMt_AC_SHIFT(t)) & S5P_HT_NUMt_AC_MASK(t);
t                 122 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
t                 127 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg &= ~S5P_HT_NUMt_DC_MASK(t);
t                 129 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c 	reg |= (0 << S5P_HT_NUMt_DC_SHIFT(t)) & S5P_HT_NUMt_DC_MASK(t);
t                  33 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
t                  34 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
t                  35 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
t                  34 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_QT_NUMt_SHIFT(t)		(((t) - 1) << 1)
t                  35 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_QT_NUMt_MASK(t)		(0x3 << S5P_QT_NUMt_SHIFT(t))
t                  39 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_HT_NUMt_AC_SHIFT(t)		(((t) << 1) - 1)
t                  40 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_HT_NUMt_AC_MASK(t)		(0x1 << S5P_HT_NUMt_AC_SHIFT(t))
t                  42 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_HT_NUMt_DC_SHIFT(t)		(((t) - 1) << 1)
t                  43 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_HT_NUMt_DC_MASK(t)		(0x1 << S5P_HT_NUMt_DC_SHIFT(t))
t                 420 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_QT_NUM_SHIFT(t)		((((t) - 1) << 1) + 8)
t                 421 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_QT_NUM_MASK(t)		(0x3 << EXYNOS3250_QT_NUM_SHIFT(t))
t                 424 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_HT_NUM_AC_SHIFT(t)		(((t) << 1) - 1)
t                 425 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_HT_NUM_AC_MASK(t)		(0x1 << EXYNOS3250_HT_NUM_AC_SHIFT(t))
t                 427 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_HT_NUM_DC_SHIFT(t)		(((t) - 1) << 1)
t                 428 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_HT_NUM_DC_MASK(t)		(0x1 << EXYNOS3250_HT_NUM_DC_SHIFT(t))
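The s5p_jpeg_qtbl() fragment above and the jpeg-regs.h macros pack, into one register, which quantisation (or Huffman) table each JPEG component t uses: two bits per component for the quantisation selectors, one bit each for the AC and DC Huffman selectors. A stand-alone restatement of that read-modify-write using the quoted shift/mask arithmetic (userspace, with values chosen only to show the packing):

#include <stdio.h>

/* Same arithmetic as the quoted S5P_QT_NUMt_* macros and s5p_jpeg_qtbl(). */
#define QT_SHIFT(t)     (((t) - 1) << 1)
#define QT_MASK(t)      (0x3u << QT_SHIFT(t))

/* Select quantisation table n for component t inside one packed register. */
static unsigned int set_qtbl(unsigned int reg, unsigned int t, unsigned int n)
{
        reg &= ~QT_MASK(t);
        reg |= (n << QT_SHIFT(t)) & QT_MASK(t);
        return reg;
}

int main(void)
{
        unsigned int reg = 0;

        reg = set_qtbl(reg, 1, 0);      /* component 1 -> table 0 (bits 1:0)  */
        reg = set_qtbl(reg, 2, 1);      /* component 2 -> table 1 (bits 3:2)  */
        reg = set_qtbl(reg, 3, 1);      /* component 3 -> table 1 (bits 5:4)  */
        printf("0x%02x\n", reg);        /* prints 0x14                        */
        return 0;
}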
t                 144 drivers/media/platform/s5p-mfc/s5p_mfc.c static void s5p_mfc_watchdog(struct timer_list *t)
t                 146 drivers/media/platform/s5p-mfc/s5p_mfc.c 	struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
t                 148 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
t                 154 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c 		    formats[i].type == t)
t                 100 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
t                 106 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		    formats[i].type == t)
t                1676 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
t                1690 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	return t[lvl];
t                1695 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
t                1705 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	return t[lvl];
t                1710 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	static unsigned int t[] = {
t                1725 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	return t[lvl];
t                1730 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
t                1750 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	return t[sar];
t                  61 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c static void c8sectpfe_timer_interrupt(struct timer_list *t)
t                  63 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	struct c8sectpfei *fei = from_timer(fei, t, timer);
t                 285 drivers/media/radio/radio-cadet.c static void cadet_handler(struct timer_list *t)
t                 287 drivers/media/radio/radio-cadet.c 	struct cadet *dev = from_timer(dev, t, readtimer);
t                 338 drivers/media/radio/radio-wl1273.c 	unsigned long t;
t                 370 drivers/media/radio/radio-wl1273.c 	t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
t                 371 drivers/media/radio/radio-wl1273.c 	if (!t)
t                 374 drivers/media/radio/radio-wl1273.c 	dev_dbg(radio->dev, "WL1273_CHANL_SET: %lu\n", t);
t                 384 drivers/media/radio/radio-wl1273.c 	t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
t                 385 drivers/media/radio/radio-wl1273.c 	if (!t)
t                 389 drivers/media/radio/radio-wl1273.c 	dev_dbg(radio->dev, "WL1273_POWER_ENB_SET: %lu\n", t);
t                 398 drivers/media/radio/radio-wl1273.c 	unsigned long t;
t                 439 drivers/media/radio/radio-wl1273.c 	t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
t                 440 drivers/media/radio/radio-wl1273.c 	if (!t) {
t                 538 drivers/media/radio/wl128x/fmdrv_common.c static void int_timeout_handler(struct timer_list *t)
t                 544 drivers/media/radio/wl128x/fmdrv_common.c 	fmdev = from_timer(fmdev, t, irq_info.timer);
t                 663 drivers/media/rc/ene_ir.c static void ene_tx_irqsim(struct timer_list *t)
t                 665 drivers/media/rc/ene_ir.c 	struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
t                 131 drivers/media/rc/igorplugusb.c static void igorplugusb_timer(struct timer_list *t)
t                 133 drivers/media/rc/igorplugusb.c 	struct igorplugusb *ir = from_timer(ir, t, timer);
t                 866 drivers/media/rc/img-ir/img-ir-hw.c static void img_ir_end_timer(struct timer_list *t)
t                 868 drivers/media/rc/img-ir/img-ir-hw.c 	struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
t                 880 drivers/media/rc/img-ir/img-ir-hw.c static void img_ir_suspend_timer(struct timer_list *t)
t                 882 drivers/media/rc/img-ir/img-ir-hw.c 	struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
t                  66 drivers/media/rc/img-ir/img-ir-raw.c static void img_ir_echo_timer(struct timer_list *t)
t                  68 drivers/media/rc/img-ir/img-ir-raw.c 	struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
t                1061 drivers/media/rc/imon.c static void imon_touch_display_timeout(struct timer_list *t)
t                1063 drivers/media/rc/imon.c 	struct imon_context *ictx = from_timer(ictx, t, ttimer);
t                1554 drivers/media/rc/imon.c 	ktime_t t;
t                1654 drivers/media/rc/imon.c 	t = ktime_get();
t                1657 drivers/media/rc/imon.c 		msec = ktime_ms_delta(t, prev_time);
t                1663 drivers/media/rc/imon.c 	prev_time = t;
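The imon fragment above timestamps each event with ktime_get() and measures the gap to the previous one with ktime_ms_delta(). A minimal sketch of that idiom as a reusable helper; the helper name and its use for debouncing are illustrative, not taken from the imon driver.

#include <linux/ktime.h>
#include <linux/types.h>

/* Return true once at least interval_ms milliseconds have passed since the
 * last accepted event, updating the stored timestamp when they have. */
static bool debounce_elapsed(ktime_t *prev, unsigned int interval_ms)
{
        ktime_t now = ktime_get();

        if (ktime_ms_delta(now, *prev) < interval_ms)
                return false;

        *prev = now;
        return true;
}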
t                 110 drivers/media/rc/ir-mce_kbd-decoder.c static void mce_kbd_rx_timeout(struct timer_list *t)
t                 112 drivers/media/rc/ir-mce_kbd-decoder.c 	struct ir_raw_event_ctrl *raw = from_timer(raw, t, mce_kbd.rx_timeout);
t                 555 drivers/media/rc/rc-ir-raw.c static void ir_raw_edge_handle(struct timer_list *t)
t                 557 drivers/media/rc/rc-ir-raw.c 	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
t                 643 drivers/media/rc/rc-main.c static void ir_timer_keyup(struct timer_list *t)
t                 645 drivers/media/rc/rc-main.c 	struct rc_dev *dev = from_timer(dev, t, timer_keyup);
t                 672 drivers/media/rc/rc-main.c static void ir_timer_repeat(struct timer_list *t)
t                 674 drivers/media/rc/rc-main.c 	struct rc_dev *dev = from_timer(dev, t, timer_repeat);
t                 181 drivers/media/tuners/max2165.c 	u32 t;
t                 204 drivers/media/tuners/max2165.c 	t = priv->tf_balun_low_ref;
t                 205 drivers/media/tuners/max2165.c 	t += (priv->tf_balun_hi_ref - priv->tf_balun_low_ref)
t                 208 drivers/media/tuners/max2165.c 	tf = t;
t                 175 drivers/media/tuners/tuner-xc2028.c #define dump_firm_type(t)	dump_firm_type_and_int_freq(t, 0)
t                 551 drivers/media/tuners/xc4000.c #define dump_firm_type(t)	dump_firm_type_and_int_freq(t, 0)
t                  98 drivers/media/usb/au0828/au0828-dvb.c static void au0828_bulk_timeout(struct timer_list *t)
t                 100 drivers/media/usb/au0828/au0828-dvb.c 	struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
t                 954 drivers/media/usb/au0828/au0828-video.c static void au0828_vid_buffer_timeout(struct timer_list *t)
t                 956 drivers/media/usb/au0828/au0828-video.c 	struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
t                 978 drivers/media/usb/au0828/au0828-video.c static void au0828_vbi_buffer_timeout(struct timer_list *t)
t                 980 drivers/media/usb/au0828/au0828-video.c 	struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
t                1489 drivers/media/usb/au0828/au0828-video.c static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
t                1495 drivers/media/usb/au0828/au0828-video.c 	if (t->index != 0)
t                1505 drivers/media/usb/au0828/au0828-video.c 	strscpy(t->name, "Auvitek tuner", sizeof(t->name));
t                1509 drivers/media/usb/au0828/au0828-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
t                1515 drivers/media/usb/au0828/au0828-video.c 				const struct v4l2_tuner *t)
t                1519 drivers/media/usb/au0828/au0828-video.c 	if (t->index != 0)
t                1527 drivers/media/usb/au0828/au0828-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
t                1530 drivers/media/usb/au0828/au0828-video.c 	dprintk(1, "VIDIOC_S_TUNER: signal = %x, afc = %x\n", t->signal,
t                1531 drivers/media/usb/au0828/au0828-video.c 		t->afc);
t                1221 drivers/media/usb/cx231xx/cx231xx-video.c int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
t                1231 drivers/media/usb/cx231xx/cx231xx-video.c 	if (0 != t->index)
t                1234 drivers/media/usb/cx231xx/cx231xx-video.c 	strscpy(t->name, "Tuner", sizeof(t->name));
t                1236 drivers/media/usb/cx231xx/cx231xx-video.c 	t->type = V4L2_TUNER_ANALOG_TV;
t                1237 drivers/media/usb/cx231xx/cx231xx-video.c 	t->capability = V4L2_TUNER_CAP_NORM;
t                1238 drivers/media/usb/cx231xx/cx231xx-video.c 	t->rangehigh = 0xffffffffUL;
t                1239 drivers/media/usb/cx231xx/cx231xx-video.c 	t->signal = 0xffff;	/* LOCKED */
t                1240 drivers/media/usb/cx231xx/cx231xx-video.c 	call_all(dev, tuner, g_tuner, t);
t                1245 drivers/media/usb/cx231xx/cx231xx-video.c int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
t                1255 drivers/media/usb/cx231xx/cx231xx-video.c 	if (0 != t->index)
t                1258 drivers/media/usb/cx231xx/cx231xx-video.c 	call_all(dev, tuner, s_tuner, t);
t                1705 drivers/media/usb/cx231xx/cx231xx-video.c static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
t                1709 drivers/media/usb/cx231xx/cx231xx-video.c 	if (t->index)
t                1712 drivers/media/usb/cx231xx/cx231xx-video.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                1714 drivers/media/usb/cx231xx/cx231xx-video.c 	call_all(dev, tuner, g_tuner, t);
t                1718 drivers/media/usb/cx231xx/cx231xx-video.c static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
t                1722 drivers/media/usb/cx231xx/cx231xx-video.c 	if (t->index)
t                1725 drivers/media/usb/cx231xx/cx231xx-video.c 	call_all(dev, tuner, s_tuner, t);
t                 942 drivers/media/usb/cx231xx/cx231xx.h int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
t                 943 drivers/media/usb/cx231xx/cx231xx.h int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t);
t                  54 drivers/media/usb/dvb-usb-v2/dvb_usb.h #define dvb_usb_dbg_usb_control_msg(udev, r, t, v, i, b, l) { \
t                  56 drivers/media/usb/dvb-usb-v2/dvb_usb.h 	if (t == (USB_TYPE_VENDOR | USB_DIR_OUT)) \
t                  61 drivers/media/usb/dvb-usb-v2/dvb_usb.h 			"%s %*ph\n",  __func__, t, r, v & 0xff, v >> 8, \
t                1833 drivers/media/usb/em28xx/em28xx-video.c 			  struct v4l2_tuner *t)
t                1837 drivers/media/usb/em28xx/em28xx-video.c 	if (t->index != 0)
t                1840 drivers/media/usb/em28xx/em28xx-video.c 	strscpy(t->name, "Tuner", sizeof(t->name));
t                1842 drivers/media/usb/em28xx/em28xx-video.c 	v4l2_device_call_all(&dev->v4l2->v4l2_dev, 0, tuner, g_tuner, t);
t                1847 drivers/media/usb/em28xx/em28xx-video.c 			  const struct v4l2_tuner *t)
t                1851 drivers/media/usb/em28xx/em28xx-video.c 	if (t->index != 0)
t                1854 drivers/media/usb/em28xx/em28xx-video.c 	v4l2_device_call_all(&dev->v4l2->v4l2_dev, 0, tuner, s_tuner, t);
t                2089 drivers/media/usb/em28xx/em28xx-video.c 			 struct v4l2_tuner *t)
t                2093 drivers/media/usb/em28xx/em28xx-video.c 	if (unlikely(t->index > 0))
t                2096 drivers/media/usb/em28xx/em28xx-video.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                2098 drivers/media/usb/em28xx/em28xx-video.c 	v4l2_device_call_all(&dev->v4l2->v4l2_dev, 0, tuner, g_tuner, t);
t                2104 drivers/media/usb/em28xx/em28xx-video.c 			 const struct v4l2_tuner *t)
t                2108 drivers/media/usb/em28xx/em28xx-video.c 	if (t->index != 0)
t                2111 drivers/media/usb/em28xx/em28xx-video.c 	v4l2_device_call_all(&dev->v4l2->v4l2_dev, 0, tuner, s_tuner, t);
t                 710 drivers/media/usb/go7007/go7007-v4l2.c 				struct v4l2_tuner *t)
t                 714 drivers/media/usb/go7007/go7007-v4l2.c 	if (t->index != 0)
t                 717 drivers/media/usb/go7007/go7007-v4l2.c 	strscpy(t->name, "Tuner", sizeof(t->name));
t                 718 drivers/media/usb/go7007/go7007-v4l2.c 	return call_all(&go->v4l2_dev, tuner, g_tuner, t);
t                 722 drivers/media/usb/go7007/go7007-v4l2.c 				const struct v4l2_tuner *t)
t                 726 drivers/media/usb/go7007/go7007-v4l2.c 	if (t->index != 0)
t                 729 drivers/media/usb/go7007/go7007-v4l2.c 	return call_all(&go->v4l2_dev, tuner, s_tuner, t);
t                1146 drivers/media/usb/msi2500/msi2500.c 	struct spi_transfer *t;
t                1150 drivers/media/usb/msi2500/msi2500.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                1151 drivers/media/usb/msi2500/msi2500.c 		dev_dbg(dev->dev, "msg=%*ph\n", t->len, t->tx_buf);
t                1153 drivers/media/usb/msi2500/msi2500.c 		data |= ((u8 *)t->tx_buf)[0] << 8;
t                1154 drivers/media/usb/msi2500/msi2500.c 		data |= ((u8 *)t->tx_buf)[1] << 16;
t                1155 drivers/media/usb/msi2500/msi2500.c 		data |= ((u8 *)t->tx_buf)[2] << 24;
t                3555 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_ctl_timeout(struct timer_list *t)
t                3557 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	struct hdw_timer *timer = from_timer(timer, t, timer);
t                4414 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_quiescent_timeout(struct timer_list *t)
t                4416 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	struct pvr2_hdw *hdw = from_timer(hdw, t, quiescent_timer);
t                4425 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t)
t                4427 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	struct pvr2_hdw *hdw = from_timer(hdw, t, decoder_stabilization_timer);
t                4436 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t)
t                4438 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_wait_timer);
t                4447 drivers/media/usb/pvrusb2/pvrusb2-hdw.c static void pvr2_hdw_encoder_run_timeout(struct timer_list *t)
t                4449 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_run_timer);
t                 125 drivers/media/usb/pvrusb2/pvrusb2-std.c 	v4l2_std_id t;
t                 159 drivers/media/usb/pvrusb2/pvrusb2-std.c 		t = sp->id & cmsk;
t                 160 drivers/media/usb/pvrusb2/pvrusb2-std.c 		if (!t) return 0; // Specific color + modulation system illegal
t                 161 drivers/media/usb/pvrusb2/pvrusb2-std.c 		id |= t;
t                   9 drivers/media/usb/pvrusb2/pvrusb2-util.h #define PVR2_DECOMPOSE_LE(t,i,d) \
t                  11 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i] = (d) & 0xff;\
t                  12 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+1] = ((d) >> 8) & 0xff;\
t                  13 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+2] = ((d) >> 16) & 0xff;\
t                  14 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+3] = ((d) >> 24) & 0xff;\
t                  17 drivers/media/usb/pvrusb2/pvrusb2-util.h #define PVR2_DECOMPOSE_BE(t,i,d) \
t                  19 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+3] = (d) & 0xff;\
t                  20 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+2] = ((d) >> 8) & 0xff;\
t                  21 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i+1] = ((d) >> 16) & 0xff;\
t                  22 drivers/media/usb/pvrusb2/pvrusb2-util.h 	(t)[i] = ((d) >> 24) & 0xff;\
t                  25 drivers/media/usb/pvrusb2/pvrusb2-util.h #define PVR2_COMPOSE_LE(t,i) \
t                  26 drivers/media/usb/pvrusb2/pvrusb2-util.h     ((((u32)((t)[i+3])) << 24) | \
t                  27 drivers/media/usb/pvrusb2/pvrusb2-util.h      (((u32)((t)[i+2])) << 16) | \
t                  28 drivers/media/usb/pvrusb2/pvrusb2-util.h      (((u32)((t)[i+1])) << 8) | \
t                  29 drivers/media/usb/pvrusb2/pvrusb2-util.h      ((u32)((t)[i])))
t                  31 drivers/media/usb/pvrusb2/pvrusb2-util.h #define PVR2_COMPOSE_BE(t,i) \
t                  32 drivers/media/usb/pvrusb2/pvrusb2-util.h     ((((u32)((t)[i])) << 24) | \
t                  33 drivers/media/usb/pvrusb2/pvrusb2-util.h      (((u32)((t)[i+1])) << 16) | \
t                  34 drivers/media/usb/pvrusb2/pvrusb2-util.h      (((u32)((t)[i+2])) << 8) | \
t                  35 drivers/media/usb/pvrusb2/pvrusb2-util.h      ((u32)((t)[i+3])))
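The PVR2_DECOMPOSE_*/PVR2_COMPOSE_* macros above serialise a 32-bit value into four bytes of a command buffer and rebuild it, in either byte order. A quick stand-alone check of the little-endian pair, restated with a do { } while (0) wrapper and local typedefs so it compiles outside the kernel:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

#define PVR2_DECOMPOSE_LE(t, i, d) \
        do { \
                (t)[i]     = (d) & 0xff; \
                (t)[i + 1] = ((d) >> 8) & 0xff; \
                (t)[i + 2] = ((d) >> 16) & 0xff; \
                (t)[i + 3] = ((d) >> 24) & 0xff; \
        } while (0)

#define PVR2_COMPOSE_LE(t, i) \
        ((((u32)((t)[i + 3])) << 24) | \
         (((u32)((t)[i + 2])) << 16) | \
         (((u32)((t)[i + 1])) << 8)  | \
         ((u32)((t)[i])))

int main(void)
{
        u8 buf[4];

        PVR2_DECOMPOSE_LE(buf, 0, 0x12345678u);
        /* buf = {0x78, 0x56, 0x34, 0x12}; composing restores the value. */
        printf("0x%08x\n", PVR2_COMPOSE_LE(buf, 0));    /* prints 0x12345678 */
        return 0;
}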
t                 472 drivers/media/usb/s2255/s2255drv.c static void s2255_timer(struct timer_list *t)
t                 474 drivers/media/usb/s2255/s2255drv.c 	struct s2255_dev *dev = from_timer(dev, t, timer);
t                1173 drivers/media/usb/tm6000/tm6000-video.c 				struct v4l2_tuner *t)
t                1180 drivers/media/usb/tm6000/tm6000-video.c 	if (0 != t->index)
t                1183 drivers/media/usb/tm6000/tm6000-video.c 	strscpy(t->name, "Television", sizeof(t->name));
t                1184 drivers/media/usb/tm6000/tm6000-video.c 	t->type       = V4L2_TUNER_ANALOG_TV;
t                1185 drivers/media/usb/tm6000/tm6000-video.c 	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
t                1186 drivers/media/usb/tm6000/tm6000-video.c 	t->rangehigh  = 0xffffffffUL;
t                1187 drivers/media/usb/tm6000/tm6000-video.c 	t->rxsubchans = V4L2_TUNER_SUB_STEREO;
t                1189 drivers/media/usb/tm6000/tm6000-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
t                1191 drivers/media/usb/tm6000/tm6000-video.c 	t->audmode = dev->amode;
t                1197 drivers/media/usb/tm6000/tm6000-video.c 				const struct v4l2_tuner *t)
t                1204 drivers/media/usb/tm6000/tm6000-video.c 	if (0 != t->index)
t                1207 drivers/media/usb/tm6000/tm6000-video.c 	if (t->audmode > V4L2_TUNER_MODE_STEREO)
t                1210 drivers/media/usb/tm6000/tm6000-video.c 		dev->amode = t->audmode;
t                1211 drivers/media/usb/tm6000/tm6000-video.c 	dprintk(dev, 3, "audio mode: %x\n", t->audmode);
t                1213 drivers/media/usb/tm6000/tm6000-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
t                1254 drivers/media/usb/tm6000/tm6000-video.c 					struct v4l2_tuner *t)
t                1259 drivers/media/usb/tm6000/tm6000-video.c 	if (0 != t->index)
t                1262 drivers/media/usb/tm6000/tm6000-video.c 	memset(t, 0, sizeof(*t));
t                1263 drivers/media/usb/tm6000/tm6000-video.c 	strscpy(t->name, "Radio", sizeof(t->name));
t                1264 drivers/media/usb/tm6000/tm6000-video.c 	t->type = V4L2_TUNER_RADIO;
t                1265 drivers/media/usb/tm6000/tm6000-video.c 	t->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
t                1266 drivers/media/usb/tm6000/tm6000-video.c 	t->rxsubchans = V4L2_TUNER_SUB_STEREO;
t                1267 drivers/media/usb/tm6000/tm6000-video.c 	t->audmode = V4L2_TUNER_MODE_STEREO;
t                1269 drivers/media/usb/tm6000/tm6000-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
t                1275 drivers/media/usb/tm6000/tm6000-video.c 					const struct v4l2_tuner *t)
t                1280 drivers/media/usb/tm6000/tm6000-video.c 	if (0 != t->index)
t                1282 drivers/media/usb/tm6000/tm6000-video.c 	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
t                  89 drivers/media/v4l2-core/tuner-core.c 	i2c_adapter_id(t->i2c->adapter), t->i2c->addr
t                 240 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = fe->analog_demod_priv;
t                 260 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = fe->analog_demod_priv;
t                 301 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
t                 302 drivers/media/v4l2-core/tuner-core.c 	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
t                 303 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                 312 drivers/media/v4l2-core/tuner-core.c 	t->type = type;
t                 313 drivers/media/v4l2-core/tuner-core.c 	t->config = new_config;
t                 316 drivers/media/v4l2-core/tuner-core.c 		t->fe.callback = tuner_callback;
t                 320 drivers/media/v4l2-core/tuner-core.c 	tuner_detach(&t->fe);
t                 321 drivers/media/v4l2-core/tuner-core.c 	t->fe.analog_demod_priv = NULL;
t                 323 drivers/media/v4l2-core/tuner-core.c 	switch (t->type) {
t                 326 drivers/media/v4l2-core/tuner-core.c 			   &t->fe, t->i2c->adapter, t->i2c->addr))
t                 331 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
t                 332 drivers/media/v4l2-core/tuner-core.c 				t->i2c->addr, t->config))
t                 337 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(tea5767_attach, &t->fe,
t                 338 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, t->i2c->addr))
t                 340 drivers/media/v4l2-core/tuner-core.c 		t->mode_mask = T_RADIO;
t                 343 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(tea5761_attach, &t->fe,
t                 344 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, t->i2c->addr))
t                 346 drivers/media/v4l2-core/tuner-core.c 		t->mode_mask = T_RADIO;
t                 359 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(simple_tuner_attach, &t->fe,
t                 360 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, t->i2c->addr, t->type))
t                 369 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(simple_tuner_attach, &t->fe,
t                 370 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, t->i2c->addr, t->type))
t                 376 drivers/media/v4l2-core/tuner-core.c 			.i2c_adap  = t->i2c->adapter,
t                 377 drivers/media/v4l2-core/tuner-core.c 			.i2c_addr  = t->i2c->addr,
t                 379 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
t                 386 drivers/media/v4l2-core/tuner-core.c 			   &t->fe, t->i2c->adapter, t->i2c->addr))
t                 392 drivers/media/v4l2-core/tuner-core.c 			.i2c_address = t->i2c->addr,
t                 398 drivers/media/v4l2-core/tuner-core.c 				&t->fe, t->i2c->adapter, &xc5000_cfg))
t                 406 drivers/media/v4l2-core/tuner-core.c 			.i2c_address = t->i2c->addr,
t                 413 drivers/media/v4l2-core/tuner-core.c 				&t->fe, t->i2c->adapter, &xc5000c_cfg))
t                 424 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
t                 425 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, &cfg))
t                 433 drivers/media/v4l2-core/tuner-core.c 			.i2c_address	  = t->i2c->addr,
t                 442 drivers/media/v4l2-core/tuner-core.c 				&t->fe, t->i2c->adapter, &xc4000_cfg))
t                 448 drivers/media/v4l2-core/tuner-core.c 		if (!dvb_attach(simple_tuner_attach, &t->fe,
t                 449 drivers/media/v4l2-core/tuner-core.c 				t->i2c->adapter, t->i2c->addr, t->type))
t                 458 drivers/media/v4l2-core/tuner-core.c 		t->name = fe_tuner_ops->info.name;
t                 460 drivers/media/v4l2-core/tuner-core.c 		t->fe.analog_demod_priv = t;
t                 470 drivers/media/v4l2-core/tuner-core.c 		t->name = analog_ops->info.name;
t                 474 drivers/media/v4l2-core/tuner-core.c 	t->sd.entity.name = t->name;
t                 477 drivers/media/v4l2-core/tuner-core.c 	dprintk("type set to %s\n", t->name);
t                 479 drivers/media/v4l2-core/tuner-core.c 	t->mode_mask = new_mode_mask;
t                 488 drivers/media/v4l2-core/tuner-core.c 		if (V4L2_TUNER_RADIO == t->mode)
t                 489 drivers/media/v4l2-core/tuner-core.c 			set_radio_freq(c, t->radio_freq);
t                 491 drivers/media/v4l2-core/tuner-core.c 			set_tv_freq(c, t->tv_freq);
t                 496 drivers/media/v4l2-core/tuner-core.c 		  t->mode_mask);
t                 500 drivers/media/v4l2-core/tuner-core.c 	dprintk("Tuner attach for type = %d failed.\n", t->type);
t                 501 drivers/media/v4l2-core/tuner-core.c 	t->type = TUNER_ABSENT;
t                 523 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                 532 drivers/media/v4l2-core/tuner-core.c 	if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
t                 533 drivers/media/v4l2-core/tuner-core.c 	    (t->mode_mask & tun_setup->mode_mask))) ||
t                 539 drivers/media/v4l2-core/tuner-core.c 			  t->type, t->mode_mask,
t                 557 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                 558 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                 560 drivers/media/v4l2-core/tuner-core.c 	if (t->type != cfg->tuner)
t                 564 drivers/media/v4l2-core/tuner-core.c 		analog_ops->set_config(&t->fe, cfg->priv);
t                 631 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t;
t                 638 drivers/media/v4l2-core/tuner-core.c 	t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
t                 639 drivers/media/v4l2-core/tuner-core.c 	if (NULL == t)
t                 641 drivers/media/v4l2-core/tuner-core.c 	v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
t                 642 drivers/media/v4l2-core/tuner-core.c 	t->i2c = client;
t                 643 drivers/media/v4l2-core/tuner-core.c 	t->name = "(tuner unset)";
t                 644 drivers/media/v4l2-core/tuner-core.c 	t->type = UNSET;
t                 645 drivers/media/v4l2-core/tuner-core.c 	t->audmode = V4L2_TUNER_MODE_STEREO;
t                 646 drivers/media/v4l2-core/tuner-core.c 	t->standby = true;
t                 647 drivers/media/v4l2-core/tuner-core.c 	t->radio_freq = 87.5 * 16000;	/* Initial freq range */
t                 648 drivers/media/v4l2-core/tuner-core.c 	t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */
t                 665 drivers/media/v4l2-core/tuner-core.c 					       t->i2c->adapter,
t                 666 drivers/media/v4l2-core/tuner-core.c 					       t->i2c->addr) >= 0) {
t                 667 drivers/media/v4l2-core/tuner-core.c 				t->type = TUNER_TEA5761;
t                 668 drivers/media/v4l2-core/tuner-core.c 				t->mode_mask = T_RADIO;
t                 669 drivers/media/v4l2-core/tuner-core.c 				tuner_lookup(t->i2c->adapter, &radio, &tv);
t                 675 drivers/media/v4l2-core/tuner-core.c 			kfree(t);
t                 683 drivers/media/v4l2-core/tuner-core.c 			if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
t                 684 drivers/media/v4l2-core/tuner-core.c 					       t->i2c->addr) >= 0) {
t                 688 drivers/media/v4l2-core/tuner-core.c 				t->type = TUNER_TDA9887;
t                 689 drivers/media/v4l2-core/tuner-core.c 				t->mode_mask = T_RADIO | T_ANALOG_TV;
t                 695 drivers/media/v4l2-core/tuner-core.c 					       t->i2c->adapter, t->i2c->addr)
t                 697 drivers/media/v4l2-core/tuner-core.c 				t->type = TUNER_TEA5767;
t                 698 drivers/media/v4l2-core/tuner-core.c 				t->mode_mask = T_RADIO;
t                 700 drivers/media/v4l2-core/tuner-core.c 				tuner_lookup(t->i2c->adapter, &radio, &tv);
t                 717 drivers/media/v4l2-core/tuner-core.c 	tuner_lookup(t->i2c->adapter, &radio, &tv);
t                 719 drivers/media/v4l2-core/tuner-core.c 		t->mode_mask = T_ANALOG_TV;
t                 721 drivers/media/v4l2-core/tuner-core.c 			t->mode_mask |= T_RADIO;
t                 722 drivers/media/v4l2-core/tuner-core.c 		dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
t                 728 drivers/media/v4l2-core/tuner-core.c 	t->sd.entity.name = t->name;
t                 734 drivers/media/v4l2-core/tuner-core.c 	if (t->type == TUNER_TDA9887) {
t                 735 drivers/media/v4l2-core/tuner-core.c 		t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
t                 736 drivers/media/v4l2-core/tuner-core.c 		t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t                 737 drivers/media/v4l2-core/tuner-core.c 		t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
t                 738 drivers/media/v4l2-core/tuner-core.c 		t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG;
t                 739 drivers/media/v4l2-core/tuner-core.c 		ret = media_entity_pads_init(&t->sd.entity,
t                 741 drivers/media/v4l2-core/tuner-core.c 					     &t->pad[0]);
t                 742 drivers/media/v4l2-core/tuner-core.c 		t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
t                 744 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
t                 745 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
t                 746 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
t                 747 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
t                 748 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
t                 749 drivers/media/v4l2-core/tuner-core.c 		t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;
t                 750 drivers/media/v4l2-core/tuner-core.c 		ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
t                 751 drivers/media/v4l2-core/tuner-core.c 					     &t->pad[0]);
t                 752 drivers/media/v4l2-core/tuner-core.c 		t->sd.entity.function = MEDIA_ENT_F_TUNER;
t                 757 drivers/media/v4l2-core/tuner-core.c 		kfree(t);
t                 762 drivers/media/v4l2-core/tuner-core.c 	if (t->mode_mask & T_ANALOG_TV)
t                 763 drivers/media/v4l2-core/tuner-core.c 		t->mode = V4L2_TUNER_ANALOG_TV;
t                 765 drivers/media/v4l2-core/tuner-core.c 		t->mode = V4L2_TUNER_RADIO;
t                 766 drivers/media/v4l2-core/tuner-core.c 	set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
t                 767 drivers/media/v4l2-core/tuner-core.c 	list_add_tail(&t->list, &tuner_list);
t                 770 drivers/media/v4l2-core/tuner-core.c 		   t->type,
t                 771 drivers/media/v4l2-core/tuner-core.c 		   t->mode_mask & T_RADIO ? " Radio" : "",
t                 772 drivers/media/v4l2-core/tuner-core.c 		   t->mode_mask & T_ANALOG_TV ? " TV" : "");
t                 784 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(client));
t                 786 drivers/media/v4l2-core/tuner-core.c 	v4l2_device_unregister_subdev(&t->sd);
t                 787 drivers/media/v4l2-core/tuner-core.c 	tuner_detach(&t->fe);
t                 788 drivers/media/v4l2-core/tuner-core.c 	t->fe.analog_demod_priv = NULL;
t                 790 drivers/media/v4l2-core/tuner-core.c 	list_del(&t->list);
t                 791 drivers/media/v4l2-core/tuner-core.c 	kfree(t);
t                 819 drivers/media/v4l2-core/tuner-core.c static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
t                 827 drivers/media/v4l2-core/tuner-core.c 	if ((t_mode & t->mode_mask) == 0)
t                 842 drivers/media/v4l2-core/tuner-core.c static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
t                 844 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                 846 drivers/media/v4l2-core/tuner-core.c 	if (mode != t->mode) {
t                 847 drivers/media/v4l2-core/tuner-core.c 		if (check_mode(t, mode) == -EINVAL) {
t                 850 drivers/media/v4l2-core/tuner-core.c 			t->standby = true;
t                 852 drivers/media/v4l2-core/tuner-core.c 				analog_ops->standby(&t->fe);
t                 855 drivers/media/v4l2-core/tuner-core.c 		t->mode = mode;
t                 866 drivers/media/v4l2-core/tuner-core.c static void set_freq(struct tuner *t, unsigned int freq)
t                 868 drivers/media/v4l2-core/tuner-core.c 	struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
t                 870 drivers/media/v4l2-core/tuner-core.c 	if (t->mode == V4L2_TUNER_RADIO) {
t                 872 drivers/media/v4l2-core/tuner-core.c 			freq = t->radio_freq;
t                 876 drivers/media/v4l2-core/tuner-core.c 			freq = t->tv_freq;
t                 893 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
t                 894 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                 897 drivers/media/v4l2-core/tuner-core.c 		.mode      = t->mode,
t                 898 drivers/media/v4l2-core/tuner-core.c 		.audmode   = t->audmode,
t                 899 drivers/media/v4l2-core/tuner-core.c 		.std       = t->std
t                 902 drivers/media/v4l2-core/tuner-core.c 	if (t->type == UNSET) {
t                 924 drivers/media/v4l2-core/tuner-core.c 	t->tv_freq = freq;
t                 925 drivers/media/v4l2-core/tuner-core.c 	t->standby = false;
t                 927 drivers/media/v4l2-core/tuner-core.c 	analog_ops->set_params(&t->fe, &params);
t                 944 drivers/media/v4l2-core/tuner-core.c static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
t                1034 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
t                1035 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                1038 drivers/media/v4l2-core/tuner-core.c 		.mode      = t->mode,
t                1039 drivers/media/v4l2-core/tuner-core.c 		.audmode   = t->audmode,
t                1040 drivers/media/v4l2-core/tuner-core.c 		.std       = t->std
t                1043 drivers/media/v4l2-core/tuner-core.c 	if (t->type == UNSET) {
t                1065 drivers/media/v4l2-core/tuner-core.c 	t->radio_freq = freq;
t                1066 drivers/media/v4l2-core/tuner-core.c 	t->standby = false;
t                1068 drivers/media/v4l2-core/tuner-core.c 	analog_ops->set_params(&t->fe, &params);
t                1073 drivers/media/v4l2-core/tuner-core.c 	t->audmode = params.audmode;
t                1089 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = fe->analog_demod_priv;
t                1095 drivers/media/v4l2-core/tuner-core.c 	switch (t->mode) {
t                1107 drivers/media/v4l2-core/tuner-core.c 	if (t->mode == V4L2_TUNER_RADIO) {
t                1108 drivers/media/v4l2-core/tuner-core.c 		freq = t->radio_freq / 16000;
t                1109 drivers/media/v4l2-core/tuner-core.c 		freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
t                1111 drivers/media/v4l2-core/tuner-core.c 		freq = t->tv_freq / 16;
t                1112 drivers/media/v4l2-core/tuner-core.c 		freq_fraction = (t->tv_freq % 16) * 100 / 16;
t                1115 drivers/media/v4l2-core/tuner-core.c 		   t->standby ? " on standby mode" : "");
t                1117 drivers/media/v4l2-core/tuner-core.c 	pr_info("Standard:        0x%08lx\n", (unsigned long)t->std);
t                1118 drivers/media/v4l2-core/tuner-core.c 	if (t->mode != V4L2_TUNER_RADIO)
t                1123 drivers/media/v4l2-core/tuner-core.c 		fe_tuner_ops->get_status(&t->fe, &tuner_status);
t                1143 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1145 drivers/media/v4l2-core/tuner-core.c 	if (set_mode(t, V4L2_TUNER_RADIO) == 0)
t                1146 drivers/media/v4l2-core/tuner-core.c 		set_freq(t, 0);
t                1160 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1161 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                1164 drivers/media/v4l2-core/tuner-core.c 	t->standby = true;
t                1166 drivers/media/v4l2-core/tuner-core.c 		analog_ops->standby(&t->fe);
t                1172 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1174 drivers/media/v4l2-core/tuner-core.c 	if (set_mode(t, V4L2_TUNER_ANALOG_TV))
t                1177 drivers/media/v4l2-core/tuner-core.c 	t->std = tuner_fixup_std(t, std);
t                1178 drivers/media/v4l2-core/tuner-core.c 	if (t->std != std)
t                1179 drivers/media/v4l2-core/tuner-core.c 		dprintk("Fixup standard %llx to %llx\n", std, t->std);
t                1180 drivers/media/v4l2-core/tuner-core.c 	set_freq(t, 0);
t                1186 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1188 drivers/media/v4l2-core/tuner-core.c 	if (set_mode(t, f->type) == 0)
t                1189 drivers/media/v4l2-core/tuner-core.c 		set_freq(t, f->frequency);
t                1205 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1206 drivers/media/v4l2-core/tuner-core.c 	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
t                1208 drivers/media/v4l2-core/tuner-core.c 	if (check_mode(t, f->type) == -EINVAL)
t                1210 drivers/media/v4l2-core/tuner-core.c 	if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
t                1213 drivers/media/v4l2-core/tuner-core.c 		fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
t                1214 drivers/media/v4l2-core/tuner-core.c 		f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
t                1219 drivers/media/v4l2-core/tuner-core.c 			t->radio_freq : t->tv_freq;
t                1236 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1237 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                1238 drivers/media/v4l2-core/tuner-core.c 	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
t                1240 drivers/media/v4l2-core/tuner-core.c 	if (check_mode(t, vt->type) == -EINVAL)
t                1242 drivers/media/v4l2-core/tuner-core.c 	if (vt->type == t->mode && analog_ops->get_afc)
t                1243 drivers/media/v4l2-core/tuner-core.c 		analog_ops->get_afc(&t->fe, &vt->afc);
t                1244 drivers/media/v4l2-core/tuner-core.c 	if (vt->type == t->mode && analog_ops->has_signal) {
t                1247 drivers/media/v4l2-core/tuner-core.c 		if (!analog_ops->has_signal(&t->fe, &signal))
t                1258 drivers/media/v4l2-core/tuner-core.c 	if (vt->type == t->mode) {
t                1263 drivers/media/v4l2-core/tuner-core.c 			fe_tuner_ops->get_status(&t->fe, &tuner_status);
t                1269 drivers/media/v4l2-core/tuner-core.c 		vt->audmode = t->audmode;
t                1289 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1291 drivers/media/v4l2-core/tuner-core.c 	if (set_mode(t, vt->type))
t                1294 drivers/media/v4l2-core/tuner-core.c 	if (t->mode == V4L2_TUNER_RADIO) {
t                1295 drivers/media/v4l2-core/tuner-core.c 		t->audmode = vt->audmode;
t                1302 drivers/media/v4l2-core/tuner-core.c 		if (t->audmode != V4L2_TUNER_MODE_MONO &&
t                1303 drivers/media/v4l2-core/tuner-core.c 		    t->audmode != V4L2_TUNER_MODE_STEREO)
t                1304 drivers/media/v4l2-core/tuner-core.c 			t->audmode = V4L2_TUNER_MODE_STEREO;
t                1306 drivers/media/v4l2-core/tuner-core.c 	set_freq(t, 0);
t                1313 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(sd);
t                1314 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                1317 drivers/media/v4l2-core/tuner-core.c 		analog_ops->tuner_status(&t->fe);
t                1325 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
t                1326 drivers/media/v4l2-core/tuner-core.c 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
t                1330 drivers/media/v4l2-core/tuner-core.c 	if (t->fe.ops.tuner_ops.suspend)
t                1331 drivers/media/v4l2-core/tuner-core.c 		t->fe.ops.tuner_ops.suspend(&t->fe);
t                1332 drivers/media/v4l2-core/tuner-core.c 	else if (!t->standby && analog_ops->standby)
t                1333 drivers/media/v4l2-core/tuner-core.c 		analog_ops->standby(&t->fe);
t                1341 drivers/media/v4l2-core/tuner-core.c 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
t                1345 drivers/media/v4l2-core/tuner-core.c 	if (t->fe.ops.tuner_ops.resume)
t                1346 drivers/media/v4l2-core/tuner-core.c 		t->fe.ops.tuner_ops.resume(&t->fe);
t                1347 drivers/media/v4l2-core/tuner-core.c 	else if (!t->standby)
t                1348 drivers/media/v4l2-core/tuner-core.c 		if (set_mode(t, t->mode) == 0)
t                1349 drivers/media/v4l2-core/tuner-core.c 			set_freq(t, 0);
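Note on the tuner-core.c entries above: every operation is gated on a per-device mode mask, check_mode() maps the requested v4l2_tuner_type onto a T_* capability bit and rejects modes the tuner was not probed with, while set_mode() drops the device into standby rather than failing hard. A minimal sketch of the mask test, with illustrative MY_T_* bits standing in for the T_RADIO/T_ANALOG_TV flags seen above:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* illustrative capability bits, mirroring the T_RADIO / T_ANALOG_TV style flags */
    #define MY_T_RADIO      (1 << 0)
    #define MY_T_ANALOG_TV  (1 << 1)

    static int my_check_mode(unsigned int mode_mask, bool radio)
    {
            unsigned int want = radio ? MY_T_RADIO : MY_T_ANALOG_TV;

            /* reject the request if the device was not registered with this capability */
            return (want & mode_mask) ? 0 : -EINVAL;
    }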
t                 140 drivers/media/v4l2-core/v4l2-dv-timings.c bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
t                 145 drivers/media/v4l2-core/v4l2-dv-timings.c 	const struct v4l2_bt_timings *bt = &t->bt;
t                 149 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (t->type != V4L2_DV_BT_656_1120)
t                 151 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (t->type != dvcap->type ||
t                 164 drivers/media/v4l2-core/v4l2-dv-timings.c 	return fnc == NULL || fnc(t, fnc_handle);
t                 168 drivers/media/v4l2-core/v4l2-dv-timings.c int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
t                 175 drivers/media/v4l2-core/v4l2-dv-timings.c 	memset(t->reserved, 0, sizeof(t->reserved));
t                 179 drivers/media/v4l2-core/v4l2-dv-timings.c 		    idx++ == t->index) {
t                 180 drivers/media/v4l2-core/v4l2-dv-timings.c 			t->timings = v4l2_dv_timings_presets[i];
t                 188 drivers/media/v4l2-core/v4l2-dv-timings.c bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
t                 196 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
t                 202 drivers/media/v4l2-core/v4l2-dv-timings.c 		    v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
t                 204 drivers/media/v4l2-core/v4l2-dv-timings.c 			u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS;
t                 206 drivers/media/v4l2-core/v4l2-dv-timings.c 			*t = v4l2_dv_timings_presets[i];
t                 207 drivers/media/v4l2-core/v4l2-dv-timings.c 			if (can_reduce_fps(&t->bt))
t                 208 drivers/media/v4l2-core/v4l2-dv-timings.c 				t->bt.flags |= flags;
t                 217 drivers/media/v4l2-core/v4l2-dv-timings.c bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
t                 227 drivers/media/v4l2-core/v4l2-dv-timings.c 			*t = v4l2_dv_timings_presets[i];
t                 276 drivers/media/v4l2-core/v4l2-dv-timings.c 			   const struct v4l2_dv_timings *t, bool detailed)
t                 278 drivers/media/v4l2-core/v4l2-dv-timings.c 	const struct v4l2_bt_timings *bt = &t->bt;
t                 282 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (t->type != V4L2_DV_BT_656_1120)
t                 356 drivers/media/v4l2-core/v4l2-dv-timings.c struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
t                 361 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (t->type != V4L2_DV_BT_656_1120)
t                 363 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
t                 366 drivers/media/v4l2-core/v4l2-dv-timings.c 	ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
t                 367 drivers/media/v4l2-core/v4l2-dv-timings.c 	ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
t                 385 drivers/media/v4l2-core/v4l2-dv-timings.c struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t)
t                 387 drivers/media/v4l2-core/v4l2-dv-timings.c 	const struct v4l2_bt_timings *bt = &t->bt;
t                 393 drivers/media/v4l2-core/v4l2-dv-timings.c 	if (t->type != V4L2_DV_BT_656_1120)
t                2584 drivers/media/v4l2-core/v4l2-ioctl.c 		struct v4l2_tuner t = {
t                2591 drivers/media/v4l2-core/v4l2-ioctl.c 		err = ops->vidioc_g_tuner(file, fh, &t);
t                2594 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
t                2595 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangelow = t.rangelow;
t                2596 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangehigh = t.rangehigh;
t                 642 drivers/memory/omap-gpmc.c 	    t->field, (cd), #field) < 0)                       \
t                 705 drivers/memory/omap-gpmc.c int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
t                 711 drivers/memory/omap-gpmc.c 	div = gpmc_calc_divider(t->sync_clk);
t                 732 drivers/memory/omap-gpmc.c 		div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
t                 736 drivers/memory/omap-gpmc.c 			       t->wait_monitoring
t                 795 drivers/memory/omap-gpmc.c 	gpmc_cs_bool_timings(cs, &t->bool_timings);
t                1139 drivers/memory/omap-gpmc.c static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
t                1215 drivers/memory/omap-gpmc.c 	gpmc_calc_timings(t, s, &dev_t);
t                1760 drivers/memory/omap-gpmc.c static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
t                1762 drivers/memory/omap-gpmc.c 	t->cs_on /= 1000;
t                1763 drivers/memory/omap-gpmc.c 	t->cs_rd_off /= 1000;
t                1764 drivers/memory/omap-gpmc.c 	t->cs_wr_off /= 1000;
t                1765 drivers/memory/omap-gpmc.c 	t->adv_on /= 1000;
t                1766 drivers/memory/omap-gpmc.c 	t->adv_rd_off /= 1000;
t                1767 drivers/memory/omap-gpmc.c 	t->adv_wr_off /= 1000;
t                1768 drivers/memory/omap-gpmc.c 	t->we_on /= 1000;
t                1769 drivers/memory/omap-gpmc.c 	t->we_off /= 1000;
t                1770 drivers/memory/omap-gpmc.c 	t->oe_on /= 1000;
t                1771 drivers/memory/omap-gpmc.c 	t->oe_off /= 1000;
t                1772 drivers/memory/omap-gpmc.c 	t->page_burst_access /= 1000;
t                1773 drivers/memory/omap-gpmc.c 	t->access /= 1000;
t                1774 drivers/memory/omap-gpmc.c 	t->rd_cycle /= 1000;
t                1775 drivers/memory/omap-gpmc.c 	t->wr_cycle /= 1000;
t                1776 drivers/memory/omap-gpmc.c 	t->bus_turnaround /= 1000;
t                1777 drivers/memory/omap-gpmc.c 	t->cycle2cycle_delay /= 1000;
t                1778 drivers/memory/omap-gpmc.c 	t->wait_monitoring /= 1000;
t                1779 drivers/memory/omap-gpmc.c 	t->clk_activation /= 1000;
t                1780 drivers/memory/omap-gpmc.c 	t->wr_access /= 1000;
t                1781 drivers/memory/omap-gpmc.c 	t->wr_data_mux_bus /= 1000;
t                1493 drivers/memstick/core/ms_block.c static void msb_cache_flush_timer(struct timer_list *t)
t                1495 drivers/memstick/core/ms_block.c 	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
t                 591 drivers/memstick/host/jmb38x_ms.c static void jmb38x_ms_abort(struct timer_list *t)
t                 593 drivers/memstick/host/jmb38x_ms.c 	struct jmb38x_ms_host *host = from_timer(host, t, timer);
t                 616 drivers/memstick/host/r592.c static void r592_detect_timer(struct timer_list *t)
t                 618 drivers/memstick/host/r592.c 	struct r592_device *dev = from_timer(dev, t, detect_timer);
t                 536 drivers/memstick/host/tifm_ms.c static void tifm_ms_abort(struct timer_list *t)
t                 538 drivers/memstick/host/tifm_ms.c 	struct tifm_ms *host = from_timer(host, t, timer);
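Note on the memstick entries above (the same idiom recurs in the mmc and net entries further down): the timer is embedded in the driver's private structure and the callback recovers that structure with from_timer(), a container_of() wrapper keyed on the field name. A minimal sketch of the pattern, using a hypothetical struct my_host and field names:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_host {                        /* hypothetical driver state */
            struct timer_list abort_timer;
    };

    static void my_host_abort(struct timer_list *t)
    {
            /* recover the enclosing structure from the timer_list pointer */
            struct my_host *host = from_timer(host, t, abort_timer);

            (void)host;                     /* real timeout handling would go here */
    }

    static void my_host_arm(struct my_host *host)
    {
            timer_setup(&host->abort_timer, my_host_abort, 0);
            mod_timer(&host->abort_timer, jiffies + HZ);    /* fire in about 1 s */
    }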
t                 587 drivers/message/fusion/lsi/mpi_targ.h #define SET_IO_INDEX(t, i)                                                     \
t                 588 drivers/message/fusion/lsi/mpi_targ.h             ((t) = ((t) & ~TARGET_MODE_REPLY_IO_INDEX_MASK) |                  \
t                 595 drivers/message/fusion/lsi/mpi_targ.h #define SET_INITIATOR_INDEX(t, ii)                                             \
t                 596 drivers/message/fusion/lsi/mpi_targ.h         ((t) = ((t) & ~TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) |               \
t                 603 drivers/message/fusion/lsi/mpi_targ.h #define SET_ALIAS(t, a)  ((t) = ((t) & ~TARGET_MODE_REPLY_ALIAS_MASK) |        \
t                 610 drivers/message/fusion/lsi/mpi_targ.h #define SET_PORT(t, p)  ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) |          \
t                 627 drivers/message/fusion/lsi/mpi_targ.h #define SET_HOST_INDEX_0100(t, hi)                                             \
t                 628 drivers/message/fusion/lsi/mpi_targ.h             ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) |           \
t                 635 drivers/message/fusion/lsi/mpi_targ.h #define SET_IOC_INDEX_0100(t, ii)                                              \
t                 636 drivers/message/fusion/lsi/mpi_targ.h             ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) |            \
t                 644 drivers/message/fusion/lsi/mpi_targ.h #define SET_INITIATOR_INDEX_0100(t, ii)                                        \
t                 645 drivers/message/fusion/lsi/mpi_targ.h         ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) |          \
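Note on the mpi_targ.h SET_* macros above: they all follow the same read-modify-write pattern, clear the field with its mask, then OR in the new value shifted into place. A generic sketch of that pattern (mask and shift values are illustrative, not taken from the header):

    #define MY_FIELD_MASK   0x00ff0000
    #define MY_FIELD_SHIFT  16

    /* clear the field in 't', then merge in the new value 'v' */
    #define SET_MY_FIELD(t, v)                                              \
            ((t) = ((t) & ~MY_FIELD_MASK) |                                 \
                   (((v) << MY_FIELD_SHIFT) & MY_FIELD_MASK))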
t                4665 drivers/message/fusion/mptbase.c 	int t;
t                4687 drivers/message/fusion/mptbase.c 	if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
t                4691 drivers/message/fusion/mptbase.c 			ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
t                4703 drivers/message/fusion/mptbase.c 	if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
t                4721 drivers/message/fusion/mptbase.c 			if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
t                4729 drivers/message/fusion/mptbase.c 				ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
t                4734 drivers/message/fusion/mptbase.c 		if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
t                4738 drivers/message/fusion/mptbase.c 				ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
t                4870 drivers/message/fusion/mptbase.c 	int t;
t                4881 drivers/message/fusion/mptbase.c 	if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
t                4886 drivers/message/fusion/mptbase.c 		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
t                4895 drivers/message/fusion/mptbase.c 			ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
t                4903 drivers/message/fusion/mptbase.c 		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
t                4912 drivers/message/fusion/mptbase.c 	if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
t                4934 drivers/message/fusion/mptbase.c 			ioc->name, t, u16cnt/2));
t                  57 drivers/mfd/ezx-pcap.c 	struct spi_transfer t;
t                  61 drivers/mfd/ezx-pcap.c 	memset(&t, 0, sizeof(t));
t                  63 drivers/mfd/ezx-pcap.c 	t.len = sizeof(u32);
t                  64 drivers/mfd/ezx-pcap.c 	spi_message_add_tail(&t, &m);
t                  67 drivers/mfd/ezx-pcap.c 	t.tx_buf = (u8 *) &pcap->buf;
t                  68 drivers/mfd/ezx-pcap.c 	t.rx_buf = (u8 *) &pcap->buf;
t                  69 drivers/mfd/mc13xxx-spi.c 	struct spi_transfer t = {
t                  82 drivers/mfd/mc13xxx-spi.c 	spi_message_add_tail(&t, &m);
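Note on the ezx-pcap and mc13xxx-spi entries above (the eeprom, CAN and dsa entries later in this listing do the same): a single-transfer SPI message is built by zeroing a struct spi_transfer, pointing tx_buf/rx_buf at a DMA-safe buffer, queueing it with spi_message_add_tail(), and running it with spi_sync(). A minimal sketch, with an illustrative function name and buffer:

    #include <linux/spi/spi.h>
    #include <linux/string.h>

    /* 'buf' must be DMA-safe (e.g. kmalloc'd), not on the stack */
    static int my_spi_xfer(struct spi_device *spi, u32 *buf)
    {
            struct spi_transfer t;
            struct spi_message m;

            spi_message_init(&m);
            memset(&t, 0, sizeof(t));

            t.tx_buf = buf;                 /* full duplex: command out ...   */
            t.rx_buf = buf;                 /* ... reply back into same word  */
            t.len = sizeof(u32);
            spi_message_add_tail(&t, &m);

            return spi_sync(spi, &m);       /* blocking, may sleep */
    }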
t                 733 drivers/mfd/menelaus.c 	int t, ret;
t                 746 drivers/mfd/menelaus.c 	t = (GPIO_CTRL_SLPCTLEN | GPIO3_DIR_INPUT);
t                 748 drivers/mfd/menelaus.c 		ret |= t;
t                 750 drivers/mfd/menelaus.c 		ret &= ~t;
t                 828 drivers/mfd/menelaus.c static void menelaus_to_time(char *regs, struct rtc_time *t)
t                 830 drivers/mfd/menelaus.c 	t->tm_sec = bcd2bin(regs[0]);
t                 831 drivers/mfd/menelaus.c 	t->tm_min = bcd2bin(regs[1]);
t                 833 drivers/mfd/menelaus.c 		t->tm_hour = bcd2bin(regs[2] & 0x1f) - 1;
t                 835 drivers/mfd/menelaus.c 			t->tm_hour += 12;
t                 837 drivers/mfd/menelaus.c 		t->tm_hour = bcd2bin(regs[2] & 0x3f);
t                 838 drivers/mfd/menelaus.c 	t->tm_mday = bcd2bin(regs[3]);
t                 839 drivers/mfd/menelaus.c 	t->tm_mon = bcd2bin(regs[4]) - 1;
t                 840 drivers/mfd/menelaus.c 	t->tm_year = bcd2bin(regs[5]) + 100;
t                 843 drivers/mfd/menelaus.c static int time_to_menelaus(struct rtc_time *t, int regnum)
t                 847 drivers/mfd/menelaus.c 	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_sec));
t                 851 drivers/mfd/menelaus.c 	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_min));
t                 856 drivers/mfd/menelaus.c 		hour = t->tm_hour + 1;
t                 862 drivers/mfd/menelaus.c 		hour = bin2bcd(t->tm_hour);
t                 867 drivers/mfd/menelaus.c 	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mday));
t                 871 drivers/mfd/menelaus.c 	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mon + 1));
t                 875 drivers/mfd/menelaus.c 	status = menelaus_write_reg(regnum++, bin2bcd(t->tm_year - 100));
t                 886 drivers/mfd/menelaus.c static int menelaus_read_time(struct device *dev, struct rtc_time *t)
t                 911 drivers/mfd/menelaus.c 	menelaus_to_time(regs, t);
t                 912 drivers/mfd/menelaus.c 	t->tm_wday = bcd2bin(regs[6]);
t                 917 drivers/mfd/menelaus.c static int menelaus_set_time(struct device *dev, struct rtc_time *t)
t                 922 drivers/mfd/menelaus.c 	status = time_to_menelaus(t, MENELAUS_RTC_SEC);
t                 925 drivers/mfd/menelaus.c 	status = menelaus_write_reg(MENELAUS_RTC_WKDAY, bin2bcd(t->tm_wday));
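Note on the menelaus.c entries above: the driver converts between struct rtc_time and the chip's BCD registers with bcd2bin()/bin2bcd(), correcting for the 1-based month and the years-since-1900 convention. A hedged sketch of the read direction, with an illustrative register layout:

    #include <linux/bcd.h>
    #include <linux/rtc.h>
    #include <linux/types.h>

    /* regs[]: raw BCD bytes read from the RTC: sec, min, hour, mday, mon, year */
    static void my_regs_to_time(const u8 *regs, struct rtc_time *t)
    {
            t->tm_sec  = bcd2bin(regs[0]);
            t->tm_min  = bcd2bin(regs[1]);
            t->tm_hour = bcd2bin(regs[2] & 0x3f);   /* 24-hour mode assumed here */
            t->tm_mday = bcd2bin(regs[3]);
            t->tm_mon  = bcd2bin(regs[4]) - 1;      /* rtc_time months run 0..11 */
            t->tm_year = bcd2bin(regs[5]) + 100;    /* years since 1900, chip stores 20xx */
    }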
t                  32 drivers/misc/cardreader/rtsx_usb.c static void rtsx_usb_sg_timed_out(struct timer_list *t)
t                  34 drivers/misc/cardreader/rtsx_usb.c 	struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
t                  29 drivers/misc/cb710/debug.c #define CB710_READ_REGS_TEMPLATE(t)					\
t                  30 drivers/misc/cb710/debug.c static void cb710_read_regs_##t(void __iomem *iobase,			\
t                  31 drivers/misc/cb710/debug.c 	u##t *reg, unsigned select)					\
t                  35 drivers/misc/cb710/debug.c 	for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) {	\
t                  39 drivers/misc/cb710/debug.c 		for (j = 0; j < 0x10/(t/8); ++j) {			\
t                  40 drivers/misc/cb710/debug.c 			if (!allow_reg_read(i, j, t))			\
t                  42 drivers/misc/cb710/debug.c 			reg[j] = ioread##t(iobase			\
t                  43 drivers/misc/cb710/debug.c 				+ (i << 4) + (j * (t/8)));		\
t                  53 drivers/misc/cb710/debug.c #define CB710_DUMP_REGS_TEMPLATE(t)					\
t                  54 drivers/misc/cb710/debug.c static void cb710_dump_regs_##t(struct device *dev,			\
t                  55 drivers/misc/cb710/debug.c 	const u##t *reg, unsigned select)				\
t                  57 drivers/misc/cb710/debug.c 	const char *const xp = &cb710_xes[8 - t/4];			\
t                  58 drivers/misc/cb710/debug.c 	const char *const format = cb710_regf_##t;			\
t                  63 drivers/misc/cb710/debug.c 	for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) {	\
t                  67 drivers/misc/cb710/debug.c 		for (j = 0; j < 0x10/(t/8); ++j) {			\
t                  69 drivers/misc/cb710/debug.c 			if (j == 8/(t/8))				\
t                  71 drivers/misc/cb710/debug.c 			if (allow_reg_read(i, j, t))			\
t                  80 drivers/misc/cb710/debug.c #define CB710_READ_AND_DUMP_REGS_TEMPLATE(t)				\
t                  81 drivers/misc/cb710/debug.c static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip,	\
t                  84 drivers/misc/cb710/debug.c 	u##t regs[CB710_REG_COUNT/sizeof(u##t)];			\
t                  87 drivers/misc/cb710/debug.c 	cb710_read_regs_##t(chip->iobase, regs, select);		\
t                  88 drivers/misc/cb710/debug.c 	cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select);	\
t                  91 drivers/misc/cb710/debug.c #define CB710_REG_ACCESS_TEMPLATES(t)		\
t                  92 drivers/misc/cb710/debug.c   CB710_READ_REGS_TEMPLATE(t)			\
t                  93 drivers/misc/cb710/debug.c   CB710_DUMP_REGS_TEMPLATE(t)			\
t                  94 drivers/misc/cb710/debug.c   CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
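Note on the cb710/debug.c templates above: the width parameter t is used with token pasting (u##t, ioread##t) so one macro body expands into 8-, 16- and 32-bit variants of the same helper. A reduced sketch of the technique, with illustrative function names:

    #include <linux/io.h>
    #include <linux/types.h>

    #define MY_READ_REG_TEMPLATE(t)                                         \
    static u##t my_read_reg##t(void __iomem *iobase, unsigned int off)      \
    {                                                                       \
            /* u##t and ioread##t are both selected by the width 't' */     \
            return ioread##t(iobase + off);                                 \
    }

    MY_READ_REG_TEMPLATE(8)
    MY_READ_REG_TEMPLATE(16)
    MY_READ_REG_TEMPLATE(32)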
t                 169 drivers/misc/cs5535-mfgpt.c 		unsigned long t;
t                 172 drivers/misc/cs5535-mfgpt.c 		t = find_first_bit(mfgpt->avail, max);
t                 174 drivers/misc/cs5535-mfgpt.c 		timer_nr = t < max ? (int) t : -1;
t                 266 drivers/misc/cs5535-mfgpt.c 	struct cs5535_mfgpt_timer t;
t                 269 drivers/misc/cs5535-mfgpt.c 		t.nr = i;
t                 271 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_RESET, 0);
t                 272 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_RESET, 0);
t                 273 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_NMI, 0);
t                 274 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_NMI, 0);
t                 275 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_IRQ, 0);
t                 276 drivers/misc/cs5535-mfgpt.c 		cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_IRQ, 0);
t                 319 drivers/misc/cs5535-mfgpt.c 	int err = -EIO, t;
t                 353 drivers/misc/cs5535-mfgpt.c 	t = scan_timers(&cs5535_mfgpt_chip);
t                 354 drivers/misc/cs5535-mfgpt.c 	dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
t                  70 drivers/misc/eeprom/at25.c 	struct spi_transfer	t[2];
t                 103 drivers/misc/eeprom/at25.c 	memset(t, 0, sizeof(t));
t                 105 drivers/misc/eeprom/at25.c 	t[0].tx_buf = command;
t                 106 drivers/misc/eeprom/at25.c 	t[0].len = at25->addrlen + 1;
t                 107 drivers/misc/eeprom/at25.c 	spi_message_add_tail(&t[0], &m);
t                 109 drivers/misc/eeprom/at25.c 	t[1].rx_buf = buf;
t                 110 drivers/misc/eeprom/at25.c 	t[1].len = count;
t                 111 drivers/misc/eeprom/at25.c 	spi_message_add_tail(&t[1], &m);
t                  79 drivers/misc/eeprom/eeprom_93xx46.c 		struct spi_transfer t[2] = { { 0 } };
t                 101 drivers/misc/eeprom/eeprom_93xx46.c 		t[0].tx_buf = (char *)&cmd_addr;
t                 102 drivers/misc/eeprom/eeprom_93xx46.c 		t[0].len = 2;
t                 103 drivers/misc/eeprom/eeprom_93xx46.c 		t[0].bits_per_word = bits;
t                 104 drivers/misc/eeprom/eeprom_93xx46.c 		spi_message_add_tail(&t[0], &m);
t                 106 drivers/misc/eeprom/eeprom_93xx46.c 		t[1].rx_buf = buf;
t                 107 drivers/misc/eeprom/eeprom_93xx46.c 		t[1].len = count;
t                 108 drivers/misc/eeprom/eeprom_93xx46.c 		t[1].bits_per_word = 8;
t                 109 drivers/misc/eeprom/eeprom_93xx46.c 		spi_message_add_tail(&t[1], &m);
t                 137 drivers/misc/eeprom/eeprom_93xx46.c 	struct spi_transfer t;
t                 159 drivers/misc/eeprom/eeprom_93xx46.c 	memset(&t, 0, sizeof(t));
t                 161 drivers/misc/eeprom/eeprom_93xx46.c 	t.tx_buf = &cmd_addr;
t                 162 drivers/misc/eeprom/eeprom_93xx46.c 	t.len = 2;
t                 163 drivers/misc/eeprom/eeprom_93xx46.c 	t.bits_per_word = bits;
t                 164 drivers/misc/eeprom/eeprom_93xx46.c 	spi_message_add_tail(&t, &m);
t                 190 drivers/misc/eeprom/eeprom_93xx46.c 	struct spi_transfer t[2];
t                 209 drivers/misc/eeprom/eeprom_93xx46.c 	memset(t, 0, sizeof(t));
t                 211 drivers/misc/eeprom/eeprom_93xx46.c 	t[0].tx_buf = (char *)&cmd_addr;
t                 212 drivers/misc/eeprom/eeprom_93xx46.c 	t[0].len = 2;
t                 213 drivers/misc/eeprom/eeprom_93xx46.c 	t[0].bits_per_word = bits;
t                 214 drivers/misc/eeprom/eeprom_93xx46.c 	spi_message_add_tail(&t[0], &m);
t                 216 drivers/misc/eeprom/eeprom_93xx46.c 	t[1].tx_buf = buf;
t                 217 drivers/misc/eeprom/eeprom_93xx46.c 	t[1].len = data_len;
t                 218 drivers/misc/eeprom/eeprom_93xx46.c 	t[1].bits_per_word = 8;
t                 219 drivers/misc/eeprom/eeprom_93xx46.c 	spi_message_add_tail(&t[1], &m);
t                 280 drivers/misc/eeprom/eeprom_93xx46.c 	struct spi_transfer t;
t                 301 drivers/misc/eeprom/eeprom_93xx46.c 	memset(&t, 0, sizeof(t));
t                 303 drivers/misc/eeprom/eeprom_93xx46.c 	t.tx_buf = &cmd_addr;
t                 304 drivers/misc/eeprom/eeprom_93xx46.c 	t.len = 2;
t                 305 drivers/misc/eeprom/eeprom_93xx46.c 	t.bits_per_word = bits;
t                 306 drivers/misc/eeprom/eeprom_93xx46.c 	spi_message_add_tail(&t, &m);
t                 618 drivers/misc/genwqe/card_ddcb.c 	unsigned int t;
t                 635 drivers/misc/genwqe/card_ddcb.c 	for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {
t                 197 drivers/misc/genwqe/card_debugfs.c 	u64 t;
t                 199 drivers/misc/genwqe/card_debugfs.c 	t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0);
t                 200 drivers/misc/genwqe/card_debugfs.c 	seq_printf(s, "  PF   0x%016llx\n", t);
t                 203 drivers/misc/genwqe/card_debugfs.c 		t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1);
t                 204 drivers/misc/genwqe/card_debugfs.c 		seq_printf(s, "  VF%-2d 0x%016llx\n", vf_num, t);
t                 100 drivers/misc/genwqe/card_sysfs.c 	u64 t;
t                 103 drivers/misc/genwqe/card_sysfs.c 	t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER);
t                 104 drivers/misc/genwqe/card_sysfs.c 	return sprintf(buf, "%016llx\n", t);
t                 112 drivers/misc/genwqe/card_sysfs.c 	u64 t;
t                 115 drivers/misc/genwqe/card_sysfs.c 	t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME);
t                 116 drivers/misc/genwqe/card_sysfs.c 	return sprintf(buf, "%016llx\n", t);
t                 175 drivers/misc/sgi-xp/xpc_main.c xpc_timeout_partition_disengage(struct timer_list *t)
t                 177 drivers/misc/sgi-xp/xpc_main.c 	struct xpc_partition *part = from_timer(part, t, disengage_timer);
t                 157 drivers/mmc/core/host.c static void mmc_retune_timer(struct timer_list *t)
t                 159 drivers/mmc/core/host.c 	struct mmc_host *host = from_timer(host, t, retune_timer);
t                1401 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1404 drivers/mmc/core/mmc_test.c 	t->blocks = sz >> 9;
t                1407 drivers/mmc/core/mmc_test.c 		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
t                1408 drivers/mmc/core/mmc_test.c 						  t->max_segs, t->max_seg_sz,
t                1409 drivers/mmc/core/mmc_test.c 				       &t->sg_len);
t                1411 drivers/mmc/core/mmc_test.c 		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
t                1412 drivers/mmc/core/mmc_test.c 				      t->max_seg_sz, &t->sg_len, min_sg_len);
t                1426 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1428 drivers/mmc/core/mmc_test.c 	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
t                1429 drivers/mmc/core/mmc_test.c 					t->blocks, 512, write);
t                1443 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1450 drivers/mmc/core/mmc_test.c 		struct mmc_test_area *t = &test->area;
t                1453 drivers/mmc/core/mmc_test.c 		if (t->max_seg_sz >= PAGE_SIZE)
t                1454 drivers/mmc/core/mmc_test.c 			max_tfr = t->max_segs * PAGE_SIZE;
t                1456 drivers/mmc/core/mmc_test.c 			max_tfr = t->max_segs * t->max_seg_sz;
t                1468 drivers/mmc/core/mmc_test.c 		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
t                1469 drivers/mmc/core/mmc_test.c 				 dev_addr, t->blocks, 512, write, count);
t                1501 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1503 drivers/mmc/core/mmc_test.c 	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
t                1511 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1516 drivers/mmc/core/mmc_test.c 	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
t                1525 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1527 drivers/mmc/core/mmc_test.c 	kfree(t->sg);
t                1528 drivers/mmc/core/mmc_test.c 	mmc_test_free_mem(t->mem);
t                1542 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1552 drivers/mmc/core/mmc_test.c 	t->max_sz = sz;
t                1553 drivers/mmc/core/mmc_test.c 	while (t->max_sz < 4 * 1024 * 1024)
t                1554 drivers/mmc/core/mmc_test.c 		t->max_sz += sz;
t                1555 drivers/mmc/core/mmc_test.c 	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
t                1556 drivers/mmc/core/mmc_test.c 		t->max_sz -= sz;
t                1558 drivers/mmc/core/mmc_test.c 	t->max_segs = test->card->host->max_segs;
t                1559 drivers/mmc/core/mmc_test.c 	t->max_seg_sz = test->card->host->max_seg_size;
t                1560 drivers/mmc/core/mmc_test.c 	t->max_seg_sz -= t->max_seg_sz % 512;
t                1562 drivers/mmc/core/mmc_test.c 	t->max_tfr = t->max_sz;
t                1563 drivers/mmc/core/mmc_test.c 	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
t                1564 drivers/mmc/core/mmc_test.c 		t->max_tfr = test->card->host->max_blk_count << 9;
t                1565 drivers/mmc/core/mmc_test.c 	if (t->max_tfr > test->card->host->max_req_size)
t                1566 drivers/mmc/core/mmc_test.c 		t->max_tfr = test->card->host->max_req_size;
t                1567 drivers/mmc/core/mmc_test.c 	if (t->max_tfr / t->max_seg_sz > t->max_segs)
t                1568 drivers/mmc/core/mmc_test.c 		t->max_tfr = t->max_segs * t->max_seg_sz;
t                1576 drivers/mmc/core/mmc_test.c 	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
t                1577 drivers/mmc/core/mmc_test.c 				    t->max_seg_sz);
t                1578 drivers/mmc/core/mmc_test.c 	if (!t->mem)
t                1581 drivers/mmc/core/mmc_test.c 	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
t                1582 drivers/mmc/core/mmc_test.c 	if (!t->sg) {
t                1587 drivers/mmc/core/mmc_test.c 	t->dev_addr = mmc_test_capacity(test->card) / 2;
t                1588 drivers/mmc/core/mmc_test.c 	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
t                1644 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1646 drivers/mmc/core/mmc_test.c 	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
t                1687 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1692 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
t                1693 drivers/mmc/core/mmc_test.c 		dev_addr = t->dev_addr + (sz >> 9);
t                1698 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                1699 drivers/mmc/core/mmc_test.c 	dev_addr = t->dev_addr;
t                1708 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1716 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
t                1717 drivers/mmc/core/mmc_test.c 		dev_addr = t->dev_addr + (sz >> 9);
t                1725 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                1726 drivers/mmc/core/mmc_test.c 	dev_addr = t->dev_addr;
t                1735 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1747 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_sz; sz <<= 1) {
t                1748 drivers/mmc/core/mmc_test.c 		dev_addr = t->dev_addr + (sz >> 9);
t                1756 drivers/mmc/core/mmc_test.c 	dev_addr = t->dev_addr;
t                1768 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1773 drivers/mmc/core/mmc_test.c 	cnt = t->max_sz / sz;
t                1774 drivers/mmc/core/mmc_test.c 	dev_addr = t->dev_addr;
t                1792 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1796 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
t                1801 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                1807 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1815 drivers/mmc/core/mmc_test.c 	cnt = t->max_sz / sz;
t                1816 drivers/mmc/core/mmc_test.c 	dev_addr = t->dev_addr;
t                1834 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1838 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
t                1843 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                1852 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1864 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
t                1871 drivers/mmc/core/mmc_test.c 		cnt = t->max_sz / sz;
t                1872 drivers/mmc/core/mmc_test.c 		dev_addr = t->dev_addr;
t                1935 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1940 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
t                1957 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                1987 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                1992 drivers/mmc/core/mmc_test.c 	sz = t->max_tfr;
t                2001 drivers/mmc/core/mmc_test.c 		if (t->max_seg_sz >= PAGE_SIZE)
t                2002 drivers/mmc/core/mmc_test.c 			max_tfr = t->max_segs * PAGE_SIZE;
t                2004 drivers/mmc/core/mmc_test.c 			max_tfr = t->max_segs * t->max_seg_sz;
t                2076 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                2082 drivers/mmc/core/mmc_test.c 	if (reqsize > t->max_tfr)
t                2083 drivers/mmc/core/mmc_test.c 		reqsize = t->max_tfr;
t                2353 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                2369 drivers/mmc/core/mmc_test.c 	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
t                2372 drivers/mmc/core/mmc_test.c 	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
t                2449 drivers/mmc/core/mmc_test.c 	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
t                2451 drivers/mmc/core/mmc_test.c 			mmc_hostname(test->card->host), count, t->blocks);
t                2465 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                2475 drivers/mmc/core/mmc_test.c 	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
t                2480 drivers/mmc/core/mmc_test.c 	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
t                2487 drivers/mmc/core/mmc_test.c 	struct mmc_test_area *t = &test->area;
t                2491 drivers/mmc/core/mmc_test.c 	for (sz = 512; sz <= t->max_tfr; sz += 512) {
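Note on the mmc_test.c entries above: the test area clamps the largest single transfer to whichever host limit bites first, the block count, the request size, or the segment count times the segment size (itself rounded down to a 512-byte multiple). A compact sketch of that clamping, as an illustrative helper rather than the driver's own code:

    #include <linux/mmc/host.h>

    /* area_sz: size of the prepared test area in bytes */
    static unsigned int my_max_transfer(struct mmc_host *host, unsigned int area_sz)
    {
            unsigned int max_seg_sz = host->max_seg_size - (host->max_seg_size % 512);
            unsigned int max_tfr = area_sz;

            if (max_tfr >> 9 > host->max_blk_count)
                    max_tfr = host->max_blk_count << 9;
            if (max_tfr > host->max_req_size)
                    max_tfr = host->max_req_size;
            if (max_tfr / max_seg_sz > host->max_segs)
                    max_tfr = host->max_segs * max_seg_sz;

            return max_tfr;
    }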
t                 687 drivers/mmc/host/atmel-mci.c static void atmci_timeout_timer(struct timer_list *t)
t                 691 drivers/mmc/host/atmel-mci.c 	host = from_timer(host, t, timer);
t                1619 drivers/mmc/host/atmel-mci.c static void atmci_detect_change(struct timer_list *t)
t                1621 drivers/mmc/host/atmel-mci.c 	struct atmel_mci_slot	*slot = from_timer(slot, t, detect_timer);
t                2969 drivers/mmc/host/dw_mmc.c static void dw_mci_cmd11_timer(struct timer_list *t)
t                2971 drivers/mmc/host/dw_mmc.c 	struct dw_mci *host = from_timer(host, t, cmd11_timer);
t                2983 drivers/mmc/host/dw_mmc.c static void dw_mci_cto_timer(struct timer_list *t)
t                2985 drivers/mmc/host/dw_mmc.c 	struct dw_mci *host = from_timer(host, t, cto_timer);
t                3038 drivers/mmc/host/dw_mmc.c static void dw_mci_dto_timer(struct timer_list *t)
t                3040 drivers/mmc/host/dw_mmc.c 	struct dw_mci *host = from_timer(host, t, dto_timer);
t                 406 drivers/mmc/host/dw_mmc.h #define SDMMC_SET_FIFOTH(m, r, t)	(((m) & 0x7) << 28 | \
t                 408 drivers/mmc/host/dw_mmc.h 					 ((t) & 0xFFF))
t                 595 drivers/mmc/host/jz4740_mmc.c static void jz4740_mmc_timeout(struct timer_list *t)
t                 597 drivers/mmc/host/jz4740_mmc.c 	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
t                 466 drivers/mmc/host/meson-mx-sdio.c static void meson_mx_mmc_timeout(struct timer_list *t)
t                 468 drivers/mmc/host/meson-mx-sdio.c 	struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
t                 121 drivers/mmc/host/mmc_spi.c 	struct spi_transfer	token, t, crc, early_status;
t                 252 drivers/mmc/host/mmc_spi.c 	u8	*end = cp + host->t.len;
t                 438 drivers/mmc/host/mmc_spi.c 	struct spi_transfer	*t;
t                 515 drivers/mmc/host/mmc_spi.c 	t = &host->t;
t                 516 drivers/mmc/host/mmc_spi.c 	memset(t, 0, sizeof(*t));
t                 517 drivers/mmc/host/mmc_spi.c 	t->tx_buf = t->rx_buf = data->status;
t                 518 drivers/mmc/host/mmc_spi.c 	t->tx_dma = t->rx_dma = host->data_dma;
t                 519 drivers/mmc/host/mmc_spi.c 	t->len = cp - data->status;
t                 520 drivers/mmc/host/mmc_spi.c 	t->cs_change = 1;
t                 521 drivers/mmc/host/mmc_spi.c 	spi_message_add_tail(t, &host->m);
t                 562 drivers/mmc/host/mmc_spi.c 	struct spi_transfer	*t;
t                 574 drivers/mmc/host/mmc_spi.c 		t = &host->token;
t                 575 drivers/mmc/host/mmc_spi.c 		memset(t, 0, sizeof(*t));
t                 576 drivers/mmc/host/mmc_spi.c 		t->len = 1;
t                 581 drivers/mmc/host/mmc_spi.c 		t->tx_buf = &scratch->data_token;
t                 583 drivers/mmc/host/mmc_spi.c 			t->tx_dma = dma + offsetof(struct scratch, data_token);
t                 584 drivers/mmc/host/mmc_spi.c 		spi_message_add_tail(t, &host->m);
t                 590 drivers/mmc/host/mmc_spi.c 	t = &host->t;
t                 591 drivers/mmc/host/mmc_spi.c 	memset(t, 0, sizeof(*t));
t                 592 drivers/mmc/host/mmc_spi.c 	t->tx_buf = host->ones;
t                 593 drivers/mmc/host/mmc_spi.c 	t->tx_dma = host->ones_dma;
t                 595 drivers/mmc/host/mmc_spi.c 	spi_message_add_tail(t, &host->m);
t                 597 drivers/mmc/host/mmc_spi.c 	t = &host->crc;
t                 598 drivers/mmc/host/mmc_spi.c 	memset(t, 0, sizeof(*t));
t                 599 drivers/mmc/host/mmc_spi.c 	t->len = 2;
t                 602 drivers/mmc/host/mmc_spi.c 		t->tx_buf = &scratch->crc_val;
t                 604 drivers/mmc/host/mmc_spi.c 			t->tx_dma = dma + offsetof(struct scratch, crc_val);
t                 606 drivers/mmc/host/mmc_spi.c 		t->tx_buf = host->ones;
t                 607 drivers/mmc/host/mmc_spi.c 		t->tx_dma = host->ones_dma;
t                 608 drivers/mmc/host/mmc_spi.c 		t->rx_buf = &scratch->crc_val;
t                 610 drivers/mmc/host/mmc_spi.c 			t->rx_dma = dma + offsetof(struct scratch, crc_val);
t                 612 drivers/mmc/host/mmc_spi.c 	spi_message_add_tail(t, &host->m);
t                 629 drivers/mmc/host/mmc_spi.c 		t = &host->early_status;
t                 630 drivers/mmc/host/mmc_spi.c 		memset(t, 0, sizeof(*t));
t                 631 drivers/mmc/host/mmc_spi.c 		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
t                 632 drivers/mmc/host/mmc_spi.c 		t->tx_buf = host->ones;
t                 633 drivers/mmc/host/mmc_spi.c 		t->tx_dma = host->ones_dma;
t                 634 drivers/mmc/host/mmc_spi.c 		t->rx_buf = scratch->status;
t                 636 drivers/mmc/host/mmc_spi.c 			t->rx_dma = dma + offsetof(struct scratch, status);
t                 637 drivers/mmc/host/mmc_spi.c 		t->cs_change = 1;
t                 638 drivers/mmc/host/mmc_spi.c 		spi_message_add_tail(t, &host->m);
t                 655 drivers/mmc/host/mmc_spi.c mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
t                 664 drivers/mmc/host/mmc_spi.c 		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
t                 729 drivers/mmc/host/mmc_spi.c 	t->tx_buf += t->len;
t                 731 drivers/mmc/host/mmc_spi.c 		t->tx_dma += t->len;
t                 761 drivers/mmc/host/mmc_spi.c mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
t                 800 drivers/mmc/host/mmc_spi.c 				t->rx_dma, t->len,
t                 815 drivers/mmc/host/mmc_spi.c 				t->rx_dma, t->len,
t                 823 drivers/mmc/host/mmc_spi.c 		u8 *cp = t->rx_buf;
t                 827 drivers/mmc/host/mmc_spi.c 		for (len = t->len; len; len--) {
t                 841 drivers/mmc/host/mmc_spi.c 		u16 crc = crc_itu_t(0, t->rx_buf, t->len);
t                 847 drivers/mmc/host/mmc_spi.c 				scratch->crc_val, crc, t->len);
t                 852 drivers/mmc/host/mmc_spi.c 	t->rx_buf += t->len;
t                 854 drivers/mmc/host/mmc_spi.c 		t->rx_dma += t->len;
t                 870 drivers/mmc/host/mmc_spi.c 	struct spi_transfer	*t;
t                 880 drivers/mmc/host/mmc_spi.c 	t = &host->t;
t                 882 drivers/mmc/host/mmc_spi.c 	if (t->speed_hz)
t                 883 drivers/mmc/host/mmc_spi.c 		clock_rate = t->speed_hz;
t                 917 drivers/mmc/host/mmc_spi.c 				t->tx_dma = dma_addr + sg->offset;
t                 919 drivers/mmc/host/mmc_spi.c 				t->rx_dma = dma_addr + sg->offset;
t                 925 drivers/mmc/host/mmc_spi.c 			t->tx_buf = kmap_addr + sg->offset;
t                 927 drivers/mmc/host/mmc_spi.c 			t->rx_buf = kmap_addr + sg->offset;
t                 931 drivers/mmc/host/mmc_spi.c 			t->len = min(length, blk_size);
t                 936 drivers/mmc/host/mmc_spi.c 				t->len);
t                 939 drivers/mmc/host/mmc_spi.c 				status = mmc_spi_writeblock(host, t, timeout);
t                 941 drivers/mmc/host/mmc_spi.c 				status = mmc_spi_readblock(host, t, timeout);
t                 945 drivers/mmc/host/mmc_spi.c 			data->bytes_xfered += t->len;
t                 946 drivers/mmc/host/mmc_spi.c 			length -= t->len;
t                  72 drivers/mmc/host/mvsdio.c 		unsigned long t = jiffies + HZ;
t                  76 drivers/mmc/host/mvsdio.c 			if (time_after(jiffies, t)) {
t                  84 drivers/mmc/host/mvsdio.c 				   hw_state, count, jiffies - (t - HZ));
t                 510 drivers/mmc/host/mvsdio.c static void mvsd_timeout_timer(struct timer_list *t)
t                 512 drivers/mmc/host/mvsdio.c 	struct mvsd_host *host = from_timer(host, t, timer);
t                 960 drivers/mmc/host/mxcmmc.c static void mxcmci_watchdog(struct timer_list *t)
t                 962 drivers/mmc/host/mxcmmc.c 	struct mxcmci_host *host = from_timer(host, t, watchdog);
t                 626 drivers/mmc/host/omap.c mmc_omap_cmd_timer(struct timer_list *t)
t                 628 drivers/mmc/host/omap.c 	struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
t                 655 drivers/mmc/host/omap.c mmc_omap_clk_timer(struct timer_list *t)
t                 657 drivers/mmc/host/omap.c 	struct mmc_omap_host *host = from_timer(host, t, clk_timer);
t                 875 drivers/mmc/host/omap.c static void mmc_omap_cover_timer(struct timer_list *t)
t                 877 drivers/mmc/host/omap.c 	struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
t                2747 drivers/mmc/host/sdhci.c static void sdhci_timeout_timer(struct timer_list *t)
t                2752 drivers/mmc/host/sdhci.c 	host = from_timer(host, t, timer);
t                2768 drivers/mmc/host/sdhci.c static void sdhci_timeout_data_timer(struct timer_list *t)
t                2773 drivers/mmc/host/sdhci.c 	host = from_timer(host, t, data_timer);
t                 783 drivers/mmc/host/tifm_sd.c static void tifm_sd_abort(struct timer_list *t)
t                 785 drivers/mmc/host/tifm_sd.c 	struct tifm_sd *host = from_timer(host, t, timer);
t                 926 drivers/mmc/host/via-sdmmc.c static void via_sdc_timeout(struct timer_list *t)
t                 931 drivers/mmc/host/via-sdmmc.c 	sdhost = from_timer(sdhost, t, timer);
t                 741 drivers/mmc/host/vub300.c static void vub300_inactivity_timer_expired(struct timer_list *t)
t                 743 drivers/mmc/host/vub300.c 	struct vub300_mmc_host *vub300 = from_timer(vub300, t,
t                1181 drivers/mmc/host/vub300.c static void vub300_sg_timed_out(struct timer_list *t)
t                1183 drivers/mmc/host/vub300.c 	struct vub300_mmc_host *vub300 = from_timer(vub300, t,
t                 946 drivers/mmc/host/wbsd.c static void wbsd_reset_ignore(struct timer_list *t)
t                 948 drivers/mmc/host/wbsd.c 	struct wbsd_host *host = from_timer(host, t, ignore_timer);
t                 816 drivers/mtd/chips/cfi_cmdset_0002.c 	map_word d, t;
t                 832 drivers/mtd/chips/cfi_cmdset_0002.c 	t = map_read(map, addr);
t                 834 drivers/mtd/chips/cfi_cmdset_0002.c 	return map_word_equal(map, d, t);
t                 456 drivers/mtd/devices/mtd_dataflash.c 	struct spi_transfer	t;
t                 477 drivers/mtd/devices/mtd_dataflash.c 	memset(&t, 0, sizeof t);
t                 478 drivers/mtd/devices/mtd_dataflash.c 	t.tx_buf = scratch;
t                 479 drivers/mtd/devices/mtd_dataflash.c 	t.rx_buf = scratch;
t                 480 drivers/mtd/devices/mtd_dataflash.c 	t.len = l;
t                 481 drivers/mtd/devices/mtd_dataflash.c 	spi_message_add_tail(&t, &m);
t                 535 drivers/mtd/devices/mtd_dataflash.c 	struct spi_transfer	t;
t                 563 drivers/mtd/devices/mtd_dataflash.c 	memset(&t, 0, sizeof t);
t                 564 drivers/mtd/devices/mtd_dataflash.c 	t.tx_buf = scratch;
t                 565 drivers/mtd/devices/mtd_dataflash.c 	t.len = l;
t                 566 drivers/mtd/devices/mtd_dataflash.c 	spi_message_add_tail(&t, &m);
t                  70 drivers/mtd/devices/sst25l.c 	struct spi_transfer t;
t                  75 drivers/mtd/devices/sst25l.c 	memset(&t, 0, sizeof(struct spi_transfer));
t                  79 drivers/mtd/devices/sst25l.c 	t.tx_buf = cmd_resp;
t                  80 drivers/mtd/devices/sst25l.c 	t.rx_buf = cmd_resp;
t                  81 drivers/mtd/devices/sst25l.c 	t.len = sizeof(cmd_resp);
t                  82 drivers/mtd/devices/sst25l.c 	spi_message_add_tail(&t, &m);
t                 312 drivers/mtd/devices/sst25l.c 	struct spi_transfer t;
t                 318 drivers/mtd/devices/sst25l.c 	memset(&t, 0, sizeof(struct spi_transfer));
t                 326 drivers/mtd/devices/sst25l.c 	t.tx_buf = cmd_resp;
t                 327 drivers/mtd/devices/sst25l.c 	t.rx_buf = cmd_resp;
t                 328 drivers/mtd/devices/sst25l.c 	t.len = sizeof(cmd_resp);
t                 329 drivers/mtd/devices/sst25l.c 	spi_message_add_tail(&t, &m);
t                 919 drivers/mtd/devices/st_spi_fsm.c 	uint8_t *t = (uint8_t *)&tmp;
t                 935 drivers/mtd/devices/st_spi_fsm.c 		data[i] = t[i];
t                1587 drivers/mtd/devices/st_spi_fsm.c 	uint8_t *t = (uint8_t *)&tmp;
t                1641 drivers/mtd/devices/st_spi_fsm.c 		memset(t, 0xff, write_mask + 1);	/* fill with 0xff's */
t                1643 drivers/mtd/devices/st_spi_fsm.c 			t[i] = *p++;
t                  85 drivers/mtd/maps/lantiq-flash.c 	unsigned char *t = (unsigned char *)to;
t                  90 drivers/mtd/maps/lantiq-flash.c 		*t++ = *f++;
t                  99 drivers/mtd/maps/lantiq-flash.c 	unsigned char *t = (unsigned char *)map->virt + to;
t                 104 drivers/mtd/maps/lantiq-flash.c 		*t++ = *f++;
t                 341 drivers/mtd/maps/pcmciamtd.c 		cistpl_format_t *t = &parse.format;
t                 342 drivers/mtd/maps/pcmciamtd.c 		(void)t; /* Shut up, gcc */
t                 344 drivers/mtd/maps/pcmciamtd.c 			t->type, t->edc, t->offset, t->length);
t                 357 drivers/mtd/maps/pcmciamtd.c 		cistpl_jedec_t *t = &parse.jedec;
t                 358 drivers/mtd/maps/pcmciamtd.c 		for (i = 0; i < t->nid; i++)
t                 360 drivers/mtd/maps/pcmciamtd.c 			      t->id[i].mfr, t->id[i].info);
t                 371 drivers/mtd/maps/pcmciamtd.c 	cistpl_device_t *t = &parse.device;
t                 378 drivers/mtd/maps/pcmciamtd.c 	dev->pcmcia_map.size = t->dev[0].size;
t                 380 drivers/mtd/maps/pcmciamtd.c 	for (i = 0; i < t->ndev; i++) {
t                 381 drivers/mtd/maps/pcmciamtd.c 		pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
t                 382 drivers/mtd/maps/pcmciamtd.c 		pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
t                 383 drivers/mtd/maps/pcmciamtd.c 		pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
t                 384 drivers/mtd/maps/pcmciamtd.c 		pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
t                 395 drivers/mtd/maps/pcmciamtd.c 	cistpl_device_geo_t *t = &parse.device_geo;
t                 401 drivers/mtd/maps/pcmciamtd.c 	dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
t                 403 drivers/mtd/maps/pcmciamtd.c 	for (i = 0; i < t->ngeo; i++) {
t                 404 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
t                 405 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
t                 406 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
t                 407 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
t                 408 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
t                 409 drivers/mtd/maps/pcmciamtd.c 		pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
t                 196 drivers/mtd/nand/raw/mxc_nand.c 	u32 *t = trg;
t                 200 drivers/mtd/nand/raw/mxc_nand.c 		*t++ = __raw_readl(s++);
t                 206 drivers/mtd/nand/raw/mxc_nand.c 	u16 *t = trg;
t                 216 drivers/mtd/nand/raw/mxc_nand.c 		*t++ = __raw_readw(s++);
t                 228 drivers/mtd/nand/raw/mxc_nand.c 	__iomem u16 *t = trg;
t                 238 drivers/mtd/nand/raw/mxc_nand.c 		__raw_writew(*s++, t++);
t                  26 drivers/mtd/nand/raw/mxic_nand.c #define HC_CFG_TYPE(s, t)	((t) << (23 + ((s) * 2)))
t                 109 drivers/mtd/nand/raw/nand_bch.c 	unsigned int m, t, eccsteps, i;
t                 127 drivers/mtd/nand/raw/nand_bch.c 	t = (eccbytes*8)/m;
t                 133 drivers/mtd/nand/raw/nand_bch.c 	nbc->bch = init_bch(m, t, 0);
t                 174 drivers/mtd/nand/raw/nand_bch.c 	nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
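Note on the nand_bch.c entries above: the BCH parameters are derived from the ECC layout, m is the Galois-field order and t the correction strength recovered from the ECC byte count, since each correctable error costs m bits of ECC. A hedged sketch of that calculation, assuming the usual fls(1 + 8*eccsize) choice of m:

    #include <linux/bch.h>
    #include <linux/bitops.h>

    /* eccsize: data bytes per ECC step, eccbytes: ECC bytes per step */
    static struct bch_control *my_init_bch(unsigned int eccsize, unsigned int eccbytes)
    {
            unsigned int m = fls(1 + 8 * eccsize);  /* Galois field order: GF(2^m) */
            unsigned int t = (eccbytes * 8) / m;    /* correctable bits per step   */

            return init_bch(m, t, 0);               /* 0 = default primitive polynomial */
    }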
t                 980 drivers/mtd/sm_ftl.c static void sm_cache_flush_timer(struct timer_list *t)
t                 982 drivers/mtd/sm_ftl.c 	struct sm_ftl *ftl = from_timer(ftl, t, timer);
t                 191 drivers/net/appletalk/cops.c static void cops_poll(struct timer_list *t);
t                 385 drivers/net/arcnet/arcnet.c static void arcnet_timer(struct timer_list *t)
t                 387 drivers/net/arcnet/arcnet.c 	struct arcnet_local *lp = from_timer(lp, t, timer);
t                  69 drivers/net/caif/caif_hsi.c static void cfhsi_inactivity_tout(struct timer_list *t)
t                  71 drivers/net/caif/caif_hsi.c 	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
t                 739 drivers/net/caif/caif_hsi.c static void cfhsi_rx_slowpath(struct timer_list *t)
t                 741 drivers/net/caif/caif_hsi.c 	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
t                 999 drivers/net/caif/caif_hsi.c static void cfhsi_aggregation_tout(struct timer_list *t)
t                1001 drivers/net/caif/caif_hsi.c 	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
t                 806 drivers/net/can/grcan.c static void grcan_running_reset(struct timer_list *t)
t                 808 drivers/net/can/grcan.c 	struct grcan_priv *priv = from_timer(priv, t, rr_timer);
t                 897 drivers/net/can/grcan.c static void grcan_initiate_running_reset(struct timer_list *t)
t                 899 drivers/net/can/grcan.c 	struct grcan_priv *priv = from_timer(priv, t, hang_timer);
t                 197 drivers/net/can/m_can/tcan4x5x.c 	struct spi_transfer t[2] = {
t                 205 drivers/net/can/m_can/tcan4x5x.c 	spi_message_add_tail(&t[0], &m);
t                 206 drivers/net/can/m_can/tcan4x5x.c 	spi_message_add_tail(&t[1], &m);
t                 376 drivers/net/can/sja1000/peak_pcmcia.c static void pcan_led_timer(struct timer_list *t)
t                 378 drivers/net/can/sja1000/peak_pcmcia.c 	struct pcan_pccard *card = from_timer(card, t, led_timer);
t                 202 drivers/net/can/spi/hi311x.c 	struct spi_transfer t = {
t                 212 drivers/net/can/spi/hi311x.c 	spi_message_add_tail(&t, &m);
t                 268 drivers/net/can/spi/mcp251x.c 	struct spi_transfer t = {
t                 278 drivers/net/can/spi/mcp251x.c 	spi_message_add_tail(&t, &m);
t                 254 drivers/net/can/usb/peak_usb/pcan_usb.c static void pcan_usb_restart(struct timer_list *t)
t                 256 drivers/net/can/usb/peak_usb/pcan_usb.c 	struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
t                  98 drivers/net/dsa/microchip/ksz_common.c static void mib_monitor(struct timer_list *t)
t                 100 drivers/net/dsa/microchip/ksz_common.c 	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
t                 151 drivers/net/dsa/mv88e6xxx/phy.c static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
t                 153 drivers/net/dsa/mv88e6xxx/phy.c 	struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
t                 176 drivers/net/dsa/realtek-smi-core.c 	u32 t;
t                 179 drivers/net/dsa/realtek-smi-core.c 	realtek_smi_read_bits(smi, 8, &t);
t                 180 drivers/net/dsa/realtek-smi-core.c 	*data = (t & 0xff);
t                 190 drivers/net/dsa/realtek-smi-core.c 	u32 t;
t                 193 drivers/net/dsa/realtek-smi-core.c 	realtek_smi_read_bits(smi, 8, &t);
t                 194 drivers/net/dsa/realtek-smi-core.c 	*data = (t & 0xff);
t                  56 drivers/net/dsa/vitesse-vsc73xx-spi.c 	struct spi_transfer t[2];
t                  67 drivers/net/dsa/vitesse-vsc73xx-spi.c 	memset(&t, 0, sizeof(t));
t                  69 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[0].tx_buf = cmd;
t                  70 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[0].len = sizeof(cmd);
t                  71 drivers/net/dsa/vitesse-vsc73xx-spi.c 	spi_message_add_tail(&t[0], &m);
t                  73 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[1].rx_buf = buf;
t                  74 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[1].len = sizeof(buf);
t                  75 drivers/net/dsa/vitesse-vsc73xx-spi.c 	spi_message_add_tail(&t[1], &m);
t                  98 drivers/net/dsa/vitesse-vsc73xx-spi.c 	struct spi_transfer t[2];
t                 109 drivers/net/dsa/vitesse-vsc73xx-spi.c 	memset(&t, 0, sizeof(t));
t                 111 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[0].tx_buf = cmd;
t                 112 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[0].len = sizeof(cmd);
t                 113 drivers/net/dsa/vitesse-vsc73xx-spi.c 	spi_message_add_tail(&t[0], &m);
t                 115 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[1].tx_buf = buf;
t                 116 drivers/net/dsa/vitesse-vsc73xx-spi.c 	t[1].len = sizeof(buf);
t                 117 drivers/net/dsa/vitesse-vsc73xx-spi.c 	spi_message_add_tail(&t[1], &m);
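The SPI users above (tcan4x5x, hi311x, mcp251x, vsc73xx) all build a message out of one or two spi_transfer entries and chain them with spi_message_add_tail(). A minimal sketch of the two-transfer command/response pattern, assuming a synchronous caller; the foo_ helper and the buffer lengths are illustrative:

#include <linux/spi/spi.h>

static int foo_spi_cmd_read(struct spi_device *spi, const u8 *cmd, size_t cmd_len,
			    u8 *buf, size_t buf_len)
{
	struct spi_transfer t[2] = { };
	struct spi_message m;

	spi_message_init(&m);

	t[0].tx_buf = cmd;			/* first transfer: the command */
	t[0].len = cmd_len;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;			/* second transfer: the response */
	t[1].len = buf_len;
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);		/* blocks until the message completes */
}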
t                 142 drivers/net/eql.c static void eql_timer(struct timer_list *t)
t                 144 drivers/net/eql.c 	equalizer_t *eql = from_timer(eql, t, timer);
t                 370 drivers/net/ethernet/3com/3c515.c static void corkscrew_timer(struct timer_list *t);
t                 872 drivers/net/ethernet/3com/3c515.c static void corkscrew_timer(struct timer_list *t)
t                 875 drivers/net/ethernet/3com/3c515.c 	struct corkscrew_private *vp = from_timer(vp, t, timer);
t                 228 drivers/net/ethernet/3com/3c574_cs.c static void media_check(struct timer_list *t);
t                 860 drivers/net/ethernet/3com/3c574_cs.c static void media_check(struct timer_list *t)
t                 862 drivers/net/ethernet/3com/3c574_cs.c 	struct el3_private *lp = from_timer(lp, t, media);
t                 166 drivers/net/ethernet/3com/3c589_cs.c static void media_check(struct timer_list *t);
t                 679 drivers/net/ethernet/3com/3c589_cs.c static void media_check(struct timer_list *t)
t                 681 drivers/net/ethernet/3com/3c589_cs.c 	struct el3_private *lp = from_timer(lp, t, media);
t                 761 drivers/net/ethernet/3com/3c59x.c static void vortex_timer(struct timer_list *t);
t                1782 drivers/net/ethernet/3com/3c59x.c vortex_timer(struct timer_list *t)
t                1784 drivers/net/ethernet/3com/3c59x.c 	struct vortex_private *vp = from_timer(vp, t, timer);
t                  88 drivers/net/ethernet/8390/axnet_cs.c static void ei_watchdog(struct timer_list *t);
t                 548 drivers/net/ethernet/8390/axnet_cs.c static void ei_watchdog(struct timer_list *t)
t                 550 drivers/net/ethernet/8390/axnet_cs.c     struct axnet_dev *info = from_timer(info, t, watchdog);
t                 101 drivers/net/ethernet/8390/pcnet_cs.c static void ei_watchdog(struct timer_list *t);
t                1010 drivers/net/ethernet/8390/pcnet_cs.c static void ei_watchdog(struct timer_list *t)
t                1012 drivers/net/ethernet/8390/pcnet_cs.c     struct pcnet_dev *info = from_timer(info, t, watchdog);
t                3083 drivers/net/ethernet/agere/et131x.c static void et131x_error_timer_handler(struct timer_list *t)
t                3085 drivers/net/ethernet/agere/et131x.c 	struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
t                 966 drivers/net/ethernet/alteon/acenic.c 		int t;
t                 969 drivers/net/ethernet/alteon/acenic.c 		t = read_eeprom_byte(dev, 0x8c+i);
t                 970 drivers/net/ethernet/alteon/acenic.c 		if (t < 0) {
t                 974 drivers/net/ethernet/alteon/acenic.c 			mac1 |= (t & 0xff);
t                 978 drivers/net/ethernet/alteon/acenic.c 		int t;
t                 981 drivers/net/ethernet/alteon/acenic.c 		t = read_eeprom_byte(dev, 0x8c+i);
t                 982 drivers/net/ethernet/alteon/acenic.c 		if (t < 0) {
t                 986 drivers/net/ethernet/alteon/acenic.c 			mac2 |= (t & 0xff);
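A condensed sketch of the byte-assembly loops above: read_eeprom_byte() (the driver's own helper) returns either the byte read or a negative error, so every value is checked before being shifted into the accumulated word. The wrapper below is hypothetical; only the check-then-shift pattern is taken from the listing:

/* hypothetical helper modelled on the loops above, not the driver's code */
static int foo_read_eeprom_word(struct net_device *dev, int base, u32 *out)
{
	u32 v = 0;
	int i, t;

	for (i = 0; i < 4; i++) {
		t = read_eeprom_byte(dev, base + i);	/* byte or negative error */
		if (t < 0)
			return t;
		v = (v << 8) | (t & 0xff);
	}
	*out = v;
	return 0;
}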
t                3111 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_timer_service(struct timer_list *t)
t                3113 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
t                  97 drivers/net/ethernet/amd/7990.c 	int t; \
t                  98 drivers/net/ethernet/amd/7990.c 	for (t = 0; t < RX_RING_SIZE; t++) { \
t                 100 drivers/net/ethernet/amd/7990.c 		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
t                 101 drivers/net/ethernet/amd/7990.c 		       ib->brx_ring[t].length, \
t                 102 drivers/net/ethernet/amd/7990.c 		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
t                 104 drivers/net/ethernet/amd/7990.c 	for (t = 0; t < TX_RING_SIZE; t++) { \
t                 106 drivers/net/ethernet/amd/7990.c 		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
t                 107 drivers/net/ethernet/amd/7990.c 		       ib->btx_ring[t].length, \
t                 108 drivers/net/ethernet/amd/7990.c 		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
t                 642 drivers/net/ethernet/amd/a2065.c static void lance_set_multicast_retry(struct timer_list *t)
t                 644 drivers/net/ethernet/amd/a2065.c 	struct lance_private *lp = from_timer(lp, t, multicast_timer);
t                 302 drivers/net/ethernet/amd/am79c961a.c static void am79c961_timer(struct timer_list *t)
t                 304 drivers/net/ethernet/amd/am79c961a.c 	struct dev_priv *priv = from_timer(priv, t, timer);
t                1655 drivers/net/ethernet/amd/amd8111e.c static void amd8111e_config_ipg(struct timer_list *t)
t                1657 drivers/net/ethernet/amd/amd8111e.c 	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
t                 130 drivers/net/ethernet/amd/ariadne.c 		volatile struct TDRE *t = &lancedata->tx_ring[i];
t                 131 drivers/net/ethernet/amd/ariadne.c 		t->TMD0 = swloww(ARIADNE_RAM +
t                 133 drivers/net/ethernet/amd/ariadne.c 		t->TMD1 = swhighw(ARIADNE_RAM +
t                 136 drivers/net/ethernet/amd/ariadne.c 		t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
t                 137 drivers/net/ethernet/amd/ariadne.c 		t->TMD3 = 0;
t                1005 drivers/net/ethernet/amd/declance.c static void lance_set_multicast_retry(struct timer_list *t)
t                1007 drivers/net/ethernet/amd/declance.c 	struct lance_private *lp = from_timer(lp, t, multicast_timer);
t                2907 drivers/net/ethernet/amd/pcnet32.c static void pcnet32_watchdog(struct timer_list *t)
t                2909 drivers/net/ethernet/amd/pcnet32.c 	struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
t                1252 drivers/net/ethernet/amd/sunlance.c static void lance_set_multicast_retry(struct timer_list *t)
t                1254 drivers/net/ethernet/amd/sunlance.c 	struct lance_private *lp = from_timer(lp, t, multicast_timer);
t                 644 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_tx_timer(struct timer_list *t)
t                 646 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
t                 682 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_service_timer(struct timer_list *t)
t                 684 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
t                 157 drivers/net/ethernet/apple/bmac.c static void bmac_tx_timeout(struct timer_list *t);
t                1469 drivers/net/ethernet/apple/bmac.c static void bmac_tx_timeout(struct timer_list *t)
t                1471 drivers/net/ethernet/apple/bmac.c 	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
t                  90 drivers/net/ethernet/apple/mace.c static void mace_tx_timeout(struct timer_list *t);
t                 803 drivers/net/ethernet/apple/mace.c static void mace_tx_timeout(struct timer_list *t)
t                 805 drivers/net/ethernet/apple/mace.c     struct mace_data *mp = from_timer(mp, t, tx_timeout);
t                 210 drivers/net/ethernet/aquantia/atlantic/aq_nic.c static void aq_nic_service_timer_cb(struct timer_list *t)
t                 212 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	struct aq_nic_s *self = from_timer(self, t, service_timer);
t                 219 drivers/net/ethernet/aquantia/atlantic/aq_nic.c static void aq_nic_polling_timer_cb(struct timer_list *t)
t                 221 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	struct aq_nic_s *self = from_timer(self, t, polling_timer);
t                 189 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				       unsigned int t)
t                 191 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
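The one-liner above tests whether index i lies strictly between head h and tail t on a circular ring, with the two arms covering the non-wrapped and wrapped windows. The same predicate as a standalone sketch:

#include <linux/types.h>

/* true if i sits strictly inside the (h, t) window of a circular ring */
static bool ring_idx_between(unsigned int h, unsigned int i, unsigned int t)
{
	return (h < t) ? (h < i && i < t)	/* window does not wrap */
		       : (h < i || i < t);	/* window wraps past the end */
}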
t                 493 drivers/net/ethernet/atheros/ag71xx.c 		unsigned long t;
t                 495 drivers/net/ethernet/atheros/ag71xx.c 		t = ref_clock / table[i];
t                 496 drivers/net/ethernet/atheros/ag71xx.c 		if (t <= AG71XX_MDIO_MAX_CLK) {
t                 509 drivers/net/ethernet/atheros/ag71xx.c 	u32 t;
t                 511 drivers/net/ethernet/atheros/ag71xx.c 	err = ag71xx_mdio_get_divider(ag, &t);
t                 515 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
t                 518 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
t                 791 drivers/net/ethernet/atheros/ag71xx.c 	u32 t;
t                 793 drivers/net/ethernet/atheros/ag71xx.c 	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
t                 796 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
t                 798 drivers/net/ethernet/atheros/ag71xx.c 	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
t                 799 drivers/net/ethernet/atheros/ag71xx.c 	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
t                1405 drivers/net/ethernet/atheros/ag71xx.c static void ag71xx_oom_timer_handler(struct timer_list *t)
t                1407 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx *ag = from_timer(ag, t, oom_timer);
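The MAC-address write above spreads the six address bytes across the two 32-bit AG71XX_REG_MAC_ADDR registers: the shown lines place mac[5]/mac[4] in the top half of ADDR1 and mac[1]/mac[0] in the top half of ADDR2. A hedged sketch of that style of packing; the helper name and the placement of the remaining bytes are assumptions, not taken from the elided lines:

/* hypothetical packing helper modelled on the shifts shown above */
static void foo_mac_to_regs(const u8 mac[6], u32 *addr1, u32 *addr2)
{
	/* mac[5]..mac[2] assumed to fill ADDR1, high byte first */
	*addr1 = ((u32)mac[5] << 24) | ((u32)mac[4] << 16) |
		 ((u32)mac[3] << 8)  |  (u32)mac[2];
	/* mac[1]/mac[0] occupy the top half of ADDR2, as shown */
	*addr2 = ((u32)mac[1] << 24) | ((u32)mac[0] << 16);
}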
t                 212 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static void atl1c_phy_config(struct timer_list *t)
t                 214 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct atl1c_adapter *adapter = from_timer(adapter, t,
t                 120 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static void atl1e_phy_config(struct timer_list *t)
t                 122 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct atl1e_adapter *adapter = from_timer(adapter, t,
t                2557 drivers/net/ethernet/atheros/atlx/atl1.c static void atl1_phy_config(struct timer_list *t)
t                2559 drivers/net/ethernet/atheros/atlx/atl1.c 	struct atl1_adapter *adapter = from_timer(adapter, t,
t                1016 drivers/net/ethernet/atheros/atlx/atl2.c static void atl2_watchdog(struct timer_list *t)
t                1018 drivers/net/ethernet/atheros/atlx/atl2.c 	struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                1041 drivers/net/ethernet/atheros/atlx/atl2.c static void atl2_phy_config(struct timer_list *t)
t                1043 drivers/net/ethernet/atheros/atlx/atl2.c 	struct atl2_adapter *adapter = from_timer(adapter, t,
t                 602 drivers/net/ethernet/broadcom/b44.c static void b44_timer(struct timer_list *t)
t                 604 drivers/net/ethernet/broadcom/b44.c 	struct b44 *bp = from_timer(bp, t, timer);
t                 285 drivers/net/ethernet/broadcom/bcm63xx_enet.c static void bcm_enet_refill_rx_timer(struct timer_list *t)
t                 287 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
t                1368 drivers/net/ethernet/broadcom/bcm63xx_enet.c static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
t                1372 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
t                1981 drivers/net/ethernet/broadcom/bcm63xx_enet.c static void swphy_poll_timer(struct timer_list *t)
t                1983 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
t                6185 drivers/net/ethernet/broadcom/bnx2.c bnx2_timer(struct timer_list *t)
t                6187 drivers/net/ethernet/broadcom/bnx2.c 	struct bnx2 *bp = from_timer(bp, t, timer);
t                5764 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_timer(struct timer_list *t)
t                5766 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	struct bnx2x *bp = from_timer(bp, t, timer);
t                 388 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_STAT64(s, t) \
t                 390 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
t                 391 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
t                 392 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		pstats->mac_stx[0].t##_hi = new->s##_hi; \
t                 393 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		pstats->mac_stx[0].t##_lo = new->s##_lo; \
t                 394 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
t                 395 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		       pstats->mac_stx[1].t##_lo, diff.lo); \
t                 398 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_STAT64_NIG(s, t) \
t                 402 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_64(estats->t##_hi, diff.hi, \
t                 403 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		       estats->t##_lo, diff.lo); \
t                 413 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define ADD_STAT64(diff, t) \
t                 415 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
t                 416 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
t                 426 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_TSTAT_X(s, t, size) \
t                 431 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
t                 434 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
t                 436 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_E_TSTAT(s, t, size) \
t                 438 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		UPDATE_EXTEND_TSTAT_X(s, t, size); \
t                 439 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
t                 442 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_USTAT(s, t) \
t                 446 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
t                 449 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_E_USTAT(s, t) \
t                 451 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		UPDATE_EXTEND_USTAT(s, t); \
t                 452 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
t                 455 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_EXTEND_XSTAT(s, t) \
t                 459 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
t                 462 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_QSTAT(s, t) \
t                 464 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
t                 465 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
t                 466 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 			+ ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
t                 513 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define UPDATE_ESTAT(s, t) \
t                 515 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		SUB_64(estats->s##_hi, estats_old->t##_hi, \
t                 516 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		       estats->s##_lo, estats_old->t##_lo); \
t                 517 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		ADD_64(estats->s##_hi, estats->t##_hi, \
t                 518 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		       estats->s##_lo, estats->t##_lo); \
t                 519 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		estats_old->t##_hi = estats->t##_hi; \
t                 520 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		estats_old->t##_lo = estats->t##_lo; \
t                 535 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h #define SUB_EXTEND_USTAT(s, t) \
t                 538 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h 		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
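The UPDATE_*/ADD_*/SUB_* macros above keep 64-bit statistics as explicit hi/lo 32-bit halves. A minimal sketch of the carry-propagating addition this kind of macro relies on; the helper below is illustrative, not the driver's ADD_64 definition:

#include <linux/types.h>

/* add the 64-bit value a_hi:a_lo into d_hi:d_lo, propagating the carry */
static inline void add64_hilo(u32 *d_hi, u32 *d_lo, u32 a_hi, u32 a_lo)
{
	u32 lo = *d_lo + a_lo;

	*d_hi += a_hi + (lo < a_lo);	/* carry out of the low word */
	*d_lo = lo;
}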
t                9990 drivers/net/ethernet/broadcom/bnxt/bnxt.c static void bnxt_timer(struct timer_list *t)
t                9992 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt *bp = from_timer(bp, t, timer);
t                10990 drivers/net/ethernet/broadcom/tg3.c static void tg3_timer(struct timer_list *t)
t                10992 drivers/net/ethernet/broadcom/tg3.c 	struct tg3 *tp = from_timer(tp, t, timer);
t                1539 drivers/net/ethernet/brocade/bna/bfa_ioc.c 	u32 t;
t                1549 drivers/net/ethernet/brocade/bna/bfa_ioc.c 		t = readl(pci_bar + FLI_RDDATA_REG);
t                1688 drivers/net/ethernet/brocade/bna/bnad.c bnad_ioc_timeout(struct timer_list *t)
t                1690 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
t                1699 drivers/net/ethernet/brocade/bna/bnad.c bnad_ioc_hb_check(struct timer_list *t)
t                1701 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
t                1710 drivers/net/ethernet/brocade/bna/bnad.c bnad_iocpf_timeout(struct timer_list *t)
t                1712 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
t                1721 drivers/net/ethernet/brocade/bna/bnad.c bnad_iocpf_sem_timeout(struct timer_list *t)
t                1723 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
t                1743 drivers/net/ethernet/brocade/bna/bnad.c bnad_dim_timeout(struct timer_list *t)
t                1745 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, dim_timer);
t                1776 drivers/net/ethernet/brocade/bna/bnad.c bnad_stats_timeout(struct timer_list *t)
t                1778 drivers/net/ethernet/brocade/bna/bnad.c 	struct bnad *bnad = from_timer(bnad, t, stats_timer);
t                 397 drivers/net/ethernet/calxeda/xgmac.c #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
t                 398 drivers/net/ethernet/calxeda/xgmac.c #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
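dma_ring_space() and dma_ring_cnt() above are thin wrappers over the generic circular-buffer helpers from <linux/circ_buf.h>, which assume a power-of-two ring size:

	/* entries the consumer may still process, given producer head and consumer tail */
	used = CIRC_CNT(head, tail, ring_size);
	/* slots the producer may still fill; one slot is always left empty
	 * so that a full ring can be told apart from an empty one */
	free = CIRC_SPACE(head, tail, ring_size);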
t                 197 drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c 	__be32 t = cpu_to_be32(val);
t                 199 drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c 	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
t                 459 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	const struct sge_intr_counts *t;
t                 463 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	t = t1_sge_get_intr_counts(adapter->sge);
t                 511 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->rx_drops;
t                 512 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->pure_rsps;
t                 513 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->unhandled_irqs;
t                 514 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->respQ_empty;
t                 515 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->respQ_overflow;
t                 516 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->freelistQ_empty;
t                 517 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->pkt_too_big;
t                 518 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->pkt_mismatch;
t                 519 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->cmdQ_full[0];
t                 520 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	*data++ = t->cmdQ_full[1];
t                1884 drivers/net/ethernet/chelsio/cxgb/sge.c static void sge_tx_reclaim_cb(struct timer_list *t)
t                1887 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
t                1980 drivers/net/ethernet/chelsio/cxgb/sge.c static void espibug_workaround_t204(struct timer_list *t)
t                1982 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sge *sge = from_timer(sge, t, espibug_timer);
t                2023 drivers/net/ethernet/chelsio/cxgb/sge.c static void espibug_workaround(struct timer_list *t)
t                2025 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sge *sge = from_timer(sge, t, espibug_timer);
t                  47 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h static inline union active_open_entry *atid2entry(const struct tid_info *t,
t                  50 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	return &t->atid_tab[atid - t->atid_base];
t                  53 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h static inline union listen_entry *stid2entry(const struct tid_info *t,
t                  56 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	return &t->stid_tab[stid - t->stid_base];
t                  62 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
t                  65 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
t                  66 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	    &(t->tid_tab[tid]) : NULL;
t                  74 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
t                  79 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
t                  82 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	e = stid2entry(t, tid);
t                  83 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if ((void *)e->next >= (void *)t->tid_tab &&
t                  84 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	    (void *)e->next < (void *)&t->atid_tab[t->natids])
t                  93 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
t                  98 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if (tid < t->atid_base || tid >= t->atid_base + t->natids)
t                 101 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	e = atid2entry(t, tid);
t                 102 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if ((void *)e->next >= (void *)t->tid_tab &&
t                 103 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	    (void *)e->next < (void *)&t->atid_tab[t->natids])
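The pointer-range test in lookup_stid()/lookup_atid() above is how free slots are recognised: the union entries double as free-list links, so an entry whose next pointer still points back into the tid/stid/atid table block is on the free list and the lookup treats it as absent, while live entries are handed back as t3c_tid entries. An illustrative caller-side check (the exact handler logic is not shown in the listing):

	struct t3c_tid_entry *t3c_tid = lookup_stid(t, stid);

	if (!t3c_tid || !t3c_tid->ctx)
		return CPL_RET_BUF_DONE;	/* no listener registered for this stid */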
t                2153 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct ch_qset_params t;
t                2159 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_from_user(&t, useraddr, sizeof(t)))
t                2161 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
t                2163 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.qset_idx >= SGE_QSETS)
t                2165 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
t                2166 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.cong_thres, 0, 255) ||
t                2167 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
t                2169 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
t                2171 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
t                2173 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
t                2175 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
t                2177 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
t                2182 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
t                2183 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
t                2184 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
t                2185 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.polling >= 0 || t.cong_thres >= 0))
t                2197 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.qset_idx < q1)
t                2199 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.qset_idx > q1 + nqsets - 1)
t                2202 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q = &adapter->params.sge.qset[t.qset_idx];
t                2204 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.rspq_size >= 0)
t                2205 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->rspq_size = t.rspq_size;
t                2206 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.fl_size[0] >= 0)
t                2207 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->fl_size = t.fl_size[0];
t                2208 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.fl_size[1] >= 0)
t                2209 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->jumbo_size = t.fl_size[1];
t                2210 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.txq_size[0] >= 0)
t                2211 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->txq_size[0] = t.txq_size[0];
t                2212 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.txq_size[1] >= 0)
t                2213 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->txq_size[1] = t.txq_size[1];
t                2214 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.txq_size[2] >= 0)
t                2215 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->txq_size[2] = t.txq_size[2];
t                2216 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cong_thres >= 0)
t                2217 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->cong_thres = t.cong_thres;
t                2218 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.intr_lat >= 0) {
t                2220 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				&adapter->sge.qs[t.qset_idx];
t                2222 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			q->coalesce_usecs = t.intr_lat;
t                2225 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.polling >= 0) {
t                2227 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				q->polling = t.polling;
t                2232 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 					t.polling = 0;
t                2237 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 					q->polling = t.polling;
t                2242 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.lro >= 0) {
t                2243 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			if (t.lro)
t                2254 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct ch_qset_params t;
t                2259 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_from_user(&t, useraddr, sizeof(t)))
t                2262 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
t                2274 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.qset_idx >= nqsets)
t                2276 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
t                2278 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
t                2279 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.rspq_size = q->rspq_size;
t                2280 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.txq_size[0] = q->txq_size[0];
t                2281 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.txq_size[1] = q->txq_size[1];
t                2282 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.txq_size[2] = q->txq_size[2];
t                2283 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.fl_size[0] = q->fl_size;
t                2284 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.fl_size[1] = q->jumbo_size;
t                2285 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.polling = q->polling;
t                2286 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.lro = !!(dev->features & NETIF_F_GRO);
t                2287 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.intr_lat = q->coalesce_usecs;
t                2288 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.cong_thres = q->cong_thres;
t                2289 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.qnum = q1;
t                2292 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
t                2294 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.vector = adapter->pdev->irq;
t                2296 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_to_user(useraddr, &t, sizeof(t)))
t                2346 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct ch_mem_range t;
t                2350 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_from_user(&t, useraddr, sizeof(t)))
t                2352 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cmd != CHELSIO_LOAD_FW)
t                2355 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
t                2359 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		ret = t3_load_fw(adapter, fw_data, t.len);
t                2445 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct ch_mem_range t;
t                2455 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_from_user(&t, useraddr, sizeof(t)))
t                2457 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cmd != CHELSIO_GET_MEM)
t                2459 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if ((t.addr & 7) || (t.len & 7))
t                2461 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.mem_id == MEM_CM)
t                2463 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		else if (t.mem_id == MEM_PMRX)
t                2465 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		else if (t.mem_id == MEM_PMTX)
t                2475 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t.version = 3 | (adapter->params.rev << 10);
t                2476 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_to_user(useraddr, &t, sizeof(t)))
t                2483 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		useraddr += sizeof(t);	/* advance to start of buffer */
t                2484 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		while (t.len) {
t                2486 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				min_t(unsigned int, t.len, sizeof(buf));
t                2489 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
t                2496 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.addr += chunk;
t                2497 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t.len -= chunk;
t                2502 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct ch_trace t;
t                2509 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (copy_from_user(&t, useraddr, sizeof(t)))
t                2511 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
t                2514 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		tp = (const struct trace_params *)&t.sip;
t                2515 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.config_tx)
t                2517 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						t.invert_match,
t                2518 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						t.trace_tx);
t                2519 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (t.config_rx)
t                2521 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						t.invert_match,
t                2522 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						t.trace_rx);
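The CHELSIO_* cases above all follow the classic private-ioctl shape: copy the request structure in from user space, validate the embedded command and every field, apply (or fill in) the parameters, and copy the structure back out for get-type commands. A minimal sketch; ch_foo, CHELSIO_FOO and the qset bound are illustrative stand-ins:

	struct ch_foo t;

	if (copy_from_user(&t, useraddr, sizeof(t)))
		return -EFAULT;
	if (t.cmd != CHELSIO_FOO)		/* command embedded in the payload */
		return -EINVAL;
	if (t.qset_idx >= SGE_QSETS)		/* bound-check every index */
		return -EINVAL;

	/* ... apply the validated parameters, or fill t for a get ... */

	if (copy_to_user(useraddr, &t, sizeof(t)))
		return -EFAULT;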
t                 310 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct ch_mem_range *t = data;
t                 313 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if ((t->addr & 7) || (t->len & 7))
t                 315 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (t->mem_id == MEM_CM)
t                 317 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		else if (t->mem_id == MEM_PMRX)
t                 319 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		else if (t->mem_id == MEM_PMTX)
t                 325 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
t                 326 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 					(u64 *) t->buf);
t                 503 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 504 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union active_open_entry *p = atid2entry(t, atid);
t                 507 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_bh(&t->atid_lock);
t                 508 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->next = t->afree;
t                 509 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->afree = p;
t                 510 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->atids_in_use--;
t                 511 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_unlock_bh(&t->atid_lock);
t                 523 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 524 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union listen_entry *p = stid2entry(t, stid);
t                 526 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_bh(&t->stid_lock);
t                 527 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->next = t->sfree;
t                 528 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->sfree = p;
t                 529 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->stids_in_use--;
t                 530 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_unlock_bh(&t->stid_lock);
t                 538 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 540 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->tid_tab[tid].client = client;
t                 541 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->tid_tab[tid].ctx = ctx;
t                 542 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	atomic_inc(&t->tids_in_use);
t                 629 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 631 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	BUG_ON(tid >= t->ntids);
t                 633 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
t                 641 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			t->tid_tab[tid].ctx = NULL;
t                 645 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	atomic_dec(&t->tids_in_use);
t                 654 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 656 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_bh(&t->atid_lock);
t                 657 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (t->afree &&
t                 658 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
t                 659 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    t->ntids) {
t                 660 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		union active_open_entry *p = t->afree;
t                 662 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		atid = (p - t->atid_tab) + t->atid_base;
t                 663 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->afree = p->next;
t                 666 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->atids_in_use++;
t                 668 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_unlock_bh(&t->atid_lock);
t                 678 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t                 680 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_bh(&t->stid_lock);
t                 681 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (t->sfree) {
t                 682 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		union listen_entry *p = t->sfree;
t                 684 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		stid = (p - t->stid_tab) + t->stid_base;
t                 685 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->sfree = p->next;
t                 688 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->stids_in_use++;
t                 690 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_unlock_bh(&t->stid_lock);
t                 799 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
t                 803 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (unlikely(tid >= t->ntids)) {
t                 810 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t3c_tid = lookup_stid(t, stid);
t                 893 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
t                 897 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (unlikely(tid >= t->ntids)) {
t                 904 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t3c_tid = lookup_atid(t, atid);
t                1157 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
t                1161 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned long size = ntids * sizeof(*t->tid_tab) +
t                1162 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
t                1164 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->tid_tab = kvzalloc(size, GFP_KERNEL);
t                1165 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (!t->tid_tab)
t                1168 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
t                1169 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
t                1170 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->ntids = ntids;
t                1171 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->nstids = nstids;
t                1172 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->stid_base = stid_base;
t                1173 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->sfree = NULL;
t                1174 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->natids = natids;
t                1175 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->atid_base = atid_base;
t                1176 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->afree = NULL;
t                1177 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->stids_in_use = t->atids_in_use = 0;
t                1178 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	atomic_set(&t->tids_in_use, 0);
t                1179 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_init(&t->stid_lock);
t                1180 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_init(&t->atid_lock);
t                1187 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
t                1188 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->sfree = t->stid_tab;
t                1192 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t                1193 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->afree = t->atid_tab;
t                1198 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static void free_tid_maps(struct tid_info *t)
t                1200 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	kvfree(t->tid_tab);
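Note that free_tid_maps() above only has to kvfree() tid_tab: init_tid_tabs() sizes a single kvzalloc() to hold the tid, stid and atid arrays back to back and points stid_tab/atid_tab into that same block, so releasing the first table releases all three.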
t                1221 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct t3c_data *t;
t                1227 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t = kzalloc(sizeof(*t), GFP_KERNEL);
t                1228 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (!t)
t                1232 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
t                1233 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
t                1246 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
t                1251 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->mtus = mtutab.mtus;
t                1252 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->nmtus = mtutab.size;
t                1254 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
t                1255 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	spin_lock_init(&t->tid_release_lock);
t                1256 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	INIT_LIST_HEAD(&t->list_node);
t                1257 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->dev = dev;
t                1260 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	T3C_DATA(dev) = t;
t                1268 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
t                1269 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->release_list_incomplete = 0;
t                1277 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	kfree(t);
t                1291 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct t3c_data *t = T3C_DATA(tdev);
t                1298 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	free_tid_maps(&t->tid_maps);
t                1305 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	kfree_skb(t->nofail_skb);
t                1306 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	kfree(t);
t                 129 drivers/net/ethernet/chelsio/cxgb3/l2t.h static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
t                 134 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	d = L2DATA(t);
t                2921 drivers/net/ethernet/chelsio/cxgb3/sge.c static void sge_timer_tx(struct timer_list *t)
t                2923 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
t                2961 drivers/net/ethernet/chelsio/cxgb3/sge.c static void sge_timer_rx(struct timer_list *t)
t                2964 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
t                3126 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const struct tid_info *t = &adap->tids;
t                3135 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   atomic_read(&t->conns_in_use));
t                3145 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   t->ntids - 1);
t                3147 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   atomic_read(&t->tids_in_use),
t                3148 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   atomic_read(&t->hash_tids_in_use));
t                3151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   t->aftid_base,
t                3152 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   t->aftid_end,
t                3154 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   t->ntids - 1);
t                3156 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   atomic_read(&t->tids_in_use),
t                3157 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   atomic_read(&t->hash_tids_in_use));
t                3161 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   t->ntids - 1);
t                3163 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				   atomic_read(&t->hash_tids_in_use));
t                3165 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	} else if (t->ntids) {
t                3167 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   atomic_read(&t->conns_in_use));
t                3170 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   tid_start + t->ntids - 1);
t                3172 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   atomic_read(&t->tids_in_use));
t                3175 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (t->nstids)
t                3177 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (!t->stid_base &&
t                3179 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->stid_base + 1 : t->stid_base,
t                3180 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->stid_base + t->nstids - 1,
t                3181 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->stids_in_use - t->v6_stids_in_use,
t                3182 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->v6_stids_in_use);
t                3184 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (t->natids)
t                3186 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->natids - 1, t->atids_in_use);
t                3187 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
t                3188 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		   t->ftid_base + t->nftids - 1);
t                3189 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (t->nsftids)
t                3191 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->sftid_base, t->sftid_base + t->nsftids - 2,
t                3192 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   t->sftids_in_use);
t                3193 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (t->ntids)
t                3234 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	unsigned long *t;
t                3237 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
t                3238 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!t)
t                3241 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
t                3243 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		kvfree(t);
t                3247 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
t                3248 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	kvfree(t);
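The debugfs write handler above stages the user-supplied mask in a temporary bitmap so the live blocked_fl mask is only updated when parsing succeeds. A minimal sketch of that staging pattern; dst, nbits and the helper name are illustrative:

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t foo_write_bitmap(const char __user *ubuf, size_t count,
				unsigned long *dst, unsigned int nbits)
{
	unsigned long *tmp;
	int err;

	tmp = kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	err = bitmap_parse_user(ubuf, count, tmp, nbits);	/* parse the hex mask */
	if (!err)
		bitmap_copy(dst, tmp, nbits);			/* commit only on success */

	kfree(tmp);
	return err ? err : count;
}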
t                 442 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct tid_info *t = &adap->tids;
t                 445 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_lock_bh(&t->ftid_lock);
t                 447 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
t                 448 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		if (ftid >= t->nftids)
t                 452 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			ftid = bitmap_find_free_region(t->ftid_bmap,
t                 453 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 						       t->nftids, 1);
t                 460 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_release_region(t->ftid_bmap, ftid, 1);
t                 462 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			ftid = bitmap_find_free_region(t->ftid_bmap,
t                 463 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 						       t->nftids, 2);
t                 467 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_release_region(t->ftid_bmap, ftid, 2);
t                 471 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_unlock_bh(&t->ftid_lock);
t                 475 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
t                 478 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_lock_bh(&t->ftid_lock);
t                 480 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	if (test_bit(fidx, t->ftid_bmap)) {
t                 481 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		spin_unlock_bh(&t->ftid_lock);
t                 486 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		__set_bit(fidx, t->ftid_bmap);
t                 489 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
t                 491 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
t                 494 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_unlock_bh(&t->ftid_lock);
t                 498 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
t                 501 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_lock_bh(&t->ftid_lock);
t                 503 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		__clear_bit(fidx, t->ftid_bmap);
t                 506 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_release_region(t->ftid_bmap, fidx, 2);
t                 508 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			bitmap_release_region(t->ftid_bmap, fidx, 1);
t                 510 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	spin_unlock_bh(&t->ftid_lock);
t                1097 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct tid_info *t = &adapter->tids;
t                1151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	atid = cxgb4_alloc_atid(t, f);
t                1225 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	cxgb4_free_atid(t, atid);
t                1454 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct tid_info *t = &adapter->tids;
t                1471 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	f = lookup_tid(t, filter_id);
t                1674 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct tid_info *t = &adap->tids;
t                1682 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	f = lookup_tid(t, tid);
t                1691 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	cxgb4_remove_tid(t, 0, tid, 0);
t                1703 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct tid_info *t = &adap->tids;
t                1711 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	f = lookup_atid(t, ftid);
t                1725 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		cxgb4_insert_tid(t, f, f->tid, 0);
t                1726 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		cxgb4_free_atid(t, ftid);
t                1733 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			cxgb4_remove_tid(t, 0, tid, 0);
t                1755 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		cxgb4_free_atid(t, ftid);
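The ftid helpers above (cxgb4_get_free_ftid()/cxgb4_set_ftid()/cxgb4_clear_ftid()) lean on the kernel's bitmap-region API whenever a filter needs several consecutive, naturally aligned slots. A brief reference sketch; bmap and nbits are illustrative:

	/* reserve a naturally aligned block of 2^order bits (order 1 = 2 bits) */
	int idx = bitmap_find_free_region(bmap, nbits, 1);

	if (idx >= 0) {
		/* ... slots idx and idx + 1 now belong to the caller ... */
		bitmap_release_region(bmap, idx, 1);	/* give them back */
	}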
t                1000 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
t                1018 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		for (j = 0; j < pi->nqsets; j++, t++, q++) {
t                1019 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			err = t4_sge_alloc_eth_txq(adap, t, dev,
t                1215 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c int cxgb4_alloc_atid(struct tid_info *t, void *data)
t                1219 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_bh(&t->atid_lock);
t                1220 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (t->afree) {
t                1221 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		union aopen_entry *p = t->afree;
t                1223 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		atid = (p - t->atid_tab) + t->atid_base;
t                1224 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->afree = p->next;
t                1226 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->atids_in_use++;
t                1228 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_unlock_bh(&t->atid_lock);
t                1236 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
t                1238 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
t                1240 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_bh(&t->atid_lock);
t                1241 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p->next = t->afree;
t                1242 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->afree = p;
t                1243 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->atids_in_use--;
t                1244 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_unlock_bh(&t->atid_lock);
t                1251 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
t                1255 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_bh(&t->stid_lock);
t                1257 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
t                1258 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (stid < t->nstids)
t                1259 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			__set_bit(stid, t->stid_bmap);
t                1263 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
t                1268 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->stid_tab[stid].data = data;
t                1269 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid += t->stid_base;
t                1275 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->stids_in_use += 2;
t                1276 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->v6_stids_in_use += 2;
t                1278 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->stids_in_use++;
t                1281 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_unlock_bh(&t->stid_lock);
t                1288 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
t                1292 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_bh(&t->stid_lock);
t                1294 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid = find_next_zero_bit(t->stid_bmap,
t                1295 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				t->nstids + t->nsftids, t->nstids);
t                1296 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (stid < (t->nstids + t->nsftids))
t                1297 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			__set_bit(stid, t->stid_bmap);
t                1304 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->stid_tab[stid].data = data;
t                1305 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid -= t->nstids;
t                1306 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid += t->sftid_base;
t                1307 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->sftids_in_use++;
t                1309 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_unlock_bh(&t->stid_lock);
t                1316 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
t                1319 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (t->nsftids && (stid >= t->sftid_base)) {
t                1320 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid -= t->sftid_base;
t                1321 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid += t->nstids;
t                1323 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		stid -= t->stid_base;
t                1326 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_bh(&t->stid_lock);
t                1328 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		__clear_bit(stid, t->stid_bmap);
t                1330 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		bitmap_release_region(t->stid_bmap, stid, 1);
t                1331 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->stid_tab[stid].data = NULL;
t                1332 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (stid < t->nstids) {
t                1334 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->stids_in_use -= 2;
t                1335 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->v6_stids_in_use -= 2;
t                1337 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->stids_in_use--;
t                1340 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->sftids_in_use--;
t                1343 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_unlock_bh(&t->stid_lock);
t                1365 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
t                1368 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	void **p = &t->tid_tab[tid];
t                1369 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct adapter *adap = container_of(t, struct adapter, tids);
t                1418 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
t                1422 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct adapter *adap = container_of(t, struct adapter, tids);
t                1424 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	WARN_ON(tid >= t->ntids);
t                1426 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (t->tid_tab[tid]) {
t                1427 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->tid_tab[tid] = NULL;
t                1428 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		atomic_dec(&t->conns_in_use);
t                1429 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (t->hash_base && (tid >= t->hash_base)) {
t                1431 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				atomic_sub(2, &t->hash_tids_in_use);
t                1433 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				atomic_dec(&t->hash_tids_in_use);
t                1436 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				atomic_sub(2, &t->tids_in_use);
t                1438 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				atomic_dec(&t->tids_in_use);
t                1447 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		cxgb4_queue_tid_release(t, chan, tid);
t                1454 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int tid_init(struct tid_info *t)
t                1456 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct adapter *adap = container_of(t, struct adapter, tids);
t                1457 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	unsigned int max_ftids = t->nftids + t->nsftids;
t                1458 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	unsigned int natids = t->natids;
t                1463 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
t                1464 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
t                1465 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	size = t->ntids * sizeof(*t->tid_tab) +
t                1466 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	       natids * sizeof(*t->atid_tab) +
t                1467 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	       t->nstids * sizeof(*t->stid_tab) +
t                1468 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	       t->nsftids * sizeof(*t->stid_tab) +
t                1470 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	       max_ftids * sizeof(*t->ftid_tab) +
t                1473 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->tid_tab = kvzalloc(size, GFP_KERNEL);
t                1474 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!t->tid_tab)
t                1477 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t                1478 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t                1479 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t                1480 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t                1481 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
t                1482 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_init(&t->stid_lock);
t                1483 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_init(&t->atid_lock);
t                1484 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	spin_lock_init(&t->ftid_lock);
t                1486 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->stids_in_use = 0;
t                1487 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->v6_stids_in_use = 0;
t                1488 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->sftids_in_use = 0;
t                1489 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->afree = NULL;
t                1490 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->atids_in_use = 0;
t                1491 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	atomic_set(&t->tids_in_use, 0);
t                1492 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	atomic_set(&t->conns_in_use, 0);
t                1493 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	atomic_set(&t->hash_tids_in_use, 0);
t                1498 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t                1499 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->afree = t->atid_tab;
t                1503 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
t                1505 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		if (!t->stid_base &&
t                1507 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			__set_bit(0, t->stid_bmap);
t                1510 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	bitmap_zero(t->ftid_bmap, t->nftids);
t                 779 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c static void ch_flower_stats_cb(struct timer_list *t)
t                 781 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 	struct adapter *adap = from_timer(adap, t, flower_stats_timer);
t                 154 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	struct cxgb4_tc_u32_table *t;
t                 177 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	t = adapter->tc_u32;
t                 184 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (uhtid != 0x800 && uhtid >= t->size)
t                 188 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (link_uhtid >= t->size)
t                 203 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		if (!t->table[uhtid - 1].link_handle)
t                 207 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		link_start = t->table[uhtid - 1].match_field;
t                 222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		if (t->table[link_uhtid - 1].link_handle) {
t                 269 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 			link = &t->table[link_uhtid - 1];
t                 287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
t                 289 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
t                 333 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
t                 334 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		set_bit(filter_id, t->table[uhtid - 1].tid_map);
t                 345 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	struct cxgb4_tc_u32_table *t;
t                 362 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	t = adapter->tc_u32;
t                 369 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (uhtid != 0x800 && uhtid >= t->size)
t                 374 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		link = &t->table[uhtid - 1];
t                 393 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	for (i = 0; i < t->size; i++) {
t                 394 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		link = &t->table[i];
t                 422 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	struct cxgb4_tc_u32_table *t;
t                 429 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	t = adap->tc_u32;
t                 430 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	for (i = 0; i < t->size; i++) {
t                 431 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		struct cxgb4_link *link = &t->table[i];
t                 441 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	struct cxgb4_tc_u32_table *t;
t                 447 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
t                 448 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (!t)
t                 451 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	t->size = max_tids;
t                 453 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	for (i = 0; i < t->size; i++) {
t                 454 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		struct cxgb4_link *link = &t->table[i];
t                 465 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	return t;
t                 468 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	for (i = 0; i < t->size; i++) {
t                 469 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		struct cxgb4_link *link = &t->table[i];
t                 475 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 	if (t)
t                 476 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c 		kvfree(t);
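The cxgb4_tc_u32.c:447 entry above sizes a structure that ends in a flexible array with struct_size() before handing the result to kvzalloc(). A hedged sketch of that allocation idiom; struct my_table, struct my_entry and the my_table_*() helpers are illustrative placeholders, not driver code:

  #include <linux/mm.h>        /* kvzalloc(), kvfree() */
  #include <linux/overflow.h>  /* struct_size() */

  struct my_entry {
          unsigned long handle;
  };

  struct my_table {
          unsigned int size;
          struct my_entry table[];        /* flexible array member */
  };

  /* Overflow-checked sizing of header + n trailing entries, mirroring
   * kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL) above. */
  static struct my_table *my_table_alloc(unsigned int n)
  {
          struct my_table *t = kvzalloc(struct_size(t, table, n), GFP_KERNEL);

          if (!t)
                  return NULL;
          t->size = n;
          return t;
  }

  static void my_table_free(struct my_table *t)
  {
          kvfree(t);      /* handles both kmalloc- and vmalloc-backed memory */
  }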
t                 138 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
t                 140 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	return tid < t->ntids ? t->tid_tab[tid] : NULL;
t                 143 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
t                 145 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	return atid < t->natids ? t->atid_tab[atid].data : NULL;
t                 148 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
t                 151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	if (t->nsftids && (stid >= t->sftid_base)) {
t                 152 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 		stid -= t->sftid_base;
t                 153 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 		stid += t->nstids;
t                 155 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 		stid -= t->stid_base;
t                 158 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
t                 161 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
t                 164 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	t->tid_tab[tid] = data;
t                 165 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	if (t->hash_base && (tid >= t->hash_base)) {
t                 167 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			atomic_add(2, &t->hash_tids_in_use);
t                 169 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			atomic_inc(&t->hash_tids_in_use);
t                 172 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			atomic_add(2, &t->tids_in_use);
t                 174 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			atomic_inc(&t->tids_in_use);
t                 176 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	atomic_inc(&t->conns_in_use);
t                 179 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h int cxgb4_alloc_atid(struct tid_info *t, void *data);
t                 180 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
t                 181 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
t                 182 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
t                 183 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
t                 184 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
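Taken together, the cxgb4_uld.h helpers above spell out the TID bookkeeping flow that the filter code at cxgb4_filter.c:1711-1755 follows: reserve an atid for an in-flight open, switch the lookup over to the hardware-assigned tid with cxgb4_insert_tid() and release the atid with cxgb4_free_atid() once the reply arrives, then queue the tid for release with cxgb4_remove_tid() on teardown. A hedged sketch of that sequence; struct my_conn and the my_conn_*() helpers are placeholders, and only the cxgb4_*()/lookup_*() calls come from the header above:

  /* Sketch only: assumes an initialised struct tid_info *t (adap->tids). */
  struct my_conn {
          unsigned int atid;
          unsigned int tid;
  };

  static int my_conn_open(struct tid_info *t, struct my_conn *c)
  {
          int atid = cxgb4_alloc_atid(t, c);      /* lookup_atid(t, atid) now returns c */

          if (atid < 0)
                  return atid;                    /* no free atid entries */
          c->atid = atid;
          /* ... send the open request tagged with atid to the firmware ... */
          return 0;
  }

  /* Reply handler: hardware has assigned the final connection tid. */
  static void my_conn_established(struct tid_info *t, struct my_conn *c,
                                  unsigned int tid)
  {
          cxgb4_insert_tid(t, c, tid, 0);         /* lookup_tid(t, tid) now returns c */
          cxgb4_free_atid(t, c->atid);            /* atid goes back on the free list */
          c->tid = tid;
  }

  static void my_conn_close(struct tid_info *t, struct my_conn *c)
  {
          cxgb4_remove_tid(t, 0, c->tid, 0);      /* clears tid_tab[] and queues the release */
  }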
t                3384 drivers/net/ethernet/chelsio/cxgb4/sge.c static void sge_rx_timer_cb(struct timer_list *t)
t                3388 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
t                3421 drivers/net/ethernet/chelsio/cxgb4/sge.c static void sge_tx_timer_cb(struct timer_list *t)
t                3423 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
t                2066 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void sge_rx_timer_cb(struct timer_list *t)
t                2068 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
t                2125 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void sge_tx_timer_cb(struct timer_list *t)
t                2127 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
t                 126 drivers/net/ethernet/cisco/enic/enic_clsf.c void enic_flow_may_expire(struct timer_list *t)
t                 128 drivers/net/ethernet/cisco/enic/enic_clsf.c 	struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
t                  19 drivers/net/ethernet/cisco/enic/enic_clsf.h void enic_flow_may_expire(struct timer_list *t);
t                1721 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_notify_timer(struct timer_list *t)
t                1723 drivers/net/ethernet/cisco/enic/enic_main.c 	struct enic *enic = from_timer(enic, t, notify_timer);
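Most of the timer callbacks in this listing, from the SGE and enic timers above to the tulip, Intel, Marvell and mlx4 watchdogs below, share one idiom: the callback takes a struct timer_list * and recovers the private structure that embeds it with from_timer(), a container_of() wrapper keyed on the timer's field name. A self-contained sketch of the pattern; struct my_priv, my_timer_cb() and the watchdog field name are illustrative, not taken from any driver above:

  #include <linux/jiffies.h>
  #include <linux/timer.h>

  struct my_priv {
          struct timer_list watchdog;
          /* ... driver state ... */
  };

  static void my_timer_cb(struct timer_list *t)
  {
          /* Equivalent to container_of(t, struct my_priv, watchdog). */
          struct my_priv *priv = from_timer(priv, t, watchdog);

          /* ... periodic work on priv ..., then optionally re-arm: */
          mod_timer(&priv->watchdog, jiffies + HZ);
  }

  static void my_priv_start(struct my_priv *priv)
  {
          timer_setup(&priv->watchdog, my_timer_cb, 0);   /* bind the callback once */
          mod_timer(&priv->watchdog, jiffies + HZ);       /* first expiry in about 1s */
  }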
t                 336 drivers/net/ethernet/dec/tulip/de2104x.c static void de21040_media_timer (struct timer_list *t);
t                 337 drivers/net/ethernet/dec/tulip/de2104x.c static void de21041_media_timer (struct timer_list *t);
t                 962 drivers/net/ethernet/dec/tulip/de2104x.c static void de21040_media_timer (struct timer_list *t)
t                 964 drivers/net/ethernet/dec/tulip/de2104x.c 	struct de_private *de = from_timer(de, t, media_timer);
t                1043 drivers/net/ethernet/dec/tulip/de2104x.c static void de21041_media_timer (struct timer_list *t)
t                1045 drivers/net/ethernet/dec/tulip/de2104x.c 	struct de_private *de = from_timer(de, t, media_timer);
t                 915 drivers/net/ethernet/dec/tulip/de4x5.c static void    de4x5_ast(struct timer_list *t);
t                1743 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_ast(struct timer_list *t)
t                1745 drivers/net/ethernet/dec/tulip/de4x5.c 	struct de4x5_private *lp = from_timer(lp, t, timer);
t                5183 drivers/net/ethernet/dec/tulip/de4x5.c     char *p, *q, t;
t                5192 drivers/net/ethernet/dec/tulip/de4x5.c 	t = *q;
t                5216 drivers/net/ethernet/dec/tulip/de4x5.c 	*q = t;
t                1123 drivers/net/ethernet/dec/tulip/dmfe.c static void dmfe_timer(struct timer_list *t)
t                1125 drivers/net/ethernet/dec/tulip/dmfe.c 	struct dmfe_board_info *db = from_timer(db, t, timer);
t                 105 drivers/net/ethernet/dec/tulip/interrupt.c void oom_timer(struct timer_list *t)
t                 107 drivers/net/ethernet/dec/tulip/interrupt.c 	struct tulip_private *tp = from_timer(tp, t, oom_timer);
t                  87 drivers/net/ethernet/dec/tulip/pnic.c void pnic_timer(struct timer_list *t)
t                  89 drivers/net/ethernet/dec/tulip/pnic.c 	struct tulip_private *tp = from_timer(tp, t, timer);
t                  79 drivers/net/ethernet/dec/tulip/pnic2.c void pnic2_timer(struct timer_list *t)
t                  81 drivers/net/ethernet/dec/tulip/pnic2.c 	struct tulip_private *tp = from_timer(tp, t, timer);
t                 140 drivers/net/ethernet/dec/tulip/timer.c void mxic_timer(struct timer_list *t)
t                 142 drivers/net/ethernet/dec/tulip/timer.c 	struct tulip_private *tp = from_timer(tp, t, timer);
t                 157 drivers/net/ethernet/dec/tulip/timer.c void comet_timer(struct timer_list *t)
t                 159 drivers/net/ethernet/dec/tulip/timer.c 	struct tulip_private *tp = from_timer(tp, t, timer);
t                 479 drivers/net/ethernet/dec/tulip/tulip.h void pnic2_timer(struct timer_list *t);
t                 507 drivers/net/ethernet/dec/tulip/tulip.h void pnic_timer(struct timer_list *t);
t                 511 drivers/net/ethernet/dec/tulip/tulip.h void mxic_timer(struct timer_list *t);
t                 512 drivers/net/ethernet/dec/tulip/tulip.h void comet_timer(struct timer_list *t);
t                 519 drivers/net/ethernet/dec/tulip/tulip.h void oom_timer(struct timer_list *t);
t                 126 drivers/net/ethernet/dec/tulip/tulip_core.c static void tulip_timer(struct timer_list *t)
t                 128 drivers/net/ethernet/dec/tulip/tulip_core.c 	struct tulip_private *tp = from_timer(tp, t, timer);
t                 236 drivers/net/ethernet/dec/tulip/uli526x.c static void uli526x_timer(struct timer_list *t);
t                1016 drivers/net/ethernet/dec/tulip/uli526x.c static void uli526x_timer(struct timer_list *t)
t                1018 drivers/net/ethernet/dec/tulip/uli526x.c 	struct uli526x_board_info *db = from_timer(db, t, timer);
t                 330 drivers/net/ethernet/dec/tulip/winbond-840.c static void netdev_timer(struct timer_list *t);
t                 750 drivers/net/ethernet/dec/tulip/winbond-840.c 		int t;
t                 752 drivers/net/ethernet/dec/tulip/winbond-840.c 		t = (csr5 >> 17) & 0x07;
t                 753 drivers/net/ethernet/dec/tulip/winbond-840.c 		if (t==0||t==1) {
t                 755 drivers/net/ethernet/dec/tulip/winbond-840.c 			t = (csr5 >> 20) & 0x07;
t                 756 drivers/net/ethernet/dec/tulip/winbond-840.c 			if (t==0||t==1)
t                 775 drivers/net/ethernet/dec/tulip/winbond-840.c static void netdev_timer(struct timer_list *t)
t                 777 drivers/net/ethernet/dec/tulip/winbond-840.c 	struct netdev_private *np = from_timer(np, t, timer);
t                  68 drivers/net/ethernet/dlink/dl2k.c static void rio_timer (struct timer_list *t);
t                 655 drivers/net/ethernet/dlink/dl2k.c rio_timer (struct timer_list *t)
t                 657 drivers/net/ethernet/dlink/dl2k.c 	struct netdev_private *np = from_timer(np, t, timer);
t                 434 drivers/net/ethernet/dlink/sundance.c static void netdev_timer(struct timer_list *t);
t                 954 drivers/net/ethernet/dlink/sundance.c static void netdev_timer(struct timer_list *t)
t                 956 drivers/net/ethernet/dlink/sundance.c 	struct netdev_private *np = from_timer(np, t, timer);
t                 429 drivers/net/ethernet/fealnx.c static void netdev_timer(struct timer_list *t);
t                 430 drivers/net/ethernet/fealnx.c static void reset_timer(struct timer_list *t);
t                1081 drivers/net/ethernet/fealnx.c static void netdev_timer(struct timer_list *t)
t                1083 drivers/net/ethernet/fealnx.c 	struct netdev_private *np = from_timer(np, t, timer);
t                1170 drivers/net/ethernet/fealnx.c static void reset_timer(struct timer_list *t)
t                1172 drivers/net/ethernet/fealnx.c 	struct netdev_private *np = from_timer(np, t, reset_timer);
t                 106 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_BDR(t, i, r)	(0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
t                 330 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_bdr_rd(hw, t, n, off) \
t                 331 drivers/net/ethernet/freescale/enetc/enetc_hw.h 				enetc_rd(hw, ENETC_BDR(t, n, off))
t                 332 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_bdr_wr(hw, t, n, off, val) \
t                 333 drivers/net/ethernet/freescale/enetc/enetc_hw.h 				enetc_wr(hw, ENETC_BDR(t, n, off), val)
t                2075 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void hns_nic_service_timer(struct timer_list *t)
t                2077 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
t                3810 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static void hclge_reset_timer(struct timer_list *t)
t                3812 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
t                1738 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_service_timer(struct timer_list *t)
t                1740 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
t                1837 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_keep_alive_timer(struct timer_list *t)
t                1839 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
t                1688 drivers/net/ethernet/intel/e100.c static void e100_watchdog(struct timer_list *t)
t                1690 drivers/net/ethernet/intel/e100.c 	struct nic *nic = from_timer(nic, t, watchdog);
t                4821 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_update_phy_info(struct timer_list *t)
t                4823 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
t                5157 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_watchdog(struct timer_list *t)
t                5159 drivers/net/ethernet/intel/e1000e/netdev.c 	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                 201 drivers/net/ethernet/intel/fm10k/fm10k_pci.c static void fm10k_service_timer(struct timer_list *t)
t                 203 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	struct fm10k_intfc *interface = from_timer(interface, t,
t                10514 drivers/net/ethernet/intel/i40e/i40e_main.c static void i40e_service_timer(struct timer_list *t)
t                10516 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_pf *pf = from_timer(pf, t, service_timer);
t                 899 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 	hw->blk[blk].xlt1.t[ptype] = ptg;
t                1115 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 	hw->blk[blk].xlt2.t[vsi] = vsig;
t                1175 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		ptg = hw->blk[blk].xlt1.t[pt];
t                1195 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		vsig = hw->blk[blk].xlt2.t[vsi];
t                1268 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].xlt1.t);
t                1269 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			dst = hw->blk[block_id].xlt1.t;
t                1271 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].xlt1.t);
t                1281 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].xlt2.t);
t                1282 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			dst = (u8 *)hw->blk[block_id].xlt2.t;
t                1284 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].xlt2.t);
t                1294 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].prof.t);
t                1295 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			dst = (u8 *)hw->blk[block_id].prof.t;
t                1297 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].prof.t);
t                1307 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].prof_redir.t);
t                1308 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			dst = hw->blk[block_id].prof_redir.t;
t                1310 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].prof_redir.t);
t                1321 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].es.t);
t                1322 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			dst = (u8 *)hw->blk[block_id].es.t;
t                1325 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				sizeof(*hw->blk[block_id].es.t);
t                1389 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
t                1390 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
t                1393 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
t                1394 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
t                1395 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
t                1421 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
t                1426 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
t                1428 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		memset(prof->t, 0, prof->count * sizeof(*prof->t));
t                1429 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		memset(prof_redir->t, 0,
t                1430 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		       prof_redir->count * sizeof(*prof_redir->t));
t                1432 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		memset(es->t, 0, es->count * sizeof(*es->t));
t                1478 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
t                1479 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				       sizeof(*xlt1->t), GFP_KERNEL);
t                1480 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (!xlt1->t)
t                1501 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
t                1502 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				       sizeof(*xlt2->t), GFP_KERNEL);
t                1503 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (!xlt2->t)
t                1510 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
t                1511 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				       sizeof(*prof->t), GFP_KERNEL);
t                1513 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (!prof->t)
t                1518 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
t                1520 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 					     sizeof(*prof_redir->t),
t                1523 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (!prof_redir->t)
t                1529 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		es->t = devm_kcalloc(ice_hw_to_dev(hw),
t                1531 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 				     sizeof(*es->t), GFP_KERNEL);
t                1532 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (!es->t)
t                 257 drivers/net/ethernet/intel/ice/ice_flex_type.h 	struct ice_fv_word *t;
t                 303 drivers/net/ethernet/intel/ice/ice_flex_type.h 	u8 *t;
t                 327 drivers/net/ethernet/intel/ice/ice_flex_type.h 	u16 *t;
t                 353 drivers/net/ethernet/intel/ice/ice_flex_type.h 	struct ice_prof_tcam_entry *t;
t                 358 drivers/net/ethernet/intel/ice/ice_flex_type.h 	u8 *t;
t                1209 drivers/net/ethernet/intel/ice/ice_main.c static void ice_service_timer(struct timer_list *t)
t                1211 drivers/net/ethernet/intel/ice/ice_main.c 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
t                5123 drivers/net/ethernet/intel/igb/igb_main.c static void igb_update_phy_info(struct timer_list *t)
t                5125 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
t                5213 drivers/net/ethernet/intel/igb/igb_main.c static void igb_watchdog(struct timer_list *t)
t                5215 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                1898 drivers/net/ethernet/intel/igbvf/netdev.c static void igbvf_watchdog(struct timer_list *t)
t                1900 drivers/net/ethernet/intel/igbvf/netdev.c 	struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                2843 drivers/net/ethernet/intel/igc/igc_main.c static void igc_update_phy_info(struct timer_list *t)
t                2845 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
t                2893 drivers/net/ethernet/intel/igc/igc_main.c static void igc_watchdog(struct timer_list *t)
t                2895 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                  61 drivers/net/ethernet/intel/ixgb/ixgb_main.c static void ixgb_watchdog(struct timer_list *t);
t                1120 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_watchdog(struct timer_list *t)
t                1122 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
t                 320 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		struct tx_sa *t = &ipsec->tx_tbl[i];
t                 331 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		if (t->used) {
t                 332 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 			if (t->mode & IXGBE_RXTXMOD_VF)
t                 333 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 				ixgbe_ipsec_del_sa(t->xs);
t                 335 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 				ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
t                7574 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			u32 h, t;
t                7577 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
t                7579 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (h != t)
t                7816 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_service_timer(struct timer_list *t)
t                7818 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
t                 113 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		struct tx_sa *t = &ipsec->tx_tbl[i];
t                 123 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		if (t->used) {
t                 124 drivers/net/ethernet/intel/ixgbevf/ipsec.c 			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
t                3144 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_service_timer(struct timer_list *t)
t                3146 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_adapter *adapter = from_timer(adapter, t,
t                 656 drivers/net/ethernet/korina.c static void korina_poll_media(struct timer_list *t)
t                 658 drivers/net/ethernet/korina.c 	struct korina_private *lp = from_timer(lp, t, media_check_timer);
t                1337 drivers/net/ethernet/marvell/mv643xx_eth.c static void mib_counters_timer_wrapper(struct timer_list *t)
t                1339 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
t                2305 drivers/net/ethernet/marvell/mv643xx_eth.c static inline void oom_timer_wrapper(struct timer_list *t)
t                2307 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
t                 354 drivers/net/ethernet/marvell/pxa168_eth.c static inline void rxq_refill_timer_wrapper(struct timer_list *t)
t                 356 drivers/net/ethernet/marvell/pxa168_eth.c 	struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
t                1488 drivers/net/ethernet/marvell/skge.c static void xm_link_timer(struct timer_list *t)
t                1490 drivers/net/ethernet/marvell/skge.c 	struct skge_port *skge = from_timer(skge, t, link_timer);
t                3703 drivers/net/ethernet/marvell/skge.c 		const struct skge_tx_desc *t = e->desc;
t                3705 drivers/net/ethernet/marvell/skge.c 			   t->control, t->dma_hi, t->dma_lo, t->status,
t                3706 drivers/net/ethernet/marvell/skge.c 			   t->csum_offs, t->csum_write, t->csum_start);
t                2960 drivers/net/ethernet/marvell/sky2.c static void sky2_watchdog(struct timer_list *t)
t                2962 drivers/net/ethernet/marvell/sky2.c 	struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
t                 580 drivers/net/ethernet/mellanox/mlx4/alloc.c 	dma_addr_t t;
t                 586 drivers/net/ethernet/mellanox/mlx4/alloc.c 		dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
t                 591 drivers/net/ethernet/mellanox/mlx4/alloc.c 	buf->direct.map = t;
t                 593 drivers/net/ethernet/mellanox/mlx4/alloc.c 	while (t & ((1 << buf->page_shift) - 1)) {
t                 612 drivers/net/ethernet/mellanox/mlx4/alloc.c 		dma_addr_t t;
t                 627 drivers/net/ethernet/mellanox/mlx4/alloc.c 						   PAGE_SIZE, &t, GFP_KERNEL);
t                 631 drivers/net/ethernet/mellanox/mlx4/alloc.c 			buf->page_list[i].map = t;
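The mlx4 buffer allocator above obtains device-visible memory with dma_alloc_coherent() and then tests the low bits of the returned bus address against 1 << buf->page_shift (alloc.c:593). A hedged sketch of the underlying API for a single page; my_alloc_page()/my_free_page() are placeholders, not mlx4 code:

  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>

  /* Allocate one coherent DMA page: returns the CPU virtual address and
   * fills *dma with the bus address the device should be programmed with. */
  static void *my_alloc_page(struct device *dev, dma_addr_t *dma)
  {
          void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);

          if (!cpu)
                  return NULL;
          /* dma_alloc_coherent() aligns to at least the allocation's page
           * order, so a PAGE_SIZE buffer comes back page aligned. */
          WARN_ON(*dma & (PAGE_SIZE - 1));
          return cpu;
  }

  static void my_free_page(struct device *dev, void *cpu, dma_addr_t dma)
  {
          dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
  }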
t                 232 drivers/net/ethernet/mellanox/mlx4/catas.c static void poll_catas(struct timer_list *t)
t                 234 drivers/net/ethernet/mellanox/mlx4/catas.c 	struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
t                  52 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	int i, t;
t                  55 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                  56 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                  57 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
t                  58 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
t                  61 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 							   priv->tx_cq[t][i]);
t                1922 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		int t;
t                1927 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
t                1928 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 				for (i = 0; i < priv->tx_ring_num[t]; i++)
t                1930 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 						priv->tx_ring[t][i]->bf_alloced;
t                1942 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
t                1943 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			for (i = 0; i < priv->tx_ring_num[t]; i++)
t                1944 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 				priv->tx_ring[t][i]->bf_enabled =
t                1405 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                1430 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                1431 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                1432 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			cq = priv->tx_cq[t][i];
t                1624 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                1709 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                1710 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		u8 num_tx_rings_p_up = t == TX ?
t                1711 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			priv->num_tx_rings_p_up : priv->tx_ring_num[t];
t                1713 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                1715 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			cq = priv->tx_cq[t][i];
t                1732 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			tx_ring = priv->tx_ring[t][i];
t                1741 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (t != TX_XDP) {
t                1843 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (t == MLX4_EN_NUM_TX_TYPES) {
t                1844 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		t--;
t                1845 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		i = priv->tx_ring_num[t];
t                1847 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	while (t >= 0) {
t                1849 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
t                1850 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
t                1852 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!t--)
t                1854 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		i = priv->tx_ring_num[t];
t                1879 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                1969 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                1970 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                1971 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
t                1972 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
t                1977 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
t                1978 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++)
t                1979 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
t                2111 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                2117 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2118 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                2119 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_ring[t] && priv->tx_ring[t][i])
t                2121 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 							&priv->tx_ring[t][i]);
t                2122 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_cq[t] && priv->tx_cq[t][i])
t                2123 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
t                2125 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(priv->tx_ring[t]);
t                2126 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(priv->tx_cq[t]);
t                2142 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                2146 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2147 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                2149 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
t                2150 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 					      prof->tx_ring_size, i, t, node))
t                2153 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
t                2190 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2191 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (i = 0; i < priv->tx_ring_num[t]; i++) {
t                2192 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_ring[t][i])
t                2194 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 							&priv->tx_ring[t][i]);
t                2195 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			if (priv->tx_cq[t][i])
t                2196 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
t                2207 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int t;
t                2221 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2222 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring_num[t] = prof->tx_ring_num[t];
t                2223 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!dst->tx_ring_num[t])
t                2226 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
t                2229 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!dst->tx_ring[t])
t                2232 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
t                2235 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!dst->tx_cq[t]) {
t                2236 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			kfree(dst->tx_ring[t]);
t                2244 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	while (t--) {
t                2245 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(dst->tx_ring[t]);
t                2246 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		kfree(dst->tx_cq[t]);
t                2254 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int t;
t                2261 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2262 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring_num[t] = src->tx_ring_num[t];
t                2263 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_ring[t] = src->tx_ring[t];
t                2264 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		dst->tx_cq[t] = src->tx_cq[t];
t                2277 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                2285 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                2286 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			kfree(tmp->tx_ring[t]);
t                2287 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			kfree(tmp->tx_cq[t]);
t                3243 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int i, t;
t                3291 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
t                3292 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->tx_ring_num[t] = prof->tx_ring_num[t];
t                3293 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!priv->tx_ring_num[t])
t                3296 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
t                3299 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!priv->tx_ring[t]) {
t                3303 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
t                3306 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!priv->tx_cq[t]) {
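The mlx4_en en_netdev.c entries above allocate per-type tx_ring[]/tx_cq[] arrays in a loop and, on error, walk the type index back down and free only what was actually set up (the t-- unwinding around en_netdev.c:1843-1854 and the while (t--) cleanup at 2244-2246). A generic, hedged sketch of that partial-allocation rollback; alloc_per_type() and its parameters are placeholders:

  #include <linux/errno.h>
  #include <linux/slab.h>

  /* Allocate one pointer array per type; on failure, free the arrays that
   * were already allocated by walking the index back down. */
  static int alloc_per_type(void **ring[], const int counts[], int ntypes)
  {
          int t;

          for (t = 0; t < ntypes; t++) {
                  ring[t] = kcalloc(counts[t], sizeof(void *), GFP_KERNEL);
                  if (!ring[t])
                          goto err;
          }
          return 0;

  err:
          while (t--)                     /* undo only the completed iterations */
                  kfree(ring[t]);
          return -ENOMEM;
  }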
t                 977 drivers/net/ethernet/mellanox/mlx4/eq.c 	dma_addr_t t;
t                1009 drivers/net/ethernet/mellanox/mlx4/eq.c 							  PAGE_SIZE, &t,
t                1014 drivers/net/ethernet/mellanox/mlx4/eq.c 		dma_list[i] = t;
t                1015 drivers/net/ethernet/mellanox/mlx4/eq.c 		eq->page_list[i].map = t;
t                 505 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	int t;
t                 514 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
t                 516 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				       slave_list[i].res_list[t]);
t                 552 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
t                 554 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				mlx4_get_active_ports(dev, t);
t                 558 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 						      t, dev->caps.num_qps -
t                 564 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 						      t, dev->caps.num_cqs -
t                 569 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 						      t, dev->caps.num_srqs -
t                 574 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 						      t, dev->caps.num_mpts -
t                 579 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 						      t, dev->caps.num_mtts -
t                 583 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				if (t == mlx4_master_func_num(dev)) {
t                 598 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->quota[t] =
t                 601 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->guaranteed[t] = 2;
t                 606 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
t                 607 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->guaranteed[t] = 2;
t                 611 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				if (t == mlx4_master_func_num(dev)) {
t                 612 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
t                 613 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
t                 616 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 							res_alloc->quota[t];
t                 618 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
t                 619 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					res_alloc->guaranteed[t] = 0;
t                 623 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				res_alloc->quota[t] = dev->caps.max_counters;
t                 624 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				res_alloc->guaranteed[t] =
t                 625 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
t                 634 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 							res_alloc->guaranteed[t];
t                 636 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 				res_alloc->res_reserved += res_alloc->guaranteed[t];
t                 852 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
t                 854 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	switch (t) {
t                  77 drivers/net/ethernet/mellanox/mlx5/core/alloc.c 	dma_addr_t t;
t                  88 drivers/net/ethernet/mellanox/mlx5/core/alloc.c 							  &t, node);
t                  92 drivers/net/ethernet/mellanox/mlx5/core/alloc.c 	buf->frags->map = t;
t                  94 drivers/net/ethernet/mellanox/mlx5/core/alloc.c 	while (t & ((1 << buf->page_shift) - 1)) {
t                  15 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h 	struct mlx5_flow_table		*t;
t                  28 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h 	struct mlx5_flow_table *t;
t                 134 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
t                 166 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
t                 199 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
t                 283 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                 292 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                 323 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
t                 324 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	if (IS_ERR(ft->t)) {
t                 325 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		err = PTR_ERR(ft->t);
t                 326 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		ft->t = NULL;
t                 484 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	ft = arfs_table->ft.t;
t                 165 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
t                 172 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	dest.ft = priv->fs.l2.ft.t;
t                 666 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	mlx5_destroy_flow_table(ft->t);
t                 667 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = NULL;
t                 865 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft = ttc->ft.t;
t                 886 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	dest.ft   = params->inner_ttc->ft.t;
t                 938 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                 948 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                 958 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1023 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft = ttc->ft.t;
t                1075 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1085 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1095 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1146 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
t                1147 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (IS_ERR(ft->t)) {
t                1148 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		err = PTR_ERR(ft->t);
t                1149 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		ft->t = NULL;
t                1192 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
t                1193 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (IS_ERR(ft->t)) {
t                1194 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		err = PTR_ERR(ft->t);
t                1195 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		ft->t = NULL;
t                1225 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
t                1243 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	dest.ft = priv->fs.ttc.ft.t;
t                1308 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1319 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1330 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1365 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
t                1366 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (IS_ERR(ft->t)) {
t                1367 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		err = PTR_ERR(ft->t);
t                1368 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		ft->t = NULL;
t                1379 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	mlx5_destroy_flow_table(ft->t);
t                1380 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = NULL;
t                1409 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1421 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1432 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1443 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
t                1486 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
t                1488 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (IS_ERR(ft->t)) {
t                1489 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		err = PTR_ERR(ft->t);
t                1490 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		ft->t = NULL;
t                1510 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	mlx5_destroy_flow_table(ft->t);
t                1511 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	ft->t = NULL;
t                 279 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		new_dest.ft = priv->fs.ttc.ft.t;
t                 614 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		   hp->num_channels, hp->ttc.ft.t->id);
t                 865 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
t                 932 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		dest[dest_ix].ft = priv->fs.vlan.ft.t;
t                 956 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
t                 968 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		priv->fs.tc.t =
t                 974 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		if (IS_ERR(priv->fs.tc.t)) {
t                 980 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			return PTR_ERR(priv->fs.tc.t);
t                 987 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
t                1006 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
t                1007 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_destroy_flow_table(priv->fs.tc.t);
t                1008 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		priv->fs.tc.t = NULL;
t                4151 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!IS_ERR_OR_NULL(tc->t)) {
t                4152 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_destroy_flow_table(tc->t);
t                4153 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		tc->t = NULL;
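The mlx5 en_fs.c and en_tc.c entries above repeat one convention: create a flow table, convert a failure with PTR_ERR(), and immediately reset the cached pointer to NULL so later teardown and the IS_ERR_OR_NULL() checks only ever see "valid table" or "no table". A minimal sketch of that convention follows; the wrapper struct and helper names are hypothetical, only mlx5_create_flow_table()/mlx5_destroy_flow_table() are the real mlx5 core calls.

#include <linux/err.h>
#include <linux/mlx5/fs.h>

struct my_table {
	struct mlx5_flow_table *t;	/* NULL when not instantiated */
};

static int my_table_create(struct mlx5_flow_namespace *ns,
			   struct mlx5_flow_table_attr *attr,
			   struct my_table *ft)
{
	ft->t = mlx5_create_flow_table(ns, attr);
	if (IS_ERR(ft->t)) {
		int err = PTR_ERR(ft->t);

		ft->t = NULL;	/* never let an ERR_PTR escape into teardown */
		return err;
	}
	return 0;
}

static void my_table_destroy(struct my_table *ft)
{
	if (!ft->t)
		return;
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}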
t                 700 drivers/net/ethernet/mellanox/mlx5/core/health.c static void poll_health(struct timer_list *t)
t                 702 drivers/net/ethernet/mellanox/mlx5/core/health.c 	struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
t                 403 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct ip6_tnl *t = netdev_priv(to_dev);
t                 404 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct flowi6 fl6 = t->fl.u.ip6;
t                 412 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	fl6.flowi6_mark = t->parms.fwmark;
t                 413 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
t                 416 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	dst = ip6_route_output(t->net, NULL, &fl6);
t                6681 drivers/net/ethernet/micrel/ksz884x.c static void mib_monitor(struct timer_list *t)
t                6683 drivers/net/ethernet/micrel/ksz884x.c 	struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
t                6708 drivers/net/ethernet/micrel/ksz884x.c static void dev_monitor(struct timer_list *t)
t                6710 drivers/net/ethernet/micrel/ksz884x.c 	struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
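Most timer callbacks indexed in this section share one shape: the struct timer_list is embedded in a driver-private structure, timer_setup() registers the handler, and from_timer() recovers the container inside it. A self-contained sketch with hypothetical struct and field names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list watchdog;	/* embedded timer */
	unsigned long ticks;
};

/* The callback receives the timer_list pointer; from_timer() (a
 * container_of() wrapper) recovers the enclosing my_priv.
 */
static void my_watchdog(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, watchdog);

	priv->ticks++;
	mod_timer(&priv->watchdog, jiffies + HZ);	/* re-arm, 1 s period */
}

static void my_priv_init(struct my_priv *priv)
{
	timer_setup(&priv->watchdog, my_watchdog, 0);
	mod_timer(&priv->watchdog, jiffies + HZ);
}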
t                  42 drivers/net/ethernet/microchip/encx24j600-regmap.c 	struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
t                  45 drivers/net/ethernet/microchip/encx24j600-regmap.c 	spi_message_add_tail(&t[0], &m);
t                  46 drivers/net/ethernet/microchip/encx24j600-regmap.c 	spi_message_add_tail(&t[1], &m);
t                 124 drivers/net/ethernet/microchip/encx24j600-regmap.c 	struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
t                 162 drivers/net/ethernet/microchip/encx24j600-regmap.c 	spi_message_add_tail(&t[0], &m);
t                 165 drivers/net/ethernet/microchip/encx24j600-regmap.c 		t[1].tx_buf = &reg;
t                 166 drivers/net/ethernet/microchip/encx24j600-regmap.c 		spi_message_add_tail(&t[1], &m);
t                 169 drivers/net/ethernet/microchip/encx24j600-regmap.c 	spi_message_add_tail(&t[2], &m);
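The encx24j600-regmap.c entries (and the spi_ks8995.c ones further down) build multi-segment SPI transactions: an array of struct spi_transfer is chained onto a single struct spi_message with spi_message_add_tail() and submitted as one unit. A minimal sketch of a two-transfer "opcode + payload" write; the function name and opcode handling are illustrative, the SPI calls are the real API.

#include <linux/spi/spi.h>

/* Write "len" payload bytes preceded by a one-byte opcode as one SPI
 * message (two transfers under a single chip-select assertion).
 */
static int my_spi_opcode_write(struct spi_device *spi, u8 opcode,
			       const void *buf, size_t len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = &opcode, .len = 1 },
		{ .tx_buf = buf,     .len = len },
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);	/* blocks until the transfer completes */
}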
t                3500 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static void myri10ge_watchdog_timer(struct timer_list *t)
t                3508 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	mgp = from_timer(mgp, t, watchdog_timer);
t                 613 drivers/net/ethernet/natsemi/natsemi.c static void netdev_timer(struct timer_list *t);
t                1790 drivers/net/ethernet/natsemi/natsemi.c static void netdev_timer(struct timer_list *t)
t                1792 drivers/net/ethernet/natsemi/natsemi.c 	struct netdev_private *np = from_timer(np, t, timer);
t                1589 drivers/net/ethernet/natsemi/ns83820.c static void ns83820_tx_watch(struct timer_list *t)
t                1591 drivers/net/ethernet/natsemi/ns83820.c 	struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
t                4186 drivers/net/ethernet/neterion/s2io.c s2io_alarm_handle(struct timer_list *t)
t                4188 drivers/net/ethernet/neterion/s2io.c 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
t                1077 drivers/net/ethernet/neterion/s2io.h static void s2io_alarm_handle(struct timer_list *t);
t                2588 drivers/net/ethernet/neterion/vxge/vxge-main.c static void vxge_poll_vp_reset(struct timer_list *t)
t                2590 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
t                2607 drivers/net/ethernet/neterion/vxge/vxge-main.c static void vxge_poll_vp_lockup(struct timer_list *t)
t                2609 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
t                 176 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_reconfig_timer(struct timer_list *t)
t                 178 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
t                1911 drivers/net/ethernet/nvidia/forcedeth.c static void nv_do_rx_refill(struct timer_list *t)
t                1913 drivers/net/ethernet/nvidia/forcedeth.c 	struct fe_priv *np = from_timer(np, t, oom_kick);
t                4121 drivers/net/ethernet/nvidia/forcedeth.c static void nv_do_nic_poll(struct timer_list *t)
t                4123 drivers/net/ethernet/nvidia/forcedeth.c 	struct fe_priv *np = from_timer(np, t, nic_poll);
t                4238 drivers/net/ethernet/nvidia/forcedeth.c static void nv_do_stats_poll(struct timer_list *t)
t                4242 drivers/net/ethernet/nvidia/forcedeth.c 	struct fe_priv *np = from_timer(np, t, stats_poll);
t                1039 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c static void pch_gbe_watchdog(struct timer_list *t)
t                1041 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct pch_gbe_adapter *adapter = from_timer(adapter, t,
t                 416 drivers/net/ethernet/packetengines/hamachi.c static void hamachi_timer(struct timer_list *t);
t                 422 drivers/net/ethernet/packetengines/hamachi.c 	void (*media_timer)(struct timer_list *t);
t                 550 drivers/net/ethernet/packetengines/hamachi.c static void hamachi_timer(struct timer_list *t);
t                1020 drivers/net/ethernet/packetengines/hamachi.c static void hamachi_timer(struct timer_list *t)
t                1022 drivers/net/ethernet/packetengines/hamachi.c 	struct hamachi_private *hmp = from_timer(hmp, t, timer);
t                 346 drivers/net/ethernet/packetengines/yellowfin.c static void yellowfin_timer(struct timer_list *t);
t                 646 drivers/net/ethernet/packetengines/yellowfin.c static void yellowfin_timer(struct timer_list *t)
t                 648 drivers/net/ethernet/packetengines/yellowfin.c 	struct yellowfin_private *yp = from_timer(yp, t, timer);
t                 935 drivers/net/ethernet/pasemi/pasemi_mac.c static void pasemi_mac_tx_timer(struct timer_list *t)
t                 937 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
t                  63 drivers/net/ethernet/qlogic/qede/qede_filter.c 	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
t                  66 drivers/net/ethernet/qlogic/qede/qede_filter.c 	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
t                1481 drivers/net/ethernet/qlogic/qede/qede_filter.c static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
t                1485 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (t->eth_proto == htons(ETH_P_IP))
t                1490 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (t->ip_proto == IPPROTO_TCP)
t                1509 drivers/net/ethernet/qlogic/qede/qede_filter.c static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
t                1516 drivers/net/ethernet/qlogic/qede/qede_filter.c 	eth->h_proto = t->eth_proto;
t                1517 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ip->saddr = t->src_ipv4;
t                1518 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ip->daddr = t->dst_ipv4;
t                1521 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ip->protocol = t->ip_proto;
t                1522 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
t                1525 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ports[0] = t->src_port;
t                1526 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ports[1] = t->dst_port;
t                1529 drivers/net/ethernet/qlogic/qede/qede_filter.c static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
t                1532 drivers/net/ethernet/qlogic/qede/qede_filter.c 	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
t                1536 drivers/net/ethernet/qlogic/qede/qede_filter.c 		 prefix, &t->src_ipv4, t->src_port,
t                1537 drivers/net/ethernet/qlogic/qede/qede_filter.c 		 &t->dst_ipv4, t->dst_port);
t                1556 drivers/net/ethernet/qlogic/qede/qede_filter.c static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
t                1563 drivers/net/ethernet/qlogic/qede/qede_filter.c 	eth->h_proto = t->eth_proto;
t                1564 drivers/net/ethernet/qlogic/qede/qede_filter.c 	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
t                1565 drivers/net/ethernet/qlogic/qede/qede_filter.c 	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
t                1568 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (t->ip_proto == IPPROTO_TCP) {
t                1577 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ports[0] = t->src_port;
t                1578 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ports[1] = t->dst_port;
t                1606 drivers/net/ethernet/qlogic/qede/qede_filter.c 					struct qede_arfs_tuple *t)
t                1611 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
t                1612 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
t                1613 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && t->dst_port &&
t                1614 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !t->src_ipv4 && !t->dst_ipv4) {
t                1615 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
t                1616 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && !t->dst_port &&
t                1617 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !t->dst_ipv4 && t->src_ipv4) {
t                1618 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
t                1619 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && !t->dst_port &&
t                1620 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   t->dst_ipv4 && !t->src_ipv4) {
t                1621 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
t                1627 drivers/net/ethernet/qlogic/qede/qede_filter.c 	t->ip_comp = qede_flow_spec_ipv4_cmp;
t                1628 drivers/net/ethernet/qlogic/qede/qede_filter.c 	t->build_hdr = qede_flow_build_ipv4_hdr;
t                1629 drivers/net/ethernet/qlogic/qede/qede_filter.c 	t->stringify = qede_flow_stringify_ipv4_hdr;
t                1635 drivers/net/ethernet/qlogic/qede/qede_filter.c 					struct qede_arfs_tuple *t,
t                1641 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (t->src_port && t->dst_port &&
t                1642 drivers/net/ethernet/qlogic/qede/qede_filter.c 	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
t                1643 drivers/net/ethernet/qlogic/qede/qede_filter.c 	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
t                1644 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
t                1645 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && t->dst_port &&
t                1646 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
t                1647 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
t                1648 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
t                1649 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && !t->dst_port &&
t                1650 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
t                1651 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
t                1652 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
t                1653 drivers/net/ethernet/qlogic/qede/qede_filter.c 	} else if (!t->src_port && !t->dst_port &&
t                1654 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
t                1655 drivers/net/ethernet/qlogic/qede/qede_filter.c 		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
t                1656 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
t                1662 drivers/net/ethernet/qlogic/qede/qede_filter.c 	t->ip_comp = qede_flow_spec_ipv6_cmp;
t                1663 drivers/net/ethernet/qlogic/qede/qede_filter.c 	t->build_hdr = qede_flow_build_ipv6_hdr;
t                1670 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
t                1679 drivers/net/ethernet/qlogic/qede/qede_filter.c 		if (fltr->tuple.ip_proto == t->ip_proto &&
t                1680 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    fltr->tuple.src_port == t->src_port &&
t                1681 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    fltr->tuple.dst_port == t->dst_port &&
t                1682 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    t->ip_comp(&fltr->tuple, t))
t                1782 drivers/net/ethernet/qlogic/qede/qede_filter.c 		      struct qede_arfs_tuple *t)
t                1794 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->src_port = match.key->src;
t                1795 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->dst_port = match.key->dst;
t                1803 drivers/net/ethernet/qlogic/qede/qede_filter.c 			  struct qede_arfs_tuple *t)
t                1823 drivers/net/ethernet/qlogic/qede/qede_filter.c 		memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
t                1824 drivers/net/ethernet/qlogic/qede/qede_filter.c 		memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
t                1827 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_flow_parse_ports(edev, rule, t))
t                1830 drivers/net/ethernet/qlogic/qede/qede_filter.c 	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
t                1835 drivers/net/ethernet/qlogic/qede/qede_filter.c 			struct qede_arfs_tuple *t)
t                1847 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->src_ipv4 = match.key->src;
t                1848 drivers/net/ethernet/qlogic/qede/qede_filter.c 		t->dst_ipv4 = match.key->dst;
t                1851 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_flow_parse_ports(edev, rule, t))
t                1854 drivers/net/ethernet/qlogic/qede/qede_filter.c 	return qede_set_v4_tuple_to_profile(edev, t);
t                1950 drivers/net/ethernet/qlogic/qede/qede_filter.c 	struct qede_arfs_tuple t;
t                1960 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
t                1964 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
t                1968 drivers/net/ethernet/qlogic/qede/qede_filter.c 			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
t                1976 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_flow_find_fltr(edev, &t)) {
t                1987 drivers/net/ethernet/qlogic/qede/qede_filter.c 	min_hlen = qede_flow_get_min_header_size(&t);
t                1996 drivers/net/ethernet/qlogic/qede/qede_filter.c 	memcpy(&n->tuple, &t, sizeof(n->tuple));
t                2018 drivers/net/ethernet/qlogic/qede/qede_filter.c 				   struct qede_arfs_tuple *t,
t                2034 drivers/net/ethernet/qlogic/qede/qede_filter.c 	    edev->arfs->mode != t->mode) {
t                2037 drivers/net/ethernet/qlogic/qede/qede_filter.c 			t->mode, edev->arfs->filter_count);
t                2048 drivers/net/ethernet/qlogic/qede/qede_filter.c 				  struct qede_arfs_tuple *t,
t                2079 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
t                2085 drivers/net/ethernet/qlogic/qede/qede_filter.c 	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
t                2097 drivers/net/ethernet/qlogic/qede/qede_filter.c 	struct qede_arfs_tuple t;
t                2108 drivers/net/ethernet/qlogic/qede/qede_filter.c 	rc = qede_flow_spec_to_rule(edev, &t, fsp);
t                2112 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (qede_flow_find_fltr(edev, &t)) {
t                2123 drivers/net/ethernet/qlogic/qede/qede_filter.c 	min_hlen = qede_flow_get_min_header_size(&t);
t                2135 drivers/net/ethernet/qlogic/qede/qede_filter.c 	memcpy(&n->tuple, &t, sizeof(n->tuple));
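The qede_filter.c entries classify an ARFS tuple into a filter-configuration mode by which fields are populated: all of source/destination port and source/destination address selects a 5-tuple filter, a lone destination port selects an L4-port filter, and a lone source or destination address selects an IP-source or IP-destination filter. A compact sketch of that decision table for the IPv4 case; the enum and struct below are hypothetical mirrors of the driver's qede_arfs_tuple fields and QED_FILTER_CONFIG_MODE_* values.

#include <linux/types.h>

enum my_filter_mode {
	MY_MODE_INVAL,
	MY_MODE_5_TUPLE,
	MY_MODE_L4_PORT,
	MY_MODE_IP_SRC,
	MY_MODE_IP_DEST,
};

struct my_v4_tuple {
	__be32 src_ipv4, dst_ipv4;
	__be16 src_port, dst_port;
};

static enum my_filter_mode my_v4_tuple_mode(const struct my_v4_tuple *t)
{
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4)
		return MY_MODE_5_TUPLE;
	if (!t->src_port && t->dst_port && !t->src_ipv4 && !t->dst_ipv4)
		return MY_MODE_L4_PORT;
	if (!t->src_port && !t->dst_port && !t->dst_ipv4 && t->src_ipv4)
		return MY_MODE_IP_SRC;
	if (!t->src_port && !t->dst_port && t->dst_ipv4 && !t->src_ipv4)
		return MY_MODE_IP_DEST;
	return MY_MODE_INVAL;	/* unsupported field combination */
}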
t                3751 drivers/net/ethernet/qlogic/qla3xxx.c static void ql3xxx_timer(struct timer_list *t)
t                3753 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
t                 188 drivers/net/ethernet/realtek/atp.c static void atp_timed_checker(struct timer_list *t);
t                 716 drivers/net/ethernet/realtek/atp.c static void atp_timed_checker(struct timer_list *t)
t                 718 drivers/net/ethernet/realtek/atp.c 	struct net_local *lp = from_timer(lp, t, timer);
t                1856 drivers/net/ethernet/realtek/r8169_main.c #define rxtx_x1822(r, t) {		\
t                1857 drivers/net/ethernet/realtek/r8169_main.c 	{{(r),		(t)}},		\
t                1858 drivers/net/ethernet/realtek/r8169_main.c 	{{(r)*8,	(t)*8}},	\
t                1859 drivers/net/ethernet/realtek/r8169_main.c 	{{(r)*8*2,	(t)*8*2}},	\
t                1860 drivers/net/ethernet/realtek/r8169_main.c 	{{(r)*8*2*2,	(t)*8*2*2}},	\
t                1982 drivers/net/ethernet/rocker/rocker_ofdpa.c static void ofdpa_fdb_cleanup(struct timer_list *t)
t                1984 drivers/net/ethernet/rocker/rocker_ofdpa.c 	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
t                 105 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static void sxgbe_eee_ctrl_timer(struct timer_list *t)
t                 107 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
t                1005 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static void sxgbe_tx_timer(struct timer_list *t)
t                1007 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
t                 171 drivers/net/ethernet/seeq/ether3.c static void ether3_ledoff(struct timer_list *t)
t                 173 drivers/net/ethernet/seeq/ether3.c 	struct dev_priv *private = from_timer(private, t, timer);
t                 249 drivers/net/ethernet/seeq/sgiseeq.c 	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
t                 268 drivers/net/ethernet/seeq/sgiseeq.c 		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
t                 269 drivers/net/ethernet/seeq/sgiseeq.c 		       t[i].tdma.pnext);
t                 272 drivers/net/ethernet/seeq/sgiseeq.c 		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
t                 273 drivers/net/ethernet/seeq/sgiseeq.c 		       t[i].tdma.pnext);
t                  42 drivers/net/ethernet/sfc/efx.h void efx_rx_slow_fill(struct timer_list *t);
t                  45 drivers/net/ethernet/sfc/falcon/efx.h void ef4_rx_slow_fill(struct timer_list *t);
t                1454 drivers/net/ethernet/sfc/falcon/falcon.c static void falcon_stats_timer_func(struct timer_list *t)
t                1456 drivers/net/ethernet/sfc/falcon/falcon.c 	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
t                 376 drivers/net/ethernet/sfc/falcon/rx.c void ef4_rx_slow_fill(struct timer_list *t)
t                 378 drivers/net/ethernet/sfc/falcon/rx.c 	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
t                  48 drivers/net/ethernet/sfc/mcdi.c static void efx_mcdi_timeout_async(struct timer_list *t);
t                 607 drivers/net/ethernet/sfc/mcdi.c static void efx_mcdi_timeout_async(struct timer_list *t)
t                 609 drivers/net/ethernet/sfc/mcdi.c 	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
t                 375 drivers/net/ethernet/sfc/rx.c void efx_rx_slow_fill(struct timer_list *t)
t                 377 drivers/net/ethernet/sfc/rx.c 	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
t                 746 drivers/net/ethernet/sgi/ioc3-eth.c static void ioc3_timer(struct timer_list *t)
t                 748 drivers/net/ethernet/sgi/ioc3-eth.c 	struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
t                1021 drivers/net/ethernet/sis/sis190.c static void sis190_phy_timer(struct timer_list *t)
t                1023 drivers/net/ethernet/sis/sis190.c 	struct sis190_private *tp = from_timer(tp, t, timer);
t                 223 drivers/net/ethernet/sis/sis900.c static void sis900_timer(struct timer_list *t);
t                1307 drivers/net/ethernet/sis/sis900.c static void sis900_timer(struct timer_list *t)
t                1309 drivers/net/ethernet/sis/sis900.c 	struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
t                 293 drivers/net/ethernet/smsc/epic100.c static void epic_timer(struct timer_list *t);
t                 844 drivers/net/ethernet/smsc/epic100.c static void epic_timer(struct timer_list *t)
t                 846 drivers/net/ethernet/smsc/epic100.c 	struct epic_private *ep = from_timer(ep, t, timer);
t                 283 drivers/net/ethernet/smsc/smc91c92_cs.c static void media_check(struct timer_list *t);
t                1711 drivers/net/ethernet/smsc/smc91c92_cs.c static void media_check(struct timer_list *t)
t                1713 drivers/net/ethernet/smsc/smc91c92_cs.c     struct smc_private *smc = from_timer(smc, t, media);
t                 191 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c static void aneg_link_timer_callback(struct timer_list *t)
t                 193 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c 	struct tse_pcs *pcs = from_timer(pcs, t, aneg_link_timer);
t                 562 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c 	u32 t, r;
t                 564 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c 	t = readl(ioaddr + EMAC_TX_CTL0);
t                 567 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c 		t |= EMAC_TX_TRANSMITTER_EN;
t                 570 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c 		t &= ~EMAC_TX_TRANSMITTER_EN;
t                 573 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c 	writel(t, ioaddr + EMAC_TX_CTL0);
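The dwmac-sun8i entries are a plain MMIO read-modify-write: read the transmit-control register, set or clear the transmitter-enable bit, write the value back. A generic sketch of the pattern; the register offset and bit below are hypothetical stand-ins for EMAC_TX_CTL0/EMAC_TX_TRANSMITTER_EN.

#include <linux/io.h>
#include <linux/bits.h>

#define MY_TX_CTL0		0x10		/* hypothetical register offset */
#define MY_TX_TRANSMITTER_EN	BIT(31)		/* hypothetical enable bit */

static void my_set_tx(void __iomem *ioaddr, bool enable)
{
	u32 t = readl(ioaddr + MY_TX_CTL0);	/* read current value */

	if (enable)
		t |= MY_TX_TRANSMITTER_EN;
	else
		t &= ~MY_TX_TRANSMITTER_EN;

	writel(t, ioaddr + MY_TX_CTL0);		/* write modified value back */
}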
t                 367 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_eee_ctrl_timer(struct timer_list *t)
t                 369 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
t                2258 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_tx_timer(struct timer_list *t)
t                2260 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
t                4070 drivers/net/ethernet/sun/cassini.c static void cas_link_timer(struct timer_list *t)
t                4072 drivers/net/ethernet/sun/cassini.c 	struct cas *cp = from_timer(cp, t, link_timer);
t                2207 drivers/net/ethernet/sun/niu.c static void niu_timer(struct timer_list *t)
t                2209 drivers/net/ethernet/sun/niu.c 	struct niu *np = from_timer(np, t, timer);
t                 527 drivers/net/ethernet/sun/sunbmac.c static void bigmac_timer(struct timer_list *t)
t                 529 drivers/net/ethernet/sun/sunbmac.c 	struct bigmac *bp = from_timer(bp, t, bigmac_timer);
t                1502 drivers/net/ethernet/sun/sungem.c static void gem_link_timer(struct timer_list *t)
t                1504 drivers/net/ethernet/sun/sungem.c 	struct gem *gp = from_timer(gp, t, link_timer);
t                 689 drivers/net/ethernet/sun/sunhme.c static void happy_meal_timer(struct timer_list *t)
t                 691 drivers/net/ethernet/sun/sunhme.c 	struct happy_meal *hp = from_timer(hp, t, happy_timer);
t                1044 drivers/net/ethernet/sun/sunvnet_common.c void sunvnet_clean_timer_expire_common(struct timer_list *t)
t                1046 drivers/net/ethernet/sun/sunvnet_common.c 	struct vnet_port *port = from_timer(port, t, clean_timer);
t                 133 drivers/net/ethernet/sun/sunvnet_common.h void sunvnet_clean_timer_expire_common(struct timer_list *t);
t                 361 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static void xlgmac_tx_timer(struct timer_list *t)
t                 363 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
t                 755 drivers/net/ethernet/ti/cpsw_ale.c static void cpsw_ale_timer(struct timer_list *t)
t                 757 drivers/net/ethernet/ti/cpsw_ale.c 	struct cpsw_ale *ale = from_timer(ale, t, timer);
t                2847 drivers/net/ethernet/ti/netcp_ethss.c static void netcp_ethss_timer(struct timer_list *t)
t                2849 drivers/net/ethernet/ti/netcp_ethss.c 	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
t                 177 drivers/net/ethernet/ti/tlan.c static void	tlan_timer(struct timer_list *t);
t                 178 drivers/net/ethernet/ti/tlan.c static void	tlan_phy_monitor(struct timer_list *t);
t                1838 drivers/net/ethernet/ti/tlan.c static void tlan_timer(struct timer_list *t)
t                1840 drivers/net/ethernet/ti/tlan.c 	struct tlan_priv	*priv = from_timer(priv, t, timer);
t                2762 drivers/net/ethernet/ti/tlan.c static void tlan_phy_monitor(struct timer_list *t)
t                2764 drivers/net/ethernet/ti/tlan.c 	struct tlan_priv *priv = from_timer(priv, t, media_timer);
t                 903 drivers/net/ethernet/toshiba/spider_net.c spider_net_cleanup_tx_ring(struct timer_list *t)
t                 905 drivers/net/ethernet/toshiba/spider_net.c 	struct spider_net_card *card = from_timer(card, t, tx_timer);
t                1969 drivers/net/ethernet/toshiba/spider_net.c static void spider_net_link_phy(struct timer_list *t)
t                1971 drivers/net/ethernet/toshiba/spider_net.c 	struct spider_net_card *card = from_timer(card, t, aneg_timer);
t                 157 drivers/net/ethernet/tundra/tsi108_eth.c static void tsi108_timed_checker(struct timer_list *t);
t                1662 drivers/net/ethernet/tundra/tsi108_eth.c static void tsi108_timed_checker(struct timer_list *t)
t                1664 drivers/net/ethernet/tundra/tsi108_eth.c 	struct tsi108_prv_data *data = from_timer(data, t, timer);
t                1318 drivers/net/ethernet/via/via-velocity.h #define VELOCITY_PRT_CAMMASK(p,t) {\
t                1320 drivers/net/ethernet/via/via-velocity.h 	if ((t)==VELOCITY_MULTICAST_CAM) {\
t                 252 drivers/net/fddi/defza.c 	long t;
t                 265 drivers/net/fddi/defza.c 	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
t                 280 drivers/net/fddi/defza.c 		 (45 * HZ - t) * 1000 / HZ);
t                 384 drivers/net/fddi/defza.c 	long t;
t                 397 drivers/net/fddi/defza.c 	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
t                 411 drivers/net/fddi/defza.c 		 (3 * HZ - t) * 1000 / HZ);
t                1045 drivers/net/fddi/defza.c static void fza_reset_timer(struct timer_list *t)
t                1047 drivers/net/fddi/defza.c 	struct fza_private *fp = from_timer(fp, t, reset_timer);
t                1156 drivers/net/fddi/defza.c 	long t;
t                1200 drivers/net/fddi/defza.c 	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
t                1214 drivers/net/fddi/defza.c 		 (3 * HZ - t) * 1000 / HZ);
t                1224 drivers/net/fddi/defza.c 	long t;
t                1240 drivers/net/fddi/defza.c 	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
t                1252 drivers/net/fddi/defza.c 		 (15 * HZ - t) * 1000 / HZ);
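The defza.c entries rely on wait_event_timeout() returning the number of jiffies left when the condition became true (or 0 on timeout), and convert "timeout minus remaining" into elapsed milliseconds for logging. A sketch of that computation, assuming a hypothetical wait-queue/flag pair standing in for cmd_done_wait/cmd_done_flag:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Wait up to 3 s for "done" to become true and report how long it took. */
static int my_wait_cmd(wait_queue_head_t *wq, int *done)
{
	long t = wait_event_timeout(*wq, *done, 3 * HZ);

	if (!t)
		return -ETIMEDOUT;	/* condition never became true */

	/* t = jiffies left, so (3 * HZ - t) = jiffies actually spent waiting */
	pr_debug("command completed in %ld ms\n",
		 (3 * HZ - t) * 1000 / HZ);
	return 0;
}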
t                 382 drivers/net/fddi/skfp/h/cmtdef.h #define MIB2US(t)		((t)/12)
t                  83 drivers/net/fddi/skfp/h/fplustm.h 	struct	s_smt_fp_txd t ;		/* pointer to the TxD */
t                  43 drivers/net/fddi/skfp/h/mbuf.h #define	smtod(x,t)	((t)((x)->sm_data + (x)->sm_off))
t                  44 drivers/net/fddi/skfp/h/mbuf.h #define	smtodoff(x,t,o)	((t)((x)->sm_data + (o)))
t                 897 drivers/net/fddi/skfp/h/skfbi.h #define	COUNT(t)	((t)<<6)	/* counter */
t                 450 drivers/net/fddi/skfp/h/smc.h 	struct s_timer	t ;		/* timer */
t                 110 drivers/net/fddi/skfp/h/smt.h #define SMTSETPARA(p,t)		(p)->para.p_type = (t),\
t                 179 drivers/net/fddi/skfp/h/supern_2.h 	} t ;
t                 209 drivers/net/fddi/skfp/h/supern_2.h 	} t ;
t                 588 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_fp_txd volatile *t ;
t                 592 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_get ;
t                 595 drivers/net/fddi/skfp/hwmtm.c 		t = t->txd_next ;
t                 597 drivers/net/fddi/skfp/hwmtm.c 	phys = le32_to_cpu(t->txd_ntdadr) ;
t                 599 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_get ;
t                 601 drivers/net/fddi/skfp/hwmtm.c 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
t                 602 drivers/net/fddi/skfp/hwmtm.c 		tbctrl = le32_to_cpu(t->txd_tbctrl) ;
t                 612 drivers/net/fddi/skfp/hwmtm.c 				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
t                 615 drivers/net/fddi/skfp/hwmtm.c 		phys = le32_to_cpu(t->txd_ntdadr) ;
t                 616 drivers/net/fddi/skfp/hwmtm.c 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                 617 drivers/net/fddi/skfp/hwmtm.c 		t = t->txd_next ;
t                1627 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_fp_txd volatile *t ;
t                1639 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_put ;
t                1644 drivers/net/fddi/skfp/hwmtm.c 		DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
t                1645 drivers/net/fddi/skfp/hwmtm.c 		t->txd_virt = virt ;
t                1646 drivers/net/fddi/skfp/hwmtm.c 		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
t                1647 drivers/net/fddi/skfp/hwmtm.c 		t->txd_tbadr = cpu_to_le32(phys) ;
t                1651 drivers/net/fddi/skfp/hwmtm.c 		t->txd_tbctrl = tbctrl ;
t                1654 drivers/net/fddi/skfp/hwmtm.c 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                1657 drivers/net/fddi/skfp/hwmtm.c 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                1667 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_curr_put = t->txd_next ;
t                1684 drivers/net/fddi/skfp/hwmtm.c 				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
t                1706 drivers/net/fddi/skfp/hwmtm.c 				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
t                1723 drivers/net/fddi/skfp/hwmtm.c 	NDD_TRACE("THfE",t,queue->tx_free,0) ;
t                1813 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_fp_txd volatile *t ;
t                1877 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_put ;
t                1880 drivers/net/fddi/skfp/hwmtm.c 			DB_TX(5, "init TxD = 0x%p", t);
t                1883 drivers/net/fddi/skfp/hwmtm.c 				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
t                1886 drivers/net/fddi/skfp/hwmtm.c 			t->txd_virt = virt[i] ;
t                1889 drivers/net/fddi/skfp/hwmtm.c 			t->txd_tbadr = cpu_to_le32(phys) ;
t                1893 drivers/net/fddi/skfp/hwmtm.c 			t->txd_tbctrl = tbctrl ;
t                1895 drivers/net/fddi/skfp/hwmtm.c 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                1898 drivers/net/fddi/skfp/hwmtm.c 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                1902 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_curr_put = t = t->txd_next ;
t                1920 drivers/net/fddi/skfp/hwmtm.c 	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
t                2029 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_fp_txd volatile *t ;
t                2047 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_get ;
t                2050 drivers/net/fddi/skfp/hwmtm.c 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
t                2051 drivers/net/fddi/skfp/hwmtm.c 			DB_TX(5, "switch OWN bit of TxD 0x%p", t);
t                2052 drivers/net/fddi/skfp/hwmtm.c 			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
t                2053 drivers/net/fddi/skfp/hwmtm.c 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t                2054 drivers/net/fddi/skfp/hwmtm.c 			t = t->txd_next ;
t                2066 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_get ;
t                2074 drivers/net/fddi/skfp/hwmtm.c 			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
t                2077 drivers/net/fddi/skfp/hwmtm.c 			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
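The skfp hwmtm.c entries walk the transmit-descriptor ring, read the little-endian control word, and clear the BMU_OWN bit to take descriptors back from the DMA engine (with cache flush/invalidate calls around each step). A generic sketch of handing descriptors back via a little-endian ownership flag; the descriptor layout and flag value are hypothetical, and the cache-maintenance calls are left out.

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_OWN	0x80000000U	/* hypothetical "owned by DMA engine" flag */

struct my_txd {
	__le32 ctrl;		 /* control word, device-endian (LE) */
	__le32 next;		 /* bus address of the next descriptor */
	struct my_txd *txd_next; /* CPU-side ring link */
};

/* Reclaim up to "count" descriptors by clearing the ownership bit, in the
 * spirit of the loop visible in the hwmtm.c entries above.
 */
static struct my_txd *my_reclaim_txds(struct my_txd *t, int count)
{
	while (count--) {
		u32 ctrl = le32_to_cpu(t->ctrl);

		if (ctrl & MY_OWN)
			t->ctrl = cpu_to_le32(ctrl & ~MY_OWN);
		t = t->txd_next;
	}
	return t;	/* new "current get" position */
}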
t                  29 drivers/net/fddi/skfp/smttimer.c 	smc->t.st_queue = NULL;
t                  30 drivers/net/fddi/skfp/smttimer.c 	smc->t.st_fast.tm_active = FALSE ;
t                  31 drivers/net/fddi/skfp/smttimer.c 	smc->t.st_fast.tm_next = NULL;
t                  44 drivers/net/fddi/skfp/smttimer.c 	if (smc->t.st_queue == timer && !timer->tm_next) {
t                  47 drivers/net/fddi/skfp/smttimer.c 	for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
t                  72 drivers/net/fddi/skfp/smttimer.c 	if (!smc->t.st_queue) {
t                  73 drivers/net/fddi/skfp/smttimer.c 		smc->t.st_queue = timer ;
t                  88 drivers/net/fddi/skfp/smttimer.c 	for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
t                 103 drivers/net/fddi/skfp/smttimer.c 	hwt_start(smc,smc->t.st_queue->tm_delta) ;
t                 108 drivers/net/fddi/skfp/smttimer.c 	smt_timer_start(smc,&smc->t.st_fast,32L, EV_TOKEN(EVENT_SMT,SM_FAST)); 
t                 125 drivers/net/fddi/skfp/smttimer.c 	last = &smc->t.st_queue ;
t                 126 drivers/net/fddi/skfp/smttimer.c 	tm = smc->t.st_queue ;
t                 141 drivers/net/fddi/skfp/smttimer.c 	next = smc->t.st_queue ;
t                 142 drivers/net/fddi/skfp/smttimer.c 	smc->t.st_queue = tm ;
t                 149 drivers/net/fddi/skfp/smttimer.c 	if (restart && smc->t.st_queue)
t                 150 drivers/net/fddi/skfp/smttimer.c 		hwt_start(smc,smc->t.st_queue->tm_delta) ;
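The smttimer.c entries manage a software timer queue: st_queue is a singly linked list ordered by expiry, each node appears to carry a delta (tm_delta) relative to its predecessor, and only the head delta is programmed into the hardware timer through hwt_start(). A minimal, generic sketch of inserting into such a delta-encoded list; the types and helper are hypothetical, not the SMT code itself.

#include <linux/types.h>

struct my_sw_timer {
	struct my_sw_timer *next;
	u32 delta;		/* time units relative to the previous entry */
};

/* Insert "timer" so the queue stays sorted by cumulative expiry while each
 * node stores only the difference to its predecessor.
 */
static void my_timer_enqueue(struct my_sw_timer **queue,
			     struct my_sw_timer *timer, u32 delay)
{
	struct my_sw_timer **prev = queue, *tm;

	while ((tm = *prev) && tm->delta <= delay) {
		delay -= tm->delta;	/* consume the predecessors' deltas */
		prev = &tm->next;
	}

	timer->delta = delay;
	timer->next = tm;
	if (tm)
		tm->delta -= delay;	/* keep the successor's delta relative */
	*prev = timer;
}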
t                1224 drivers/net/geneve.c 	struct geneve_dev *geneve, *t = NULL;
t                1236 drivers/net/geneve.c 			t = geneve;
t                1238 drivers/net/geneve.c 	return t;
t                1264 drivers/net/geneve.c 	struct geneve_dev *t, *geneve = netdev_priv(dev);
t                1277 drivers/net/geneve.c 	t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
t                1278 drivers/net/geneve.c 	if (t)
t                 139 drivers/net/hamradio/6pack.c static void sp_xmit_on_air(struct timer_list *t)
t                 141 drivers/net/hamradio/6pack.c 	struct sixpack *sp = from_timer(sp, t, tx_t);
t                 503 drivers/net/hamradio/6pack.c static void resync_tnc(struct timer_list *t)
t                 505 drivers/net/hamradio/6pack.c 	struct sixpack *sp = from_timer(sp, t, resync_t);
t                 235 drivers/net/hamradio/dmascc.c static void start_timer(struct scc_priv *priv, int t, int r15);
t                1065 drivers/net/hamradio/dmascc.c static void start_timer(struct scc_priv *priv, int t, int r15)
t                1068 drivers/net/hamradio/dmascc.c 	if (t == 0) {
t                1070 drivers/net/hamradio/dmascc.c 	} else if (t > 0) {
t                1071 drivers/net/hamradio/dmascc.c 		outb(t & 0xFF, priv->tmr_cnt);
t                1072 drivers/net/hamradio/dmascc.c 		outb((t >> 8) & 0xFF, priv->tmr_cnt);
t                 188 drivers/net/hamradio/scc.c static void t_dwait(struct timer_list *t);
t                 189 drivers/net/hamradio/scc.c static void t_txdelay(struct timer_list *t);
t                 190 drivers/net/hamradio/scc.c static void t_tail(struct timer_list *t);
t                 193 drivers/net/hamradio/scc.c static void t_idle(struct timer_list *t);
t                 997 drivers/net/hamradio/scc.c 				 void (*handler)(struct timer_list *t),
t                1015 drivers/net/hamradio/scc.c 			       void (*handler)(struct timer_list *t),
t                1126 drivers/net/hamradio/scc.c static void t_dwait(struct timer_list *t)
t                1128 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_t);
t                1168 drivers/net/hamradio/scc.c static void t_txdelay(struct timer_list *t)
t                1170 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_t);
t                1189 drivers/net/hamradio/scc.c static void t_tail(struct timer_list *t)
t                1191 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_t);
t                1216 drivers/net/hamradio/scc.c static void t_busy(struct timer_list *t)
t                1218 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_wdog);
t                1235 drivers/net/hamradio/scc.c static void t_maxkeyup(struct timer_list *t)
t                1237 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_wdog);
t                1269 drivers/net/hamradio/scc.c static void t_idle(struct timer_list *t)
t                1271 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_t);
t                1402 drivers/net/hamradio/scc.c static void scc_stop_calibrate(struct timer_list *t)
t                1404 drivers/net/hamradio/scc.c 	struct scc_channel *scc = from_timer(scc, t, tx_wdog);
t                1145 drivers/net/hippi/rrunner.c static void rr_timer(struct timer_list *t)
t                1147 drivers/net/hippi/rrunner.c 	struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
t                1246 drivers/net/hyperv/netvsc_drv.c 			       struct rtnl_link_stats64 *t)
t                1259 drivers/net/hyperv/netvsc_drv.c 	netdev_stats_to_stats64(t, &net->stats);
t                1262 drivers/net/hyperv/netvsc_drv.c 	t->rx_packets += vf_tot.rx_packets;
t                1263 drivers/net/hyperv/netvsc_drv.c 	t->tx_packets += vf_tot.tx_packets;
t                1264 drivers/net/hyperv/netvsc_drv.c 	t->rx_bytes   += vf_tot.rx_bytes;
t                1265 drivers/net/hyperv/netvsc_drv.c 	t->tx_bytes   += vf_tot.tx_bytes;
t                1266 drivers/net/hyperv/netvsc_drv.c 	t->tx_dropped += vf_tot.tx_dropped;
t                1281 drivers/net/hyperv/netvsc_drv.c 		t->tx_bytes	+= bytes;
t                1282 drivers/net/hyperv/netvsc_drv.c 		t->tx_packets	+= packets;
t                1292 drivers/net/hyperv/netvsc_drv.c 		t->rx_bytes	+= bytes;
t                1293 drivers/net/hyperv/netvsc_drv.c 		t->rx_packets	+= packets;
t                1294 drivers/net/hyperv/netvsc_drv.c 		t->multicast	+= multicast;
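The netvsc_drv.c entries implement .ndo_get_stats64 by seeding the caller-provided rtnl_link_stats64 with netdev_stats_to_stats64() and then accumulating per-queue and VF counters into it. A sketch of one common way drivers make that per-queue read consistent, using u64_stats sequence counters; the per-queue stats struct and helper name are hypothetical.

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-queue counters protected by a u64_stats_sync. */
struct my_queue_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* Fold one queue's TX counters into the rtnl_link_stats64 the stack handed
 * us, rereading if a writer updated the counters concurrently.
 */
static void my_fold_tx_stats(const struct my_queue_stats *qs,
			     struct rtnl_link_stats64 *t)
{
	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&qs->syncp);
		packets = qs->packets;
		bytes = qs->bytes;
	} while (u64_stats_fetch_retry(&qs->syncp, start));

	t->tx_packets += packets;
	t->tx_bytes += bytes;
}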
t                 230 drivers/net/ntb_netdev.c static void ntb_netdev_tx_timer(struct timer_list *t)
t                 232 drivers/net/ntb_netdev.c 	struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
t                 554 drivers/net/phy/phylink.c static void phylink_fixed_poll(struct timer_list *t)
t                 556 drivers/net/phy/phylink.c 	struct phylink *pl = container_of(t, struct phylink, link_poll);
t                 558 drivers/net/phy/phylink.c 	mod_timer(t, jiffies + HZ);
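phylink recovers its private structure from the timer with container_of() directly and re-arms with mod_timer(t, ...); that is the same operation from_timer() performs in the other callbacks above, since from_timer() is a thin container_of() wrapper keyed on the timer field name. A tiny sketch showing the two equivalent retrievals (hypothetical struct):

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_link {
	struct timer_list link_poll;
};

static void my_link_poll(struct timer_list *t)
{
	/* Equivalent ways of recovering the enclosing structure: */
	struct my_link *a = container_of(t, struct my_link, link_poll);
	struct my_link *b = from_timer(b, t, link_poll);

	(void)a;
	(void)b;
	mod_timer(t, jiffies + HZ);	/* re-arm using the timer pointer itself */
}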
t                 208 drivers/net/phy/spi_ks8995.c 	struct spi_transfer t[2];
t                 215 drivers/net/phy/spi_ks8995.c 	memset(&t, 0, sizeof(t));
t                 217 drivers/net/phy/spi_ks8995.c 	t[0].tx_buf = &cmd;
t                 218 drivers/net/phy/spi_ks8995.c 	t[0].len = sizeof(cmd);
t                 219 drivers/net/phy/spi_ks8995.c 	spi_message_add_tail(&t[0], &m);
t                 221 drivers/net/phy/spi_ks8995.c 	t[1].rx_buf = buf;
t                 222 drivers/net/phy/spi_ks8995.c 	t[1].len = count;
t                 223 drivers/net/phy/spi_ks8995.c 	spi_message_add_tail(&t[1], &m);
t                 236 drivers/net/phy/spi_ks8995.c 	struct spi_transfer t[2];
t                 243 drivers/net/phy/spi_ks8995.c 	memset(&t, 0, sizeof(t));
t                 245 drivers/net/phy/spi_ks8995.c 	t[0].tx_buf = &cmd;
t                 246 drivers/net/phy/spi_ks8995.c 	t[0].len = sizeof(cmd);
t                 247 drivers/net/phy/spi_ks8995.c 	spi_message_add_tail(&t[0], &m);
t                 249 drivers/net/phy/spi_ks8995.c 	t[1].tx_buf = buf;
t                 250 drivers/net/phy/spi_ks8995.c 	t[1].len = count;
t                 251 drivers/net/phy/spi_ks8995.c 	spi_message_add_tail(&t[1], &m);
t                 109 drivers/net/slip/slip.c static void sl_keepalive(struct timer_list *t);
t                 110 drivers/net/slip/slip.c static void sl_outfill(struct timer_list *t);
t                1380 drivers/net/slip/slip.c static void sl_outfill(struct timer_list *t)
t                1382 drivers/net/slip/slip.c 	struct slip *sl = from_timer(sl, t, outfill_timer);
t                1411 drivers/net/slip/slip.c static void sl_keepalive(struct timer_list *t)
t                1413 drivers/net/slip/slip.c 	struct slip *sl = from_timer(sl, t, keepalive_timer);
t                 480 drivers/net/tun.c static void tun_flow_cleanup(struct timer_list *t)
t                 482 drivers/net/tun.c 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
t                 603 drivers/net/usb/catc.c static void catc_stats_timer(struct timer_list *t)
t                 605 drivers/net/usb/catc.c 	struct catc *catc = from_timer(catc, t, timer);
t                3701 drivers/net/usb/lan78xx.c static void lan78xx_stat_monitor(struct timer_list *t)
t                3703 drivers/net/usb/lan78xx.c 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
t                 227 drivers/net/usb/pegasus.c 		__le16 *t = (__le16 *) & data[1];
t                 228 drivers/net/usb/pegasus.c 		*t = cpu_to_le16(*regd);
t                 579 drivers/net/usb/sierra_net.c static void sierra_sync_timer(struct timer_list *t)
t                 581 drivers/net/usb/sierra_net.c 	struct sierra_net_data *priv = from_timer(priv, t, sync_timer);
t                1517 drivers/net/usb/usbnet.c static void usbnet_bh (struct timer_list *t)
t                1519 drivers/net/usb/usbnet.c 	struct usbnet		*dev = from_timer(dev, t, delay);
t                2719 drivers/net/vxlan.c static void vxlan_cleanup(struct timer_list *t)
t                2721 drivers/net/vxlan.c 	struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
t                 257 drivers/net/wan/hdlc_cisco.c static void cisco_timer(struct timer_list *t)
t                 259 drivers/net/wan/hdlc_cisco.c 	struct cisco_state *st = from_timer(st, t, timer);
t                 598 drivers/net/wan/hdlc_fr.c static void fr_timer(struct timer_list *t)
t                 600 drivers/net/wan/hdlc_fr.c 	struct frad_state *st = from_timer(st, t, timer);
t                 558 drivers/net/wan/hdlc_ppp.c static void ppp_timer(struct timer_list *t)
t                 560 drivers/net/wan/hdlc_ppp.c 	struct proto *proto = from_timer(proto, t, timer);
t                  99 drivers/net/wan/lmc/lmc_main.c static void lmc_watchdog(struct timer_list *t);
t                 629 drivers/net/wan/lmc/lmc_main.c static void lmc_watchdog(struct timer_list *t) /*fold00*/
t                 631 drivers/net/wan/lmc/lmc_main.c     lmc_softc_t *sc = from_timer(sc, t, timer);
t                1033 drivers/net/wan/sbni.c sbni_watchdog(struct timer_list *t)
t                1035 drivers/net/wan/sbni.c 	struct net_local   *nl  = from_timer(nl, t, watchdog);
t                1063 drivers/net/wan/sbni.c 	mod_timer(t, jiffies + SBNI_TIMEOUT);
t                1430 drivers/net/wan/sbni.c 		struct net_local  *t = netdev_priv(p);
t                1431 drivers/net/wan/sbni.c 		if( t->link == dev ) {
t                1432 drivers/net/wan/sbni.c 			t->link = snl->link;
t                1435 drivers/net/wan/sbni.c 		p = t->link;
t                 927 drivers/net/wan/sdla.c static void sdla_poll(struct timer_list *t)
t                 929 drivers/net/wan/sdla.c 	struct frad_local *flp = from_timer(flp, t, timer);
t                 893 drivers/net/wireless/ath/ar5523/ar5523.c static void ar5523_tx_wd_timer(struct timer_list *t)
t                 895 drivers/net/wireless/ath/ar5523/ar5523.c 	struct ar5523 *ar = from_timer(ar, t, tx_wd_timer);
t                 245 drivers/net/wireless/ath/ath10k/htt_rx.c static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
t                 247 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
t                 591 drivers/net/wireless/ath/ath10k/pci.c static void ath10k_pci_ps_timer(struct timer_list *t)
t                 593 drivers/net/wireless/ath/ath10k/pci.c 	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
t                 844 drivers/net/wireless/ath/ath10k/pci.c void ath10k_pci_rx_replenish_retry(struct timer_list *t)
t                 846 drivers/net/wireless/ath/ath10k/pci.c 	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
t                 234 drivers/net/wireless/ath/ath10k/pci.h void ath10k_pci_rx_replenish_retry(struct timer_list *t);
t                 638 drivers/net/wireless/ath/ath10k/snoc.c static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
t                 640 drivers/net/wireless/ath/ath10k/snoc.c 	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
t                3482 drivers/net/wireless/ath/ath10k/wmi.c 	__le32 t;
t                3503 drivers/net/wireless/ath/ath10k/wmi.c 			t = tim_info->tim_bitmap[i / 4];
t                3504 drivers/net/wireless/ath/ath10k/wmi.c 			v = __le32_to_cpu(t);
t                 896 drivers/net/wireless/ath/ath6kl/core.h void disconnect_timer_handler(struct timer_list *t);
t                 501 drivers/net/wireless/ath/ath6kl/main.c void disconnect_timer_handler(struct timer_list *t)
t                 503 drivers/net/wireless/ath/ath6kl/main.c 	struct ath6kl_vif *vif = from_timer(vif, t, disconnect_timer);
t                  63 drivers/net/wireless/ath/ath6kl/recovery.c static void ath6kl_recovery_hb_timer(struct timer_list *t)
t                  65 drivers/net/wireless/ath/ath6kl/recovery.c 	struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer);
t                1623 drivers/net/wireless/ath/ath6kl/txrx.c static void aggr_timeout(struct timer_list *t)
t                1626 drivers/net/wireless/ath/ath6kl/txrx.c 	struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
t                1079 drivers/net/wireless/ath/ath6kl/wmi.c void ath6kl_wmi_sscan_timer(struct timer_list *t)
t                1081 drivers/net/wireless/ath/ath6kl/wmi.c 	struct ath6kl_vif *vif = from_timer(vif, t, sched_scan_timer);
t                2722 drivers/net/wireless/ath/ath6kl/wmi.h void ath6kl_wmi_sscan_timer(struct timer_list *t);
t                3840 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int f[3], t[3];
t                3848 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[0] = eep->base_ext2.xatten1DBLow[chain];
t                3850 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[1] = eep->modalHeader5G.xatten1DB[chain];
t                3852 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[2] = eep->base_ext2.xatten1DBHigh[chain];
t                3855 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 							    f, t, 3);
t                3868 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int f[3], t[3];
t                3876 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[0] = eep->base_ext2.xatten1MarginLow[chain];
t                3878 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[1] = eep->modalHeader5G.xatten1Margin[chain];
t                3880 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[2] = eep->base_ext2.xatten1MarginHigh[chain];
t                3883 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 							    f, t, 3);
t                4069 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	s32 t[3], f[3] = {5180, 5500, 5785};
t                4078 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[0] = eep->base_ext1.quick_drop_low;
t                4079 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[1] = eep->modalHeader5G.quick_drop;
t                4080 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[2] = eep->base_ext1.quick_drop_high;
t                4081 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
t                4812 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int f[8], t[8], t1[3], t2[3], i;
t                4847 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[0] = eep->base_ext1.tempslopextension[2];
t                4852 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[1] = eep->modalHeader5G.tempSlope;
t                4857 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[2] = eep->base_ext1.tempslopextension[5];
t                4863 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 								 f, t, 3);
t                4874 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 				t[i] = eep->base_ext1.tempslopextension[i];
t                4878 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 								 f, t, 8);
t                4880 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[0] = eep->base_ext2.tempSlopeLow;
t                4882 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[1] = eep->modalHeader5G.tempSlope;
t                4884 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			t[2] = eep->base_ext2.tempSlopeHigh;
t                4887 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 								 f, t, 3);
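The ar9003_eeprom.c entries load frequency breakpoints into f[] and the matching calibration values into t[], then hand both to ar9003_hw_power_interpolate(), i.e. the value at the operating frequency is interpolated between stored EEPROM points. A generic sketch of that kind of piecewise-linear interpolation, not the driver's exact routine:

#include <linux/types.h>

/* Piecewise-linear interpolation of the value at x, given n breakpoints
 * (f[i], t[i]) with f[] sorted ascending. Values outside the range are
 * clamped to the nearest endpoint. Generic illustration only.
 */
static s32 my_interpolate(s32 x, const s32 *f, const s32 *t, int n)
{
	int i;

	if (x <= f[0])
		return t[0];
	if (x >= f[n - 1])
		return t[n - 1];

	for (i = 0; i < n - 1; i++) {
		if (x <= f[i + 1])
			return t[i] + (x - f[i]) * (t[i + 1] - t[i]) /
				      (f[i + 1] - f[i]);
	}
	return t[n - 1];	/* not reached for sorted input */
}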
t                 746 drivers/net/wireless/ath/ath9k/ath9k.h void ath_ani_calibrate(struct timer_list *t);
t                 753 drivers/net/wireless/ath/ath9k/ath9k.h void ath_ps_full_sleep(struct timer_list *t);
t                1046 drivers/net/wireless/ath/ath9k/channel.c static void ath_chanctx_timer(struct timer_list *t)
t                1048 drivers/net/wireless/ath/ath9k/channel.c 	struct ath_softc *sc = from_timer(sc, t, sched.timer);
t                1057 drivers/net/wireless/ath/ath9k/channel.c static void ath_offchannel_timer(struct timer_list *t)
t                1059 drivers/net/wireless/ath/ath9k/channel.c 	struct ath_softc *sc = from_timer(sc, t, offchannel.timer);
t                 194 drivers/net/wireless/ath/ath9k/gpio.c static void ath_btcoex_period_timer(struct timer_list *t)
t                 196 drivers/net/wireless/ath/ath9k/gpio.c 	struct ath_softc *sc = from_timer(sc, t, btcoex.period_timer);
t                 255 drivers/net/wireless/ath/ath9k/gpio.c static void ath_btcoex_no_stomp_timer(struct timer_list *t)
t                 257 drivers/net/wireless/ath/ath9k/gpio.c 	struct ath_softc *sc = from_timer(sc, t, btcoex.no_stomp_timer);
t                 587 drivers/net/wireless/ath/ath9k/htc.h void ath9k_htc_tx_cleanup_timer(struct timer_list *t);
t                 755 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c void ath9k_htc_tx_cleanup_timer(struct timer_list *t)
t                 757 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct ath9k_htc_priv *priv = from_timer(priv, t, tx.cleanup_timer);
t                 304 drivers/net/wireless/ath/ath9k/link.c void ath_ani_calibrate(struct timer_list *t)
t                 306 drivers/net/wireless/ath/ath9k/link.c 	struct ath_common *common = from_timer(common, t, ani.timer);
t                  96 drivers/net/wireless/ath/ath9k/main.c void ath_ps_full_sleep(struct timer_list *t)
t                  98 drivers/net/wireless/ath/ath9k/main.c 	struct ath_softc *sc = from_timer(sc, t, sleep_timer);
t                  32 drivers/net/wireless/ath/ath9k/xmit.c #define TIME_SYMBOLS(t)         ((t) >> 2)
t                  33 drivers/net/wireless/ath/ath9k/xmit.c #define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
t                 663 drivers/net/wireless/ath/carl9170/tx.c 	unsigned int r, t, q;
t                 683 drivers/net/wireless/ath/carl9170/tx.c 	t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
t                 685 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
t                 401 drivers/net/wireless/ath/carl9170/wlan.h static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
t                 403 drivers/net/wireless/ath/carl9170/wlan.h 	return (t->SAidx & 0xc0) >> 4 |
t                 404 drivers/net/wireless/ath/carl9170/wlan.h 	       (t->DAidx & 0xc0) >> 6;
t                2354 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct wil_probe_client_req *req, *t;
t                2361 drivers/net/wireless/ath/wil6210/cfg80211.c 	list_for_each_entry_safe(req, t, &vif->probe_client_pending, list) {
t                1347 drivers/net/wireless/ath/wil6210/debugfs.c static void print_temp(struct seq_file *s, const char *prefix, s32 t)
t                1349 drivers/net/wireless/ath/wil6210/debugfs.c 	switch (t) {
t                1355 drivers/net/wireless/ath/wil6210/debugfs.c 		seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
t                1356 drivers/net/wireless/ath/wil6210/debugfs.c 			   abs(t / 1000), abs(t % 1000));
t                 239 drivers/net/wireless/ath/wil6210/netdev.c static void wil_connect_timer_fn(struct timer_list *t)
t                 241 drivers/net/wireless/ath/wil6210/netdev.c 	struct wil6210_vif *vif = from_timer(vif, t, connect_timer);
t                 255 drivers/net/wireless/ath/wil6210/netdev.c static void wil_scan_timer_fn(struct timer_list *t)
t                 257 drivers/net/wireless/ath/wil6210/netdev.c 	struct wil6210_vif *vif = from_timer(vif, t, scan_timer);
t                 265 drivers/net/wireless/ath/wil6210/netdev.c static void wil_p2p_discovery_timer_fn(struct timer_list *t)
t                 267 drivers/net/wireless/ath/wil6210/netdev.c 	struct wil6210_vif *vif = from_timer(vif, t, p2p.discovery_timer);
t                3055 drivers/net/wireless/ath/wil6210/wmi.c 	struct pending_wmi_event *evt, *t;
t                3061 drivers/net/wireless/ath/wil6210/wmi.c 	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
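The wil6210 entries iterate pending-request lists with list_for_each_entry_safe(), which keeps a lookahead cursor (t here) so the current entry can be unlinked and freed mid-walk. A minimal sketch with a hypothetical entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct my_pending {
	struct list_head list;
};

/* Drain and free every entry; the "_safe" variant caches the next pointer
 * in "t" before the current entry is removed, so deletion is safe.
 */
static void my_drain(struct list_head *head)
{
	struct my_pending *req, *t;

	list_for_each_entry_safe(req, t, head, list) {
		list_del(&req->list);
		kfree(req);
	}
}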
t                 589 drivers/net/wireless/atmel/atmel.c static void atmel_management_timer(struct timer_list *t);
t                3421 drivers/net/wireless/atmel/atmel.c static void atmel_management_timer(struct timer_list *t)
t                3423 drivers/net/wireless/atmel/atmel.c 	struct atmel_private *priv = from_timer(priv, t, management_timer);
t                 272 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c static void brcmf_btcoex_timerfunc(struct timer_list *t)
t                 274 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c 	struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
t                3052 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static void brcmf_escan_timeout(struct timer_list *t)
t                3055 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			from_timer(cfg, t, escan_timeout);
t                4065 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c brcmf_sdio_watchdog(struct timer_list *t)
t                4067 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	struct brcmf_sdio *bus = from_timer(bus, t, timer);
t                  31 drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h 	TP_PROTO(struct brcms_timer *t),
t                  36 drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h 	TP_ARGS(t),
t                  48 drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h 		__entry->ms = t->ms;
t                  49 drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h 		__entry->set = t->set;
t                  50 drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h 		__entry->periodic = t->periodic;
t                 338 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static uint ntxdactive(struct dma_info *di, uint h, uint t)
t                 340 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	return txd(di, t-h);
t                 343 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static uint nrxdactive(struct dma_info *di, uint h, uint t)
t                 345 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	return rxd(di, t-h);
t                 289 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct brcms_timer *t, *next;
t                 319 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	for (t = wl->timers; t; t = next) {
t                 320 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		next = t->next;
t                 322 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		kfree(t->name);
t                 324 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		kfree(t);
t                1458 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct brcms_timer *t = container_of(work, struct brcms_timer,
t                1461 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	spin_lock_bh(&t->wl->lock);
t                1463 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (t->set) {
t                1464 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		if (t->periodic) {
t                1465 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			atomic_inc(&t->wl->callbacks);
t                1466 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			ieee80211_queue_delayed_work(t->wl->pub->ieee_hw,
t                1467 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 						     &t->dly_wrk,
t                1468 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 						     msecs_to_jiffies(t->ms));
t                1470 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			t->set = false;
t                1473 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		t->fn(t->arg);
t                1476 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	atomic_dec(&t->wl->callbacks);
t                1478 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	spin_unlock_bh(&t->wl->lock);
t                1491 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct brcms_timer *t;
t                1493 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
t                1494 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (!t)
t                1497 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	INIT_DELAYED_WORK(&t->dly_wrk, _brcms_timer);
t                1498 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->wl = wl;
t                1499 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->fn = fn;
t                1500 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->arg = arg;
t                1501 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->next = wl->timers;
t                1502 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	wl->timers = t;
t                1505 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->name = kstrdup(name, GFP_ATOMIC);
t                1508 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	return t;
t                1517 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
t                1519 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct ieee80211_hw *hw = t->wl->pub->ieee_hw;
t                1522 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (t->set)
t                1523 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		brcms_dbg_info(t->wl->wlc->hw->d11core,
t                1525 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			       __func__, t->name, periodic);
t                1527 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->ms = ms;
t                1528 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	t->periodic = (bool) periodic;
t                1529 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (!t->set) {
t                1530 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		t->set = true;
t                1531 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		atomic_inc(&t->wl->callbacks);
t                1534 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
t                1542 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c bool brcms_del_timer(struct brcms_timer *t)
t                1544 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (t->set) {
t                1545 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		t->set = false;
t                1546 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		if (!cancel_delayed_work(&t->dly_wrk))
t                1549 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		atomic_dec(&t->wl->callbacks);
t                1558 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c void brcms_free_timer(struct brcms_timer *t)
t                1560 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct brcms_info *wl = t->wl;
t                1564 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	brcms_del_timer(t);
t                1566 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (wl->timers == t) {
t                1569 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		kfree(t->name);
t                1571 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		kfree(t);
t                1578 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		if (tmp->next == t) {
t                1579 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			tmp->next = t->next;
t                1581 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			kfree(t->name);
t                1583 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			kfree(t);
t                 110 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h void brcms_timer(struct brcms_timer *t);
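The mac80211_if.c entries above are brcmsmac's private timer wrapper (a brcms_timer backed by a delayed work item) together with its add/del/free operations. A hedged usage sketch; the brcms_init_timer() constructor and its argument order are an assumption inferred from the allocation code listed above, while only brcms_add_timer(), brcms_del_timer() and brcms_free_timer() appear verbatim:

	/* Assumed constructor; the driver's mac80211_if.h holds the real declaration. */
	struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
					     void (*fn)(void *arg), void *arg,
					     const char *name);

	static void my_periodic_cb(void *arg)
	{
		/* invoked from _brcms_timer() under wl->lock */
	}

	static void timer_example(struct brcms_info *wl)
	{
		struct brcms_timer *t;

		t = brcms_init_timer(wl, my_periodic_cb, wl, "example");
		if (!t)
			return;
		brcms_add_timer(t, 100, 1);	/* re-arm every 100 ms */
		/* ... */
		brcms_del_timer(t);		/* stop it */
		brcms_free_timer(t);		/* unlink from wl->timers and free */
	}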
t                3396 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	u16 num_samps, t, k;
t                3430 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	for (t = 0; t < num_samps; t++) {
t                3438 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		data_buf[t] = (i_samp << 10) | q_samp;
t                23021 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	u16 t;
t                23031 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	for (t = 0; t < num_samps; t++)
t                23032 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		data_buf[t] = ((((unsigned int)tone_buf[t].i) & 0x3ff) << 10) |
t                23033 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 			      (((unsigned int)tone_buf[t].q) & 0x3ff);
t                23048 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	u16 num_samps, t, spur;
t                23075 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 	for (t = 0; t < num_samps; t++) {
t                23077 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		tone_buf[t] = cordic_calc_iq(theta);
t                23081 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val);
t                23082 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 		tone_buf[t].i = (s32)CORDIC_FLOAT(tone_buf[t].i * max_val);
t                  68 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c void wlapi_free_timer(struct wlapi_timer *t)
t                  70 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c 	brcms_free_timer((struct brcms_timer *)t);
t                  74 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic)
t                  76 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c 	brcms_add_timer((struct brcms_timer *)t, ms, periodic);
t                  79 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c bool wlapi_del_timer(struct wlapi_timer *t)
t                  81 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c 	return brcms_del_timer((struct brcms_timer *)t);
t                 136 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h void wlapi_free_timer(struct wlapi_timer *t);
t                 137 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
t                 138 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h bool wlapi_del_timer(struct wlapi_timer *t);
t                 169 drivers/net/wireless/intel/iwlegacy/3945-rs.c il3945_bg_rate_scale_flush(struct timer_list *t)
t                 171 drivers/net/wireless/intel/iwlegacy/3945-rs.c 	struct il3945_rs_sta *rs_sta = from_timer(rs_sta, t, rate_scale_flush);
t                4058 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_bg_stats_periodic(struct timer_list *t)
t                4060 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_priv *il = from_timer(il, t, stats_periodic);
t                 252 drivers/net/wireless/intel/iwlegacy/4965.h #define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
t                 253 drivers/net/wireless/intel/iwlegacy/4965.h 	((t) < IL_TX_POWER_TEMPERATURE_MIN || \
t                 254 drivers/net/wireless/intel/iwlegacy/4965.h 	 (t) > IL_TX_POWER_TEMPERATURE_MAX)
t                  30 drivers/net/wireless/intel/iwlegacy/common.c 	int t = 0;
t                  34 drivers/net/wireless/intel/iwlegacy/common.c 			return t;
t                  36 drivers/net/wireless/intel/iwlegacy/common.c 		t += interval;
t                  37 drivers/net/wireless/intel/iwlegacy/common.c 	} while (t < timeout);
t                 111 drivers/net/wireless/intel/iwlegacy/common.c 	int t = 0;
t                 115 drivers/net/wireless/intel/iwlegacy/common.c 			return t;
t                 117 drivers/net/wireless/intel/iwlegacy/common.c 		t += interval;
t                 118 drivers/net/wireless/intel/iwlegacy/common.c 	} while (t < timeout);
t                4832 drivers/net/wireless/intel/iwlegacy/common.c il_bg_watchdog(struct timer_list *t)
t                4834 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_priv *il = from_timer(il, t, watchdog);
t                1820 drivers/net/wireless/intel/iwlegacy/common.h void il_bg_watchdog(struct timer_list *t);
t                 388 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_statistics_periodic(struct timer_list *t)
t                 390 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
t                 545 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_bg_ucode_trace(struct timer_list *t)
t                 547 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
t                 153 drivers/net/wireless/intel/iwlwifi/dvm/tt.c static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
t                 155 drivers/net/wireless/intel/iwlwifi/dvm/tt.c 	struct iwl_priv *priv = from_timer(priv, t,
t                 204 drivers/net/wireless/intel/iwlwifi/dvm/tt.c static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
t                 206 drivers/net/wireless/intel/iwlwifi/dvm/tt.c 	struct iwl_priv *priv = from_timer(priv, t,
t                 139 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	int t = 0;
t                 143 drivers/net/wireless/intel/iwlwifi/iwl-io.c 			return t;
t                 145 drivers/net/wireless/intel/iwlwifi/iwl-io.c 		t += IWL_POLL_INTERVAL;
t                 146 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	} while (t < timeout);
t                 190 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	int t = 0;
t                 194 drivers/net/wireless/intel/iwlwifi/iwl-io.c 			return t;
t                 196 drivers/net/wireless/intel/iwlwifi/iwl-io.c 		t += IWL_POLL_INTERVAL;
t                 197 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	} while (t < timeout);
t                 253 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	int t = 0;
t                 257 drivers/net/wireless/intel/iwlwifi/iwl-io.c 			return t;
t                 259 drivers/net/wireless/intel/iwlwifi/iwl-io.c 		t += IWL_POLL_INTERVAL;
t                 260 drivers/net/wireless/intel/iwlwifi/iwl-io.c 	} while (t < timeout);
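The iwlegacy/iwlwifi poll helpers above all share one loop shape: test a condition, return the elapsed time on success, otherwise wait a fixed interval and accumulate it until the timeout is reached. A generic sketch of that idiom (the condition callback and delay are placeholders, not the drivers' register accessors):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Returns elapsed microseconds on success, -ETIMEDOUT otherwise. */
	static int poll_until(bool (*cond)(void *ctx), void *ctx,
			      int interval_us, int timeout_us)
	{
		int t = 0;

		do {
			if (cond(ctx))
				return t;
			udelay(interval_us);
			t += interval_us;
		} while (t < timeout_us);

		return -ETIMEDOUT;
	}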
t                  81 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_mvm_loc_entry *e, *t;
t                  87 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
t                2044 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_reorder_timer_expired(struct timer_list *t);
t                 606 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c void iwl_mvm_reorder_timer_expired(struct timer_list *t)
t                 608 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
t                 252 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
t                 255 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		from_timer(data, t, session_timer);
t                 633 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	int t = 0;
t                 658 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			t += 200;
t                 659 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		} while (t < 150000);
t                 183 drivers/net/wireless/intel/iwlwifi/pcie/tx.c static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
t                 185 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
t                 175 drivers/net/wireless/intersil/hostap/hostap_ap.c static void ap_handle_timer(struct timer_list *t)
t                 177 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sta_info *sta = from_timer(sta, t, timer);
t                2786 drivers/net/wireless/intersil/hostap/hostap_hw.c static void hostap_passive_scan(struct timer_list *t)
t                2788 drivers/net/wireless/intersil/hostap/hostap_hw.c 	local_info_t *local = from_timer(local, t, passive_scan_timer);
t                2861 drivers/net/wireless/intersil/hostap/hostap_hw.c static void hostap_tick_timer(struct timer_list *t)
t                2864 drivers/net/wireless/intersil/hostap/hostap_hw.c 	local_info_t *local = from_timer(local, t, tick_timer);
t                 318 drivers/net/wireless/intersil/orinoco/orinoco_usb.c static void ezusb_request_timerfn(struct timer_list *t)
t                 320 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	struct request_context *ctx = from_timer(ctx, t, timer);
t                  48 drivers/net/wireless/intersil/p54/p54spi.c 	struct spi_transfer t[2];
t                  56 drivers/net/wireless/intersil/p54/p54spi.c 	memset(t, 0, sizeof(t));
t                  58 drivers/net/wireless/intersil/p54/p54spi.c 	t[0].tx_buf = &addr;
t                  59 drivers/net/wireless/intersil/p54/p54spi.c 	t[0].len = sizeof(addr);
t                  60 drivers/net/wireless/intersil/p54/p54spi.c 	spi_message_add_tail(&t[0], &m);
t                  62 drivers/net/wireless/intersil/p54/p54spi.c 	t[1].rx_buf = buf;
t                  63 drivers/net/wireless/intersil/p54/p54spi.c 	t[1].len = len;
t                  64 drivers/net/wireless/intersil/p54/p54spi.c 	spi_message_add_tail(&t[1], &m);
t                  73 drivers/net/wireless/intersil/p54/p54spi.c 	struct spi_transfer t[3];
t                  81 drivers/net/wireless/intersil/p54/p54spi.c 	memset(t, 0, sizeof(t));
t                  83 drivers/net/wireless/intersil/p54/p54spi.c 	t[0].tx_buf = &addr;
t                  84 drivers/net/wireless/intersil/p54/p54spi.c 	t[0].len = sizeof(addr);
t                  85 drivers/net/wireless/intersil/p54/p54spi.c 	spi_message_add_tail(&t[0], &m);
t                  87 drivers/net/wireless/intersil/p54/p54spi.c 	t[1].tx_buf = buf;
t                  88 drivers/net/wireless/intersil/p54/p54spi.c 	t[1].len = len & ~1;
t                  89 drivers/net/wireless/intersil/p54/p54spi.c 	spi_message_add_tail(&t[1], &m);
t                  95 drivers/net/wireless/intersil/p54/p54spi.c 		t[2].tx_buf = &last_word;
t                  96 drivers/net/wireless/intersil/p54/p54spi.c 		t[2].len = sizeof(last_word);
t                  97 drivers/net/wireless/intersil/p54/p54spi.c 		spi_message_add_tail(&t[2], &m);
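The p54spi entries above assemble one spi_message from several spi_transfer segments: the register address goes out first, then the payload (plus a trailing pad word for odd lengths in the write path). A minimal sketch of the multi-transfer pattern using the standard SPI core API:

	#include <linux/spi/spi.h>
	#include <linux/string.h>

	/* Send a 16-bit address followed by a payload in one SPI message (sketch). */
	static int spi_write_addr_buf(struct spi_device *spi, __le16 addr,
				      const void *buf, size_t len)
	{
		struct spi_transfer t[2];
		struct spi_message m;

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].tx_buf = &addr;
		t[0].len = sizeof(addr);
		spi_message_add_tail(&t[0], &m);

		t[1].tx_buf = buf;
		t[1].len = len;
		spi_message_add_tail(&t[1], &m);

		return spi_sync(spi, &m);
	}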
t                  36 drivers/net/wireless/intersil/prism54/oid_mgt.c #define OID_STRUCT(name,oid,s,t) [name] = {oid, 0, sizeof(s), t}
t                  37 drivers/net/wireless/intersil/prism54/oid_mgt.c #define OID_STRUCT_C(name,oid,s,t) OID_STRUCT(name,oid,s,t | OID_FLAG_CACHED)
t                 586 drivers/net/wireless/intersil/prism54/oid_mgt.c 		struct oid_t *t = &(isl_oid[l[i]]);
t                 589 drivers/net/wireless/intersil/prism54/oid_mgt.c 		u32 oid = t->oid;
t                 591 drivers/net/wireless/intersil/prism54/oid_mgt.c 		while (j <= t->range) {
t                 593 drivers/net/wireless/intersil/prism54/oid_mgt.c 						      oid, data, t->size,
t                 606 drivers/net/wireless/intersil/prism54/oid_mgt.c 			data += t->size;
t                 820 drivers/net/wireless/intersil/prism54/oid_mgt.c 			int i, t;
t                 822 drivers/net/wireless/intersil/prism54/oid_mgt.c 			t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
t                 824 drivers/net/wireless/intersil/prism54/oid_mgt.c 				t += snprintf(str + t, PRIV_STR_SIZE - t,
t                 826 drivers/net/wireless/intersil/prism54/oid_mgt.c 			return t;
t                 862 drivers/net/wireless/intersil/prism54/oid_mgt.c 			int t, i;
t                 863 drivers/net/wireless/intersil/prism54/oid_mgt.c 			t = snprintf(str, PRIV_STR_SIZE,
t                 867 drivers/net/wireless/intersil/prism54/oid_mgt.c 				t += snprintf(str + t, PRIV_STR_SIZE - t,
t                 869 drivers/net/wireless/intersil/prism54/oid_mgt.c 			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
t                 870 drivers/net/wireless/intersil/prism54/oid_mgt.c 			return t;
t                 876 drivers/net/wireless/intersil/prism54/oid_mgt.c 			int t, i;
t                 877 drivers/net/wireless/intersil/prism54/oid_mgt.c 			t = snprintf(str, PRIV_STR_SIZE, "hex data=");
t                 879 drivers/net/wireless/intersil/prism54/oid_mgt.c 				t += snprintf(str + t, PRIV_STR_SIZE - t,
t                 881 drivers/net/wireless/intersil/prism54/oid_mgt.c 			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
t                 882 drivers/net/wireless/intersil/prism54/oid_mgt.c 			return t;
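The prism54 helpers above accumulate output with repeated t += snprintf(str + t, PRIV_STR_SIZE - t, ...). One caveat: snprintf() returns the length that would have been written, so after truncation t can exceed the buffer size and the remaining-size argument goes negative; in-kernel code usually prefers scnprintf(), which returns the number of characters actually stored. A small sketch of the safer variant:

	#include <linux/kernel.h>	/* scnprintf() */
	#include <linux/types.h>

	static int format_freqs(char *str, size_t size, const u16 *mhz, int nr)
	{
		int t, i;

		t = scnprintf(str, size, "nr=%d\n", nr);
		for (i = 0; i < nr; i++)
			t += scnprintf(str + t, size - t, " %u", mhz[i]);
		t += scnprintf(str + t, size - t, "\n");
		return t;
	}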
t                 703 drivers/net/wireless/marvell/libertas/cmd.c 	struct ieee80211_country_ie_triplet *t;
t                 768 drivers/net/wireless/marvell/libertas/cmd.c 				t = &domain->triplet[num_triplet];
t                 769 drivers/net/wireless/marvell/libertas/cmd.c 				t->chans.first_channel = first_channel;
t                 770 drivers/net/wireless/marvell/libertas/cmd.c 				t->chans.num_channels = num_parsed_chan;
t                 771 drivers/net/wireless/marvell/libertas/cmd.c 				t->chans.max_power = max_pwr;
t                 781 drivers/net/wireless/marvell/libertas/cmd.c 			t = &domain->triplet[num_triplet];
t                 782 drivers/net/wireless/marvell/libertas/cmd.c 			t->chans.first_channel = first_channel;
t                 783 drivers/net/wireless/marvell/libertas/cmd.c 			t->chans.num_channels = num_parsed_chan;
t                 784 drivers/net/wireless/marvell/libertas/cmd.c 			t->chans.max_power = max_pwr;
t                 166 drivers/net/wireless/marvell/libertas/if_usb.c static void if_usb_fw_timeo(struct timer_list *t)
t                 168 drivers/net/wireless/marvell/libertas/if_usb.c 	struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
t                 726 drivers/net/wireless/marvell/libertas/main.c static void lbs_cmd_timeout_handler(struct timer_list *t)
t                 728 drivers/net/wireless/marvell/libertas/main.c 	struct lbs_private *priv = from_timer(priv, t, command_timer);
t                 760 drivers/net/wireless/marvell/libertas/main.c static void lbs_tx_lockup_handler(struct timer_list *t)
t                 762 drivers/net/wireless/marvell/libertas/main.c 	struct lbs_private *priv = from_timer(priv, t, tx_lockup_timer);
t                 783 drivers/net/wireless/marvell/libertas/main.c static void auto_deepsleep_timer_fn(struct timer_list *t)
t                 785 drivers/net/wireless/marvell/libertas/main.c 	struct lbs_private *priv = from_timer(priv, t, auto_deepsleep_timer);
t                 114 drivers/net/wireless/marvell/libertas_tf/if_usb.c static void if_usb_fw_timeo(struct timer_list *t)
t                 116 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
t                 128 drivers/net/wireless/marvell/libertas_tf/main.c static void command_timer_fn(struct timer_list *t)
t                 130 drivers/net/wireless/marvell/libertas_tf/main.c 	struct lbtf_private *priv = from_timer(priv, t, command_timer);
t                 312 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_flush_data(struct timer_list *t)
t                 315 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		from_timer(ctx, t, timer);
t                 540 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	struct ieee80211_country_ie_triplet *t;
t                 584 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			t = &domain_info->triplet[no_of_triplet];
t                 585 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			t->chans.first_channel = first_chan;
t                 586 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			t->chans.num_channels = no_of_parsed_chan;
t                 587 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			t->chans.max_power = max_pwr;
t                 597 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		t = &domain_info->triplet[no_of_triplet];
t                 598 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		t->chans.first_channel = first_chan;
t                 599 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		t->chans.num_channels = no_of_parsed_chan;
t                 600 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		t->chans.max_power = max_pwr;
t                 931 drivers/net/wireless/marvell/mwifiex/cmdevt.c mwifiex_cmd_timeout_func(struct timer_list *t)
t                 933 drivers/net/wireless/marvell/mwifiex/cmdevt.c 	struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
t                  54 drivers/net/wireless/marvell/mwifiex/init.c static void wakeup_timer_fn(struct timer_list *t)
t                  56 drivers/net/wireless/marvell/mwifiex/init.c 	struct mwifiex_adapter *adapter = from_timer(adapter, t, wakeup_timer);
t                  66 drivers/net/wireless/marvell/mwifiex/init.c static void fw_dump_timer_fn(struct timer_list *t)
t                  68 drivers/net/wireless/marvell/mwifiex/init.c 	struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
t                1096 drivers/net/wireless/marvell/mwifiex/main.h void mwifiex_cmd_timeout_func(struct timer_list *t);
t                1641 drivers/net/wireless/marvell/mwifiex/main.h void mwifiex_check_auto_tdls(struct timer_list *t);
t                1413 drivers/net/wireless/marvell/mwifiex/tdls.c void mwifiex_check_auto_tdls(struct timer_list *t)
t                1415 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct mwifiex_private *priv = from_timer(priv, t, auto_tdls_timer);
t                1123 drivers/net/wireless/marvell/mwifiex/usb.c static void mwifiex_usb_tx_aggr_tmo(struct timer_list *t)
t                1128 drivers/net/wireless/marvell/mwifiex/usb.c 		from_timer(timer_context, t, hold_timer);
t                 291 drivers/net/wireless/mediatek/mt76/dma.c 	struct mt76_txwi_cache *t;
t                 296 drivers/net/wireless/mediatek/mt76/dma.c 	t = mt76_get_txwi(dev);
t                 297 drivers/net/wireless/mediatek/mt76/dma.c 	if (!t) {
t                 301 drivers/net/wireless/mediatek/mt76/dma.c 	txwi = mt76_get_txwi_ptr(dev, t);
t                 312 drivers/net/wireless/mediatek/mt76/dma.c 	tx_info.buf[n].addr = t->dma_addr;
t                 331 drivers/net/wireless/mediatek/mt76/dma.c 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
t                 334 drivers/net/wireless/mediatek/mt76/dma.c 	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
t                 345 drivers/net/wireless/mediatek/mt76/dma.c 				tx_info.info, tx_info.skb, t);
t                 354 drivers/net/wireless/mediatek/mt76/dma.c 	e.txwi = t;
t                 356 drivers/net/wireless/mediatek/mt76/dma.c 	mt76_put_txwi(dev, t);
t                 636 drivers/net/wireless/mediatek/mt76/mt76.h mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
t                 638 drivers/net/wireless/mediatek/mt76/mt76.h 	return (u8 *)t - dev->drv->txwi_size;
t                 775 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
t                 232 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		struct mt76_txwi_cache *t;
t                 240 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
t                 242 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		e->skb = t ? t->skb : NULL;
t                 447 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			  struct mt76_txwi_cache *t)
t                 452 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	txp = mt7615_txwi_to_txp(dev, t);
t                 775 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt76_txwi_cache *t;
t                 819 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t                 820 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	t->skb = tx_info->skb;
t                 823 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
t                 321 drivers/net/wireless/mediatek/mt76/mt7615/mac.h mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
t                 325 drivers/net/wireless/mediatek/mt76/mt7615/mac.h 	if (!t)
t                 328 drivers/net/wireless/mediatek/mt76/mt7615/mac.h 	txwi = mt76_get_txwi_ptr(dev, t);
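The mt7615 entries above hand each queued frame a token with idr_alloc() and later resolve the token reported by the TX-free event with idr_remove(), which returns the stored pointer. A hedged sketch of that allocate / look-up-and-release pattern with the generic IDR API (the tx_token type and helper names are illustrative, not the driver's):

	#include <linux/idr.h>
	#include <linux/gfp.h>
	#include <linux/skbuff.h>

	struct tx_token {			/* hypothetical per-frame bookkeeping */
		struct sk_buff *skb;
	};

	/* Assign an id in [0, 4096) to @t; returns the id or a negative errno. */
	static int token_assign(struct idr *tokens, struct tx_token *t)
	{
		return idr_alloc(tokens, t, 0, 4096, GFP_ATOMIC);
	}

	/* Resolve and release an id reported back by the hardware. */
	static struct tx_token *token_release(struct idr *tokens, int id)
	{
		return idr_remove(tokens, id);
	}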
t                 154 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 				  struct mt76_rate_power *t)
t                 160 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	memset(t, 0, sizeof(*t));
t                 164 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->cck[0] = t->cck[1] = s6_to_s8(val);
t                 165 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
t                 170 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
t                 171 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
t                 176 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
t                 177 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
t                 182 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t                 183 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
t                 188 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t                 189 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
t                 194 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t                 195 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
t                 200 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t                 201 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
t                 205 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->vht[8] = s6_to_s8(val);
t                 206 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	t->vht[9] = s6_to_s8(val >> 8);
t                 209 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 	mt76x02_add_rate_power_offset(t, delta);
t                  22 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h 				  struct mt76_rate_power *t);
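The mt76x0 EEPROM parser above unpacks two per-rate power values from each 16-bit word through s6_to_s8(), i.e. a 6-bit two's-complement field sign-extended to s8, low byte first and high byte second. A standalone illustration of that conversion (the helper below is modelled on the name and is an assumption, not the driver's definition):

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Sign-extend a 6-bit two's-complement field: 0x20..0x3f map to -32..-1. */
	static s8 s6_to_s8_example(u32 val)
	{
		s8 ret = val & 0x3f;

		if (ret & BIT(5))
			ret -= BIT(6);
		return ret;
	}

	/* Each EEPROM word carries two such fields, one per byte. */
	static void unpack_pair(u16 word, s8 *lo, s8 *hi)
	{
		*lo = s6_to_s8_example(word);
		*hi = s6_to_s8_example(word >> 8);
	}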
t                 264 drivers/net/wireless/mediatek/mt76/mt76x0/init.c 	struct mt76_rate_power t;
t                 271 drivers/net/wireless/mediatek/mt76/mt76x0/init.c 		mt76x0_get_tx_power_per_rate(dev, chan, &t);
t                 274 drivers/net/wireless/mediatek/mt76/mt76x0/init.c 		chan->orig_mpwr = (mt76x02_get_max_rate_power(&t) + tp) / 2;
t                 844 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	struct mt76_rate_power *t = &dev->mt76.rate_power;
t                 847 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
t                 850 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	mt76x02_add_rate_power_offset(t, info);
t                 851 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
t                 852 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
t                 853 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 	mt76x02_add_rate_power_offset(t, -info);
t                 181 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 	struct mt76_txwi_cache __maybe_unused *t;
t                  95 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 	struct mt76_rate_power *t = &dev->mt76.rate_power;
t                 101 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
t                 102 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 				      t->ofdm[2]));
t                 104 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
t                 105 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 				      t->ht[2]));
t                 107 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
t                 108 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 				      t->ht[10]));
t                 110 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
t                 111 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 				      t->stbc[2]));
t                 113 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
t                 115 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
t                 116 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 				      t->vht[9]));
t                 118 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
t                 120 drivers/net/wireless/mediatek/mt76/mt76x02_phy.c 		mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
t                 278 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
t                 286 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	memset(t, 0, sizeof(*t));
t                 289 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
t                 290 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
t                 296 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
t                 297 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
t                 303 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
t                 304 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
t                 307 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
t                 308 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
t                 311 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
t                 312 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
t                 315 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
t                 316 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
t                 319 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t                 320 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
t                 323 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
t                 324 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
t                 327 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
t                 328 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
t                 333 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
t                 335 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
t                 336 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->stbc[8] = t->vht[8];
t                 337 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->stbc[9] = t->vht[9];
t                 343 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 			 struct mt76x2_tx_power_info *t,
t                 361 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].tssi_slope = data[0];
t                 362 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].tssi_offset = data[1];
t                 363 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].target_power = data[2];
t                 364 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].delta =
t                 368 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->target_power = val >> 8;
t                 373 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 			 struct mt76x2_tx_power_info *t,
t                 413 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].tssi_slope = data[0];
t                 414 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].tssi_offset = data[1];
t                 415 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].target_power = data[2];
t                 416 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->chain[chain].delta =
t                 420 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->target_power = val & 0xff;
t                 424 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 			   struct mt76x2_tx_power_info *t,
t                 429 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	memset(t, 0, sizeof(*t));
t                 436 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 		mt76x2_get_power_info_5g(dev, t, chan, 0,
t                 438 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 		mt76x2_get_power_info_5g(dev, t, chan, 1,
t                 441 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 		mt76x2_get_power_info_2g(dev, t, chan, 0,
t                 443 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 		mt76x2_get_power_info_2g(dev, t, chan, 1,
t                 448 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	    !mt76x02_field_valid(t->target_power))
t                 449 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 		t->target_power = t->chain[0].target_power;
t                 451 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->delta_bw40 = mt76x02_rate_power_val(bw40);
t                 452 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->delta_bw80 = mt76x02_rate_power_val(bw80);
t                 456 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
t                 462 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	memset(t, 0, sizeof(*t));
t                 471 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->temp_25_ref = val & 0x7f;
t                 481 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->high_slope = slope & 0xff;
t                 482 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->low_slope = slope >> 8;
t                 483 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->lower_bound = 0 - (bounds & 0xf);
t                 484 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c 	t->upper_bound = (bounds >> 4) & 0xf;
t                  43 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
t                  46 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h 			   struct mt76x2_tx_power_info *t,
t                  48 drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
t                 156 drivers/net/wireless/mediatek/mt76/mt76x2/init.c 	struct mt76_rate_power t = {};
t                 163 drivers/net/wireless/mediatek/mt76/mt76x2/init.c 		mt76x2_get_rate_power(dev, &t, chan);
t                 165 drivers/net/wireless/mediatek/mt76/mt76x2/init.c 		chan->orig_mpwr = mt76x02_get_max_rate_power(&t) +
t                 256 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 	struct mt76x2_temp_comp t;
t                 259 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 	if (mt76x2_get_temp_comp(dev, &t))
t                 263 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 	temp -= t.temp_25_ref;
t                 268 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 		db_diff = (temp - 25) / t.high_slope;
t                 270 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 		db_diff = (25 - temp) / t.low_slope;
t                 272 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 	db_diff = min(db_diff, t.upper_bound);
t                 273 drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c 	db_diff = max(db_diff, t.lower_bound);
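The temperature-compensation path above bounds db_diff with a min()/max() pair; the kernel's clamp() macro expresses the same thing in one step when all operands share a type. A one-line equivalent, shown as a standalone illustration rather than a proposed change to the driver:

	#include <linux/minmax.h>	/* clamp(); provided via <linux/kernel.h> on older trees */

	static int clamp_example(int db_diff, int lower_bound, int upper_bound)
	{
		/* same effect as min(db_diff, upper_bound) followed by max(..., lower_bound) */
		return clamp(db_diff, lower_bound, upper_bound);
	}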
t                 143 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	struct mt76_rate_power t = {};
t                 153 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	mt76x2_get_rate_power(dev, &t, chan);
t                 154 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
t                 155 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
t                 156 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
t                 158 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	base_power = mt76x2_get_min_rate_power(&t);
t                 174 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	mt76x02_add_rate_power_offset(&t, -base_power);
t                 178 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	dev->mt76.rate_power = t;
t                 207 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 	struct mt76x2_tssi_comp t = {};
t                 214 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.cal_mode = BIT(0);
t                 215 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		mt76x2_mcu_tssi_comp(dev, &t);
t                 225 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 			t.pa_mode = 1;
t                 227 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.cal_mode = BIT(1);
t                 228 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.slope0 = txp.chain[0].tssi_slope;
t                 229 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.offset0 = txp.chain[0].tssi_offset;
t                 230 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.slope1 = txp.chain[1].tssi_slope;
t                 231 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		t.offset1 = txp.chain[1].tssi_offset;
t                 232 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		mt76x2_mcu_tssi_comp(dev, &t);
t                 234 drivers/net/wireless/mediatek/mt76/mt76x2/phy.c 		if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked)
t                  11 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_txwi_cache *t;
t                  16 drivers/net/wireless/mediatek/mt76/tx.c 	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
t                  23 drivers/net/wireless/mediatek/mt76/tx.c 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t                  24 drivers/net/wireless/mediatek/mt76/tx.c 	t->dma_addr = addr;
t                  26 drivers/net/wireless/mediatek/mt76/tx.c 	return t;
t                  32 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_txwi_cache *t = NULL;
t                  36 drivers/net/wireless/mediatek/mt76/tx.c 		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
t                  38 drivers/net/wireless/mediatek/mt76/tx.c 		list_del(&t->list);
t                  42 drivers/net/wireless/mediatek/mt76/tx.c 	return t;
t                  48 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
t                  50 drivers/net/wireless/mediatek/mt76/tx.c 	if (t)
t                  51 drivers/net/wireless/mediatek/mt76/tx.c 		return t;
t                  57 drivers/net/wireless/mediatek/mt76/tx.c mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
t                  59 drivers/net/wireless/mediatek/mt76/tx.c 	if (!t)
t                  63 drivers/net/wireless/mediatek/mt76/tx.c 	list_add(&t->list, &dev->txwi_cache);
t                  70 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_txwi_cache *t;
t                  72 drivers/net/wireless/mediatek/mt76/tx.c 	while ((t = __mt76_get_txwi(dev)) != NULL)
t                  73 drivers/net/wireless/mediatek/mt76/tx.c 		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
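The mt76/tx.c entries above implement a small free list of TXWI descriptors: __mt76_get_txwi() pops a cached entry under the device lock, mt76_put_txwi() pushes one back, and the tear-down loop unmaps whatever is still cached. A generic sketch of such a lock-protected object cache (types and names are illustrative, not mt76's):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	struct obj_cache {
		spinlock_t lock;
		struct list_head free;		/* cached, ready-to-reuse objects */
	};

	struct cached_obj {
		struct list_head list;
	};

	static struct cached_obj *cache_get(struct obj_cache *c)
	{
		struct cached_obj *o = NULL;

		spin_lock_bh(&c->lock);
		if (!list_empty(&c->free)) {
			o = list_first_entry(&c->free, struct cached_obj, list);
			list_del(&o->list);
		}
		spin_unlock_bh(&c->lock);

		/* fall back to a fresh allocation when the cache is empty */
		return o ? o : kzalloc(sizeof(*o), GFP_ATOMIC);
	}

	static void cache_put(struct obj_cache *c, struct cached_obj *o)
	{
		if (!o)
			return;

		spin_lock_bh(&c->lock);
		list_add(&o->list, &c->free);
		spin_unlock_bh(&c->lock);
	}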
t                 265 drivers/net/wireless/mediatek/mt7601u/eeprom.c 	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
t                 269 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
t                 270 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
t                 272 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
t                 273 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
t                 275 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
t                 276 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
t                 279 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
t                 280 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
t                 281 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
t                 282 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
t                 285 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
t                 286 drivers/net/wireless/mediatek/mt7601u/eeprom.c 		mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
t                 291 drivers/net/wireless/mediatek/mt7601u/phy.c 	const struct reg_table *t;
t                 296 drivers/net/wireless/mediatek/mt7601u/phy.c 	t = &bbp_mode_table[dev->temp_mode][dev->bw];
t                 298 drivers/net/wireless/mediatek/mt7601u/phy.c 	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
t                 303 drivers/net/wireless/mediatek/mt7601u/phy.c 	const struct reg_table *t;
t                 312 drivers/net/wireless/mediatek/mt7601u/phy.c 	t = bbp_mode_table[dev->temp_mode];
t                 314 drivers/net/wireless/mediatek/mt7601u/phy.c 				      t[2].regs, t[2].n);
t                 319 drivers/net/wireless/mediatek/mt7601u/phy.c 				       t[dev->bw].regs, t[dev->bw].n);
t                 324 drivers/net/wireless/mediatek/mt7601u/phy.c 	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
t                 330 drivers/net/wireless/mediatek/mt7601u/phy.c 		t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
t                 331 drivers/net/wireless/mediatek/mt7601u/phy.c 		t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
t                 337 drivers/net/wireless/mediatek/mt7601u/phy.c 		t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
t                 338 drivers/net/wireless/mediatek/mt7601u/phy.c 		t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
t                 374 drivers/net/wireless/mediatek/mt7601u/phy.c 	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
t                 429 drivers/net/wireless/mediatek/mt7601u/phy.c 	mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
t                 430 drivers/net/wireless/mediatek/mt7601u/phy.c 					 int_to_s6(t->ofdm[0].bw20) << 16 |
t                 431 drivers/net/wireless/mediatek/mt7601u/phy.c 					 int_to_s6(t->cck[1].bw20) << 8 |
t                 432 drivers/net/wireless/mediatek/mt7601u/phy.c 					 int_to_s6(t->cck[0].bw20));
t                5047 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	u8 t, i;
t                5079 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		t = eeprom & 0x3f;
t                5080 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (t == 32)
t                5081 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			t++;
t                5083 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		gdata = t;
t                5085 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		t = (eeprom & 0x3f00) >> 8;
t                5086 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (t == 32)
t                5087 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			t++;
t                5089 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		gdata |= (t << 8);
t                5095 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		t = eeprom & 0x3f;
t                5096 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (t == 32)
t                5097 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			t++;
t                5099 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		gdata |= (t << 16);
t                5101 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		t = (eeprom & 0x3f00) >> 8;
t                5102 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (t == 32)
t                5103 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			t++;
t                5105 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		gdata |= (t << 24);
t                5136 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS);
t                5137 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t);
t                5141 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7);
t                5142 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t);
t                5148 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14);
t                5149 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t);
t                5155 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6);
t                5156 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t);
t                  83 drivers/net/wireless/ray_cs.c static void authenticate_timeout(struct timer_list *t);
t                  93 drivers/net/wireless/ray_cs.c static void verify_dl_startup(struct timer_list *t);
t                 111 drivers/net/wireless/ray_cs.c static void join_net(struct timer_list *t);
t                 112 drivers/net/wireless/ray_cs.c static void start_net(struct timer_list *t);
t                 626 drivers/net/wireless/ray_cs.c static void verify_dl_startup(struct timer_list *t)
t                 628 drivers/net/wireless/ray_cs.c 	ray_dev_t *local = from_timer(local, t, timer);
t                 668 drivers/net/wireless/ray_cs.c static void start_net(struct timer_list *t)
t                 670 drivers/net/wireless/ray_cs.c 	ray_dev_t *local = from_timer(local, t, timer);
t                 695 drivers/net/wireless/ray_cs.c static void join_net(struct timer_list *t)
t                 697 drivers/net/wireless/ray_cs.c 	ray_dev_t *local = from_timer(local, t, timer);
t                1626 drivers/net/wireless/ray_cs.c static void authenticate_timeout(struct timer_list *t)
t                1628 drivers/net/wireless/ray_cs.c 	ray_dev_t *local = from_timer(local, t, timer);
t                1008 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 				      int result[][8], int t)
t                1042 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 	if (t == 0) {
t                1084 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 			result[t][0] = (val32 >> 16) & 0x3ff;
t                1087 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 			result[t][1] = (val32 >> 16) & 0x3ff;
t                1101 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 			result[t][2] = (val32 >> 16) & 0x3ff;
t                1104 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 			result[t][3] = (val32 >> 16) & 0x3ff;
t                1130 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 				result[t][4] = (val32 >> 16) & 0x3ff;
t                1132 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 				result[t][5] = (val32 >> 16) & 0x3ff;
t                1145 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 				result[t][6] = (val32 >> 16) & 0x3ff;
t                1148 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 				result[t][7] = (val32 >> 16) & 0x3ff;
t                1160 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c 	if (t) {
t                 879 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 				      int result[][8], int t)
t                 913 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 	if (t == 0) {
t                 966 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 			result[t][0] = (val32 >> 16) & 0x3ff;
t                 969 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 			result[t][1] = (val32 >> 16) & 0x3ff;
t                 983 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 			result[t][2] = (val32 >> 16) & 0x3ff;
t                 986 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 			result[t][3] = (val32 >> 16) & 0x3ff;
t                1020 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 				result[t][4] = (val32 >> 16) & 0x3ff;
t                1022 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 				result[t][5] = (val32 >> 16) & 0x3ff;
t                1035 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 				result[t][6] = (val32 >> 16) & 0x3ff;
t                1038 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 				result[t][7] = (val32 >> 16) & 0x3ff;
t                1053 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c 	if (t) {
t                3112 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				     int result[][8], int t)
t                3144 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (t == 0) {
t                3155 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (t == 0) {
t                3213 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][0] = (val32 >> 16) & 0x3ff;
t                3216 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][1] = (val32 >> 16) & 0x3ff;
t                3219 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][2] = (val32 >> 16) & 0x3ff;
t                3222 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][3] = (val32 >> 16) & 0x3ff;
t                3231 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][0] = (val32 >> 16) & 0x3ff;
t                3234 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			result[t][1] = (val32 >> 16) & 0x3ff;
t                3256 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][4] = (val32 >> 16) & 0x3ff;
t                3258 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][5] = (val32 >> 16) & 0x3ff;
t                3260 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][6] = (val32 >> 16) & 0x3ff;
t                3262 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][7] = (val32 >> 16) & 0x3ff;
t                3267 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][4] = (val32 >> 16) & 0x3ff;
t                3269 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				result[t][5] = (val32 >> 16) & 0x3ff;
t                3280 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (t) {
t                2232 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_watch_dog_timer_callback(struct timer_list *t)
t                2234 drivers/net/wireless/realtek/rtlwifi/base.c 	struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
t                2382 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
t                2385 drivers/net/wireless/realtek/rtlwifi/base.c 		from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer);
t                  74 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_watch_dog_timer_callback(struct timer_list *t);
t                 130 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t);
t                1690 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c void rtl88e_dm_fast_antenna_training_callback(struct timer_list *t)
t                1693 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c 		from_timer(rtlpriv, t, works.fast_antenna_training_timer);
t                 251 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h void rtl88e_dm_fast_antenna_training_callback(struct timer_list *t);
t                 235 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c void rtl88ee_fw_clk_off_timer_callback(struct timer_list *t)
t                 237 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	struct rtl_priv *rtlpriv = from_timer(rtlpriv, t,
t                  39 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h void rtl88ee_fw_clk_off_timer_callback(struct timer_list *t);
t                1693 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				     long result[][8], u8 t, bool is2t)
t                1715 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 	if (t == 0) {
t                1725 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 	if (t == 0) {
t                1761 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1763 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                1774 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                1776 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                1794 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1798 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				result[t][5] =
t                1801 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				result[t][6] =
t                1804 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				result[t][7] =
t                1809 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1814 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
t                1821 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 	if (t != 0) {
t                1207 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				     long result[][8], u8 t, bool is2t)
t                1225 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 	if (t == 0) {
t                1234 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 	if (t == 0) {
t                1242 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 	if (t == 0) {
t                1265 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1267 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                1269 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                1271 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                1276 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94,
t                1279 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 		result[t][1] =
t                1290 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1294 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				result[t][5] =
t                1297 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				result[t][6] =
t                1300 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				result[t][7] =
t                1305 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1310 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
t                1321 drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c 	if (t != 0) {
t                1755 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				     u8 t, bool is2t)
t                1781 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t == 0) {
t                1796 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t == 0)
t                1832 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1834 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                1836 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                1838 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                1846 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1848 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                1863 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
t                1865 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][5] = (rtl_get_bbreg(hw, 0xebc,
t                1867 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][6] = (rtl_get_bbreg(hw, 0xec4,
t                1869 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][7] = (rtl_get_bbreg(hw, 0xecc,
t                1876 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
t                1878 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 				result[t][5] = (rtl_get_bbreg(hw, 0xebc,
t                1892 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t != 0) {
t                1918 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 					       long result[][8], u8 t)
t                1948 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t == 0) {
t                1972 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t == 0)
t                1996 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1998 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                2000 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                2002 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                2008 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                2010 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 		result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                2023 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
t                2025 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
t                2027 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
t                2029 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
t                2034 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
t                2036 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
t                2048 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	if (t != 0) {
t                2490 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 				      long result[][8], u8 t, bool is2t)
t                2515 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 	if (t == 0) {
t                2553 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			result[t][0] = (rtl_get_bbreg(hw,
t                2557 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			result[t][1] = (rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
t                2573 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			result[t][2] = (rtl_get_bbreg(hw,
t                2577 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			result[t][3] = (rtl_get_bbreg(hw,
t                2606 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 				result[t][4] = (rtl_get_bbreg(hw,
t                2610 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 				result[t][5] = (rtl_get_bbreg(hw,
t                2626 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 				result[t][6] = (rtl_get_bbreg(hw,
t                2630 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 				result[t][7] = (rtl_get_bbreg(hw,
t                2650 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 	if (t != 0) {
t                1136 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				       long result[][8], u8 t, bool is2t)
t                1157 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 	if (t == 0) {
t                1166 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 	if (t == 0) {
t                1174 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 	if (t == 0) {
t                1197 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                1199 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                1201 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                1203 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                1208 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94,
t                1211 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 		result[t][1] =
t                1222 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1226 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				result[t][5] =
t                1229 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				result[t][6] =
t                1232 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				result[t][7] =
t                1237 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 				result[t][4] = (rtl_get_bbreg(hw,
t                1242 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
t                1253 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 	if (t != 0) {
t                2002 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					long result[][8], u8 t, bool is2t)
t                2033 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 	if (t == 0) {
t                2043 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 	if (t == 0) {
t                2065 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
t                2067 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
t                2081 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
t                2083 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
t                2101 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 				result[t][4] = (rtl_get_bbreg(hw, 0xe94,
t                2104 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 				result[t][5] = (rtl_get_bbreg(hw, 0xe9c,
t                2118 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 				result[t][6] = (rtl_get_bbreg(hw, 0xea4,
t                2121 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 				result[t][7] = (rtl_get_bbreg(hw, 0xeac,
t                2134 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 	if (t != 0) {
t                3026 drivers/net/wireless/realtek/rtlwifi/wifi.h #define MSECS(t)		msecs_to_jiffies(t)
t                1411 drivers/net/wireless/realtek/rtw88/main.h void rtw_tx_report_purge_timer(struct timer_list *t);
t                 185 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	u32 p, m, t, i;
t                 197 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = p - m;
t                 198 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = t / (DACK_SN_8822C - 20);
t                 200 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = m - p;
t                 201 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = t / (DACK_SN_8822C - 20);
t                 202 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		if (t != 0x0)
t                 203 drivers/net/wireless/realtek/rtw88/rtw8822c.c 			t = 0x400 - t;
t                 206 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	*val = t;
t                 147 drivers/net/wireless/realtek/rtw88/tx.c void rtw_tx_report_purge_timer(struct timer_list *t)
t                 149 drivers/net/wireless/realtek/rtw88/tx.c 	struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
t                 487 drivers/net/wireless/rsi/rsi_91x_hal.c static void bl_cmd_timeout(struct timer_list *t)
t                 489 drivers/net/wireless/rsi/rsi_91x_hal.c 	struct rsi_hw *adapter = from_timer(adapter, t, bl_cmd_timer);
t                1759 drivers/net/wireless/rsi/rsi_91x_mac80211.c void rsi_roc_timeout(struct timer_list *t)
t                1761 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct rsi_common *common = from_timer(common, t, roc_timer);
t                  91 drivers/net/wireless/rsi/rsi_common.h void rsi_roc_timeout(struct timer_list *t);
t                 130 drivers/net/wireless/st/cw1200/queue.c static void cw1200_queue_gc(struct timer_list *t)
t                 134 drivers/net/wireless/st/cw1200/queue.c 		from_timer(queue, t, gc);
t                2112 drivers/net/wireless/st/cw1200/sta.c void cw1200_mcast_timeout(struct timer_list *t)
t                2114 drivers/net/wireless/st/cw1200/sta.c 	struct cw1200_common *priv = from_timer(priv, t, mcast_timeout);
t                 117 drivers/net/wireless/st/cw1200/sta.h void cw1200_mcast_timeout(struct timer_list *t);
t                 438 drivers/net/wireless/st/cw1200/txrx.c 			  struct cw1200_txinfo *t)
t                 440 drivers/net/wireless/st/cw1200/txrx.c 	if (t->sta && t->sta_priv->link_id)
t                 441 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.raw_link_id =
t                 442 drivers/net/wireless/st/cw1200/txrx.c 				t->txpriv.link_id =
t                 443 drivers/net/wireless/st/cw1200/txrx.c 				t->sta_priv->link_id;
t                 445 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.raw_link_id =
t                 446 drivers/net/wireless/st/cw1200/txrx.c 				t->txpriv.link_id = 0;
t                 447 drivers/net/wireless/st/cw1200/txrx.c 	else if (is_multicast_ether_addr(t->da)) {
t                 449 drivers/net/wireless/st/cw1200/txrx.c 			t->txpriv.raw_link_id = 0;
t                 450 drivers/net/wireless/st/cw1200/txrx.c 			t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
t                 452 drivers/net/wireless/st/cw1200/txrx.c 			t->txpriv.raw_link_id = 0;
t                 453 drivers/net/wireless/st/cw1200/txrx.c 			t->txpriv.link_id = 0;
t                 456 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
t                 457 drivers/net/wireless/st/cw1200/txrx.c 		if (!t->txpriv.link_id)
t                 458 drivers/net/wireless/st/cw1200/txrx.c 			t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
t                 459 drivers/net/wireless/st/cw1200/txrx.c 		if (!t->txpriv.link_id) {
t                 464 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.raw_link_id = t->txpriv.link_id;
t                 466 drivers/net/wireless/st/cw1200/txrx.c 	if (t->txpriv.raw_link_id)
t                 467 drivers/net/wireless/st/cw1200/txrx.c 		priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
t                 469 drivers/net/wireless/st/cw1200/txrx.c 	if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
t                 470 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
t                 476 drivers/net/wireless/st/cw1200/txrx.c 	       struct cw1200_txinfo *t)
t                 478 drivers/net/wireless/st/cw1200/txrx.c 	if (ieee80211_is_auth(t->hdr->frame_control)) {
t                 479 drivers/net/wireless/st/cw1200/txrx.c 		u32 mask = ~BIT(t->txpriv.raw_link_id);
t                 489 drivers/net/wireless/st/cw1200/txrx.c 		     struct cw1200_txinfo *t)
t                 491 drivers/net/wireless/st/cw1200/txrx.c 	if (ieee80211_is_data_qos(t->hdr->frame_control)) {
t                 492 drivers/net/wireless/st/cw1200/txrx.c 		u8 *qos = ieee80211_get_qos_ctl(t->hdr);
t                 493 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
t                 494 drivers/net/wireless/st/cw1200/txrx.c 	} else if (ieee80211_is_data(t->hdr->frame_control)) {
t                 495 drivers/net/wireless/st/cw1200/txrx.c 		t->txpriv.tid = 0;
t                 501 drivers/net/wireless/st/cw1200/txrx.c 		  struct cw1200_txinfo *t)
t                 503 drivers/net/wireless/st/cw1200/txrx.c 	if (!t->tx_info->control.hw_key ||
t                 504 drivers/net/wireless/st/cw1200/txrx.c 	    !ieee80211_has_protected(t->hdr->frame_control))
t                 507 drivers/net/wireless/st/cw1200/txrx.c 	t->hdrlen += t->tx_info->control.hw_key->iv_len;
t                 508 drivers/net/wireless/st/cw1200/txrx.c 	skb_put(t->skb, t->tx_info->control.hw_key->icv_len);
t                 510 drivers/net/wireless/st/cw1200/txrx.c 	if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
t                 511 drivers/net/wireless/st/cw1200/txrx.c 		skb_put(t->skb, 8); /* MIC space */
t                 518 drivers/net/wireless/st/cw1200/txrx.c 		  struct cw1200_txinfo *t,
t                 521 drivers/net/wireless/st/cw1200/txrx.c 	size_t offset = (size_t)t->skb->data & 3;
t                 533 drivers/net/wireless/st/cw1200/txrx.c 	if (skb_headroom(t->skb) < offset) {
t                 536 drivers/net/wireless/st/cw1200/txrx.c 			  skb_headroom(t->skb));
t                 539 drivers/net/wireless/st/cw1200/txrx.c 	skb_push(t->skb, offset);
t                 540 drivers/net/wireless/st/cw1200/txrx.c 	t->hdrlen += offset;
t                 541 drivers/net/wireless/st/cw1200/txrx.c 	t->txpriv.offset += offset;
t                 549 drivers/net/wireless/st/cw1200/txrx.c 		   struct cw1200_txinfo *t)
t                 552 drivers/net/wireless/st/cw1200/txrx.c 		(struct ieee80211_mgmt *)t->hdr;
t                 553 drivers/net/wireless/st/cw1200/txrx.c 	if (ieee80211_is_action(t->hdr->frame_control) &&
t                 563 drivers/net/wireless/st/cw1200/txrx.c 		struct cw1200_txinfo *t)
t                 567 drivers/net/wireless/st/cw1200/txrx.c 	if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
t                 570 drivers/net/wireless/st/cw1200/txrx.c 			  skb_headroom(t->skb));
t                 574 drivers/net/wireless/st/cw1200/txrx.c 	wsm = skb_push(t->skb, sizeof(struct wsm_tx));
t                 575 drivers/net/wireless/st/cw1200/txrx.c 	t->txpriv.offset += sizeof(struct wsm_tx);
t                 577 drivers/net/wireless/st/cw1200/txrx.c 	wsm->hdr.len = __cpu_to_le16(t->skb->len);
t                 579 drivers/net/wireless/st/cw1200/txrx.c 	wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
t                 586 drivers/net/wireless/st/cw1200/txrx.c 	       struct cw1200_txinfo *t,
t                 594 drivers/net/wireless/st/cw1200/txrx.c 	if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
t                 596 drivers/net/wireless/st/cw1200/txrx.c 	} else if (ieee80211_is_data(t->hdr->frame_control)) {
t                 598 drivers/net/wireless/st/cw1200/txrx.c 		u8 *payload = &t->skb->data[t->hdrlen];
t                 602 drivers/net/wireless/st/cw1200/txrx.c 	} else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
t                 603 drivers/net/wireless/st/cw1200/txrx.c 		ieee80211_is_reassoc_req(t->hdr->frame_control)) {
t                 605 drivers/net/wireless/st/cw1200/txrx.c 				(struct ieee80211_mgmt *)t->hdr;
t                 620 drivers/net/wireless/st/cw1200/txrx.c 		if (ieee80211_is_action(t->hdr->frame_control))
t                 622 drivers/net/wireless/st/cw1200/txrx.c 		else if (ieee80211_is_mgmt(t->hdr->frame_control))
t                 639 drivers/net/wireless/st/cw1200/txrx.c 			struct cw1200_txinfo *t,
t                 644 drivers/net/wireless/st/cw1200/txrx.c 	t->txpriv.rate_id = tx_policy_get(priv,
t                 645 drivers/net/wireless/st/cw1200/txrx.c 		t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
t                 647 drivers/net/wireless/st/cw1200/txrx.c 	if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
t                 650 drivers/net/wireless/st/cw1200/txrx.c 	wsm->flags |= t->txpriv.rate_id << 4;
t                 652 drivers/net/wireless/st/cw1200/txrx.c 	t->rate = cw1200_get_tx_rate(priv,
t                 653 drivers/net/wireless/st/cw1200/txrx.c 		&t->tx_info->control.rates[0]),
t                 654 drivers/net/wireless/st/cw1200/txrx.c 	wsm->max_tx_rate = t->rate->hw_value;
t                 655 drivers/net/wireless/st/cw1200/txrx.c 	if (t->rate->flags & IEEE80211_TX_RC_MCS) {
t                 683 drivers/net/wireless/st/cw1200/txrx.c 		     struct cw1200_txinfo *t)
t                 687 drivers/net/wireless/st/cw1200/txrx.c 	if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
t                 695 drivers/net/wireless/st/cw1200/txrx.c 	if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
t                 696 drivers/net/wireless/st/cw1200/txrx.c 		was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;
t                 708 drivers/net/wireless/st/cw1200/txrx.c 	struct cw1200_txinfo t = {
t                 725 drivers/net/wireless/st/cw1200/txrx.c 	t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
t                 726 drivers/net/wireless/st/cw1200/txrx.c 	t.da = ieee80211_get_DA(t.hdr);
t                 728 drivers/net/wireless/st/cw1200/txrx.c 		t.sta = control->sta;
t                 729 drivers/net/wireless/st/cw1200/txrx.c 		t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
t                 732 drivers/net/wireless/st/cw1200/txrx.c 	if (WARN_ON(t.queue >= 4))
t                 735 drivers/net/wireless/st/cw1200/txrx.c 	ret = cw1200_tx_h_calc_link_ids(priv, &t);
t                 740 drivers/net/wireless/st/cw1200/txrx.c 		 skb->len, t.queue, t.txpriv.link_id,
t                 741 drivers/net/wireless/st/cw1200/txrx.c 		 t.txpriv.raw_link_id);
t                 743 drivers/net/wireless/st/cw1200/txrx.c 	cw1200_tx_h_pm(priv, &t);
t                 744 drivers/net/wireless/st/cw1200/txrx.c 	cw1200_tx_h_calc_tid(priv, &t);
t                 745 drivers/net/wireless/st/cw1200/txrx.c 	ret = cw1200_tx_h_crypt(priv, &t);
t                 748 drivers/net/wireless/st/cw1200/txrx.c 	ret = cw1200_tx_h_align(priv, &t, &flags);
t                 751 drivers/net/wireless/st/cw1200/txrx.c 	ret = cw1200_tx_h_action(priv, &t);
t                 754 drivers/net/wireless/st/cw1200/txrx.c 	wsm = cw1200_tx_h_wsm(priv, &t);
t                 760 drivers/net/wireless/st/cw1200/txrx.c 	cw1200_tx_h_bt(priv, &t, wsm);
t                 761 drivers/net/wireless/st/cw1200/txrx.c 	ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
t                 766 drivers/net/wireless/st/cw1200/txrx.c 	sta = rcu_dereference(t.sta);
t                 770 drivers/net/wireless/st/cw1200/txrx.c 		tid_update = cw1200_tx_h_pm_state(priv, &t);
t                 771 drivers/net/wireless/st/cw1200/txrx.c 		BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
t                 772 drivers/net/wireless/st/cw1200/txrx.c 					t.skb, &t.txpriv));
t                 777 drivers/net/wireless/st/cw1200/txrx.c 		ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
t                 786 drivers/net/wireless/st/cw1200/txrx.c 	cw1200_skb_dtor(priv, skb, &t.txpriv);
t                  46 drivers/net/wireless/ti/wl1251/spi.c 	struct spi_transfer t;
t                  55 drivers/net/wireless/ti/wl1251/spi.c 	memset(&t, 0, sizeof(t));
t                  60 drivers/net/wireless/ti/wl1251/spi.c 	t.tx_buf = cmd;
t                  61 drivers/net/wireless/ti/wl1251/spi.c 	t.len = WSPI_INIT_CMD_LEN;
t                  62 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t, &m);
t                  73 drivers/net/wireless/ti/wl1251/spi.c 	struct spi_transfer t;
t                  82 drivers/net/wireless/ti/wl1251/spi.c 	memset(&t, 0, sizeof(t));
t                 112 drivers/net/wireless/ti/wl1251/spi.c 	t.tx_buf = cmd;
t                 113 drivers/net/wireless/ti/wl1251/spi.c 	t.len = WSPI_INIT_CMD_LEN;
t                 114 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t, &m);
t                 132 drivers/net/wireless/ti/wl1251/spi.c 	struct spi_transfer t[3];
t                 146 drivers/net/wireless/ti/wl1251/spi.c 	memset(t, 0, sizeof(t));
t                 148 drivers/net/wireless/ti/wl1251/spi.c 	t[0].tx_buf = cmd;
t                 149 drivers/net/wireless/ti/wl1251/spi.c 	t[0].len = 4;
t                 150 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t[0], &m);
t                 153 drivers/net/wireless/ti/wl1251/spi.c 	t[1].rx_buf = busy_buf;
t                 154 drivers/net/wireless/ti/wl1251/spi.c 	t[1].len = WL1251_BUSY_WORD_LEN;
t                 155 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t[1], &m);
t                 157 drivers/net/wireless/ti/wl1251/spi.c 	t[2].rx_buf = buf;
t                 158 drivers/net/wireless/ti/wl1251/spi.c 	t[2].len = len;
t                 159 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t[2], &m);
t                 172 drivers/net/wireless/ti/wl1251/spi.c 	struct spi_transfer t[2];
t                 184 drivers/net/wireless/ti/wl1251/spi.c 	memset(t, 0, sizeof(t));
t                 186 drivers/net/wireless/ti/wl1251/spi.c 	t[0].tx_buf = cmd;
t                 187 drivers/net/wireless/ti/wl1251/spi.c 	t[0].len = sizeof(*cmd);
t                 188 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t[0], &m);
t                 190 drivers/net/wireless/ti/wl1251/spi.c 	t[1].tx_buf = buf;
t                 191 drivers/net/wireless/ti/wl1251/spi.c 	t[1].len = len;
t                 192 drivers/net/wireless/ti/wl1251/spi.c 	spi_message_add_tail(&t[1], &m);
t                 195 drivers/net/wireless/ti/wlcore/main.c static void wl1271_rx_streaming_timer(struct timer_list *t)
t                 197 drivers/net/wireless/ti/wlcore/main.c 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
t                  94 drivers/net/wireless/ti/wlcore/spi.c 	struct spi_transfer t;
t                 104 drivers/net/wireless/ti/wlcore/spi.c 	memset(&t, 0, sizeof(t));
t                 109 drivers/net/wireless/ti/wlcore/spi.c 	t.tx_buf = cmd;
t                 110 drivers/net/wireless/ti/wlcore/spi.c 	t.len = WSPI_INIT_CMD_LEN;
t                 111 drivers/net/wireless/ti/wlcore/spi.c 	spi_message_add_tail(&t, &m);
t                 121 drivers/net/wireless/ti/wlcore/spi.c 	struct spi_transfer t;
t                 132 drivers/net/wireless/ti/wlcore/spi.c 	memset(&t, 0, sizeof(t));
t                 164 drivers/net/wireless/ti/wlcore/spi.c 	t.tx_buf = cmd;
t                 165 drivers/net/wireless/ti/wlcore/spi.c 	t.len = WSPI_INIT_CMD_LEN;
t                 166 drivers/net/wireless/ti/wlcore/spi.c 	spi_message_add_tail(&t, &m);
t                 183 drivers/net/wireless/ti/wlcore/spi.c 	t.tx_buf = cmd;
t                 184 drivers/net/wireless/ti/wlcore/spi.c 	t.len = 4;
t                 185 drivers/net/wireless/ti/wlcore/spi.c 	spi_message_add_tail(&t, &m);
t                 200 drivers/net/wireless/ti/wlcore/spi.c 	struct spi_transfer t[1];
t                 215 drivers/net/wireless/ti/wlcore/spi.c 		memset(t, 0, sizeof(t));
t                 216 drivers/net/wireless/ti/wlcore/spi.c 		t[0].rx_buf = busy_buf;
t                 217 drivers/net/wireless/ti/wlcore/spi.c 		t[0].len = sizeof(u32);
t                 218 drivers/net/wireless/ti/wlcore/spi.c 		t[0].cs_change = true;
t                 219 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[0], &m);
t                 236 drivers/net/wireless/ti/wlcore/spi.c 	struct spi_transfer t[2];
t                 258 drivers/net/wireless/ti/wlcore/spi.c 		memset(t, 0, sizeof(t));
t                 260 drivers/net/wireless/ti/wlcore/spi.c 		t[0].tx_buf = cmd;
t                 261 drivers/net/wireless/ti/wlcore/spi.c 		t[0].len = 4;
t                 262 drivers/net/wireless/ti/wlcore/spi.c 		t[0].cs_change = true;
t                 263 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[0], &m);
t                 266 drivers/net/wireless/ti/wlcore/spi.c 		t[1].rx_buf = busy_buf;
t                 267 drivers/net/wireless/ti/wlcore/spi.c 		t[1].len = WL1271_BUSY_WORD_LEN;
t                 268 drivers/net/wireless/ti/wlcore/spi.c 		t[1].cs_change = true;
t                 269 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[1], &m);
t                 280 drivers/net/wireless/ti/wlcore/spi.c 		memset(t, 0, sizeof(t));
t                 282 drivers/net/wireless/ti/wlcore/spi.c 		t[0].rx_buf = buf;
t                 283 drivers/net/wireless/ti/wlcore/spi.c 		t[0].len = chunk_len;
t                 284 drivers/net/wireless/ti/wlcore/spi.c 		t[0].cs_change = true;
t                 285 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[0], &m);
t                 302 drivers/net/wireless/ti/wlcore/spi.c 	struct spi_transfer *t;
t                 310 drivers/net/wireless/ti/wlcore/spi.c 	t = kzalloc(sizeof(*t) * 2 * WSPI_MAX_NUM_OF_CHUNKS, GFP_KERNEL);
t                 311 drivers/net/wireless/ti/wlcore/spi.c 	if (!t)
t                 332 drivers/net/wireless/ti/wlcore/spi.c 		t[i].tx_buf = cmd;
t                 333 drivers/net/wireless/ti/wlcore/spi.c 		t[i].len = sizeof(*cmd);
t                 334 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[i++], &m);
t                 336 drivers/net/wireless/ti/wlcore/spi.c 		t[i].tx_buf = buf;
t                 337 drivers/net/wireless/ti/wlcore/spi.c 		t[i].len = chunk_len;
t                 338 drivers/net/wireless/ti/wlcore/spi.c 		spi_message_add_tail(&t[i++], &m);
t                 349 drivers/net/wireless/ti/wlcore/spi.c 	kfree(t);
t                 188 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	unsigned int i, j, t, max;
t                 193 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	for (i = 0; i < count; i += j + t) {
t                 194 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 		t = 0;
t                 200 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 				t = 1;
t                 226 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	unsigned int i, j, t, max;
t                 230 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	for (i = 0; i < count; i += j + t) {
t                 231 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 		t = 0;
t                 237 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 				t = 1;
t                 662 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	int r, t;
t                 676 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	t = zd_chip_unlock_phy_regs(chip);
t                 677 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	if (t && !r)
t                 678 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 		r = t;
t                 744 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	int r, t;
t                 753 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	t = zd_chip_unlock_phy_regs(chip);
t                 754 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	if (t && !r)
t                 755 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 		r = t;
t                1263 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	int r, t;
t                1283 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	t = zd_chip_unlock_phy_regs(chip);
t                1284 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 	if (t && !r)
t                1285 drivers/net/wireless/zydas/zd1211rw/zd_chip.c 		r = t;
t                 629 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			u32 t = bits % 11;
t                 631 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			if (0 < t && t <= 3) {
t                  61 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	int t;
t                  95 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	t = rf->init_hw(rf);
t                  97 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	if (t)
t                  98 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 		r = t;
t                 126 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	int r, t;
t                 133 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	t = rf->switch_radio_on(rf);
t                 135 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	if (t)
t                 136 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 		r = t;
t                 142 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	int r, t;
t                 150 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	t = rf->switch_radio_off(rf);
t                 152 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 	if (t)
t                 153 drivers/net/wireless/zydas/zd1211rw/zd_rf.c 		r = t;
t                 329 drivers/net/xen-netback/common.h void xenvif_tx_credit_callback(struct timer_list *t);
t                 186 drivers/net/xen-netback/netback.c void xenvif_tx_credit_callback(struct timer_list *t)
t                 188 drivers/net/xen-netback/netback.c 	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
t                 233 drivers/net/xen-netfront.c static void rx_refill_timeout(struct timer_list *t)
t                 235 drivers/net/xen-netfront.c 	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
t                 133 drivers/nfc/nfcmrvl/fw_dnld.c static void fw_dnld_timeout(struct timer_list *t)
t                 135 drivers/nfc/nfcmrvl/fw_dnld.c 	struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
t                1223 drivers/nfc/pn533/pn533.c static void pn533_listen_mode_timer(struct timer_list *t)
t                1225 drivers/nfc/pn533/pn533.c 	struct pn533 *dev = from_timer(dev, t, listen_timer);
t                1911 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb, *tmp, *t;
t                1924 drivers/nfc/pn533/pn533.c 	skb_queue_walk_safe(&dev->resp_q, tmp, t)
t                1936 drivers/nfc/pn533/pn533.c 	skb_queue_walk_safe(&dev->resp_q, tmp, t) {
t                 238 drivers/nfc/st-nci/ndlc.c static void ndlc_t1_timeout(struct timer_list *t)
t                 240 drivers/nfc/st-nci/ndlc.c 	struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
t                 247 drivers/nfc/st-nci/ndlc.c static void ndlc_t2_timeout(struct timer_list *t)
t                 249 drivers/nfc/st-nci/ndlc.c 	struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
t                 671 drivers/nfc/st-nci/se.c static void st_nci_se_wt_timeout(struct timer_list *t)
t                 684 drivers/nfc/st-nci/se.c 	struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
t                 702 drivers/nfc/st-nci/se.c static void st_nci_se_activation_timeout(struct timer_list *t)
t                 704 drivers/nfc/st-nci/se.c 	struct st_nci_info *info = from_timer(info, t,
t                 244 drivers/nfc/st21nfca/se.c static void st21nfca_se_wt_timeout(struct timer_list *t)
t                 257 drivers/nfc/st21nfca/se.c 	struct st21nfca_hci_info *info = from_timer(info, t,
t                 276 drivers/nfc/st21nfca/se.c static void st21nfca_se_activation_timeout(struct timer_list *t)
t                 278 drivers/nfc/st21nfca/se.c 	struct st21nfca_hci_info *info = from_timer(info, t,
t                  76 drivers/nfc/st95hf/spi.c 	struct spi_transfer t[2] = {
t                  89 drivers/nfc/st95hf/spi.c 	spi_message_add_tail(&t[0], &m);
t                  90 drivers/nfc/st95hf/spi.c 	spi_message_add_tail(&t[1], &m);
t                 133 drivers/nfc/st95hf/spi.c 	struct spi_transfer t[2] = {
t                 144 drivers/nfc/st95hf/spi.c 	spi_message_add_tail(&t[0], &m);
t                 145 drivers/nfc/st95hf/spi.c 	spi_message_add_tail(&t[1], &m);
t                 489 drivers/nfc/trf7970a.c 	struct spi_transfer t[2];
t                 497 drivers/nfc/trf7970a.c 	memset(&t, 0, sizeof(t));
t                 499 drivers/nfc/trf7970a.c 	t[0].tx_buf = &addr;
t                 500 drivers/nfc/trf7970a.c 	t[0].len = sizeof(addr);
t                 501 drivers/nfc/trf7970a.c 	spi_message_add_tail(&t[0], &m);
t                 503 drivers/nfc/trf7970a.c 	t[1].rx_buf = buf;
t                 504 drivers/nfc/trf7970a.c 	t[1].len = len;
t                 505 drivers/nfc/trf7970a.c 	spi_message_add_tail(&t[1], &m);
t                 649 drivers/nfc/trf7970a.c 	struct spi_transfer t[2];
t                 659 drivers/nfc/trf7970a.c 	memset(&t, 0, sizeof(t));
t                 661 drivers/nfc/trf7970a.c 	t[0].tx_buf = prefix;
t                 662 drivers/nfc/trf7970a.c 	t[0].len = prefix_len;
t                 663 drivers/nfc/trf7970a.c 	spi_message_add_tail(&t[0], &m);
t                 665 drivers/nfc/trf7970a.c 	t[1].tx_buf = skb->data;
t                 666 drivers/nfc/trf7970a.c 	t[1].len = len;
t                 667 drivers/nfc/trf7970a.c 	spi_message_add_tail(&t[1], &m);
t                 215 drivers/ntb/test/ntb_pingpong.c static enum hrtimer_restart pp_timer_func(struct hrtimer *t)
t                 217 drivers/ntb/test/ntb_pingpong.c 	struct pp_ctx *pp = to_pp_timer(t);
t                 166 drivers/nubus/nubus.c 	unsigned char *t = (unsigned char *)dest;
t                 170 drivers/nubus/nubus.c 		*t++ = nubus_get_rom(&p, 1, dirent->mask);
t                 179 drivers/nubus/nubus.c 	char *t = dest;
t                 187 drivers/nubus/nubus.c 		*t++ = c;
t                 191 drivers/nubus/nubus.c 		*t = '\0';
t                 192 drivers/nubus/nubus.c 	return t - dest;
t                 573 drivers/nvme/host/multipath.c static void nvme_anatt_timeout(struct timer_list *t)
t                 575 drivers/nvme/host/multipath.c 	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
t                 629 drivers/parisc/iosapic.c 	u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL);
t                 631 drivers/parisc/iosapic.c 	for ( ; t < vi->eoi_addr; t++)
t                 632 drivers/parisc/iosapic.c 		printk(" %x", readl(t));
t                 890 drivers/parisc/lba_pci.c 	u##size t; \
t                 891 drivers/parisc/lba_pci.c 	t = READ_REG##size(astro_iop_base + addr); \
t                 892 drivers/parisc/lba_pci.c 	DBG_PORT(" 0x%x\n", t); \
t                 893 drivers/parisc/lba_pci.c 	return (t); \
t                 972 drivers/parisc/lba_pci.c 	u##size t; \
t                 974 drivers/parisc/lba_pci.c 	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
t                 975 drivers/parisc/lba_pci.c 	DBG_PORT(" 0x%x\n", t); \
t                 976 drivers/parisc/lba_pci.c 	return (t); \
t                1995 drivers/parisc/sba_iommu.c 	char t = sba_dev->id.hw_type;
t                1998 drivers/parisc/sba_iommu.c 	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
t                2016 drivers/parisc/sba_iommu.c 	char t = sba_dev->id.hw_type;
t                2020 drivers/parisc/sba_iommu.c 	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
t                2059 drivers/parisc/sba_iommu.c 	char t = sba_dev->id.hw_type;
t                2063 drivers/parisc/sba_iommu.c 	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
t                  47 drivers/parport/ieee1284.c static void timeout_waiting_on_port (struct timer_list *t)
t                  49 drivers/parport/ieee1284.c 	struct parport *port = from_timer(port, t, timer);
t                 478 drivers/parport/procfs.c 	struct parport_sysctl_table *t;
t                 481 drivers/parport/procfs.c 	t = kmemdup(&parport_sysctl_template, sizeof(*t), GFP_KERNEL);
t                 482 drivers/parport/procfs.c 	if (t == NULL)
t                 485 drivers/parport/procfs.c 	t->device_dir[0].extra1 = port;
t                 488 drivers/parport/procfs.c 		t->vars[i].extra1 = port;
t                 490 drivers/parport/procfs.c 	t->vars[0].data = &port->spintime;
t                 491 drivers/parport/procfs.c 	t->vars[5].child = t->device_dir;
t                 494 drivers/parport/procfs.c 		t->vars[6 + i].extra2 = &port->probe_info[i];
t                 496 drivers/parport/procfs.c 	t->port_dir[0].procname = port->name;
t                 498 drivers/parport/procfs.c 	t->port_dir[0].child = t->vars;
t                 499 drivers/parport/procfs.c 	t->parport_dir[0].child = t->port_dir;
t                 500 drivers/parport/procfs.c 	t->dev_dir[0].child = t->parport_dir;
t                 502 drivers/parport/procfs.c 	t->sysctl_header = register_sysctl_table(t->dev_dir);
t                 503 drivers/parport/procfs.c 	if (t->sysctl_header == NULL) {
t                 504 drivers/parport/procfs.c 		kfree(t);
t                 505 drivers/parport/procfs.c 		t = NULL;
t                 507 drivers/parport/procfs.c 	port->sysctl_table = t;
t                 514 drivers/parport/procfs.c 		struct parport_sysctl_table *t = port->sysctl_table;
t                 516 drivers/parport/procfs.c 		unregister_sysctl_table(t->sysctl_header);
t                 517 drivers/parport/procfs.c 		kfree(t);
t                 524 drivers/parport/procfs.c 	struct parport_device_sysctl_table *t;
t                 527 drivers/parport/procfs.c 	t = kmemdup(&parport_device_sysctl_template, sizeof(*t), GFP_KERNEL);
t                 528 drivers/parport/procfs.c 	if (t == NULL)
t                 531 drivers/parport/procfs.c 	t->dev_dir[0].child = t->parport_dir;
t                 532 drivers/parport/procfs.c 	t->parport_dir[0].child = t->port_dir;
t                 533 drivers/parport/procfs.c 	t->port_dir[0].procname = port->name;
t                 534 drivers/parport/procfs.c 	t->port_dir[0].child = t->devices_root_dir;
t                 535 drivers/parport/procfs.c 	t->devices_root_dir[0].child = t->device_dir;
t                 537 drivers/parport/procfs.c 	t->device_dir[0].procname = device->name;
t                 538 drivers/parport/procfs.c 	t->device_dir[0].child = t->vars;
t                 539 drivers/parport/procfs.c 	t->vars[0].data = &device->timeslice;
t                 541 drivers/parport/procfs.c 	t->sysctl_header = register_sysctl_table(t->dev_dir);
t                 542 drivers/parport/procfs.c 	if (t->sysctl_header == NULL) {
t                 543 drivers/parport/procfs.c 		kfree(t);
t                 544 drivers/parport/procfs.c 		t = NULL;
t                 546 drivers/parport/procfs.c 	device->sysctl_table = t;
t                 553 drivers/parport/procfs.c 		struct parport_device_sysctl_table *t = device->sysctl_table;
t                 555 drivers/parport/procfs.c 		unregister_sysctl_table(t->sysctl_header);
t                 556 drivers/parport/procfs.c 		kfree(t);
t                 399 drivers/pci/hotplug/cpqphp.h void cpqhp_pushbutton_thread(struct timer_list *t);
t                1719 drivers/pci/hotplug/cpqphp_ctrl.c static void pushbutton_helper_thread(struct timer_list *t)
t                1721 drivers/pci/hotplug/cpqphp_ctrl.c 	pushbutton_pending = t;
t                1885 drivers/pci/hotplug/cpqphp_ctrl.c void cpqhp_pushbutton_thread(struct timer_list *t)
t                1890 drivers/pci/hotplug/cpqphp_ctrl.c 	struct slot *p_slot = from_timer(p_slot, t, task_event);
t                 218 drivers/pci/hotplug/shpchp_hpc.c static void int_poll_timeout(struct timer_list *t)
t                 220 drivers/pci/hotplug/shpchp_hpc.c 	struct controller *ctrl = from_timer(ctrl, t, poll_timer);
t                 770 drivers/pci/pci.c 					       pci_power_t t)
t                 772 drivers/pci/pci.c 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
t                 472 drivers/pci/pcie/aer.c #define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
t                 474 drivers/pci/pcie/aer.c #define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
t                 476 drivers/pci/pcie/aer.c #define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
t                 479 drivers/pci/pcie/aer.c #define AER_GET_AGENT(t, e)						\
t                 480 drivers/pci/pcie/aer.c 	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
t                 481 drivers/pci/pcie/aer.c 	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
t                 482 drivers/pci/pcie/aer.c 	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
t                 489 drivers/pci/pcie/aer.c #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
t                 491 drivers/pci/pcie/aer.c #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
t                 497 drivers/pci/pcie/aer.c #define AER_GET_LAYER_ERROR(t, e)					\
t                 498 drivers/pci/pcie/aer.c 	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
t                 499 drivers/pci/pcie/aer.c 	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
t                 711 drivers/pci/pcie/aer.c 			       struct aer_header_log_regs *t)
t                 714 drivers/pci/pcie/aer.c 		t->dw0, t->dw1, t->dw2, t->dw3);
t                 570 drivers/pci/xen-pcifront.c 	struct pci_bus_entry *bus_entry, *t;
t                 575 drivers/pci/xen-pcifront.c 	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
t                 266 drivers/pcmcia/bcm63xx_pcmcia.c static void bcm63xx_pcmcia_poll(struct timer_list *t)
t                 271 drivers/pcmcia/bcm63xx_pcmcia.c 	skt = from_timer(skt, t, timer);
t                  69 drivers/pcmcia/electra_cf.c static void electra_cf_timer(struct timer_list *t)
t                  71 drivers/pcmcia/electra_cf.c 	struct electra_cf_socket *cf = from_timer(cf, t, timer);
t                 323 drivers/pcmcia/i82365.c     struct i82365_socket *t = &socket[s];
t                 349 drivers/pcmcia/i82365.c     if (!(t->flags & IS_VIA)) {
t                 427 drivers/pcmcia/i82365.c     struct i82365_socket *t = &socket[s];
t                 428 drivers/pcmcia/i82365.c     if (t->flags & IS_CIRRUS)
t                 430 drivers/pcmcia/i82365.c     else if (t->flags & IS_VADEM)
t                 436 drivers/pcmcia/i82365.c     struct i82365_socket *t = &socket[s];
t                 437 drivers/pcmcia/i82365.c     if (t->flags & IS_CIRRUS)
t                 443 drivers/pcmcia/i82365.c     i365_bflip(s, I365_INTCTL, I365_INTR_ENA, t->intr);
t                 444 drivers/pcmcia/i82365.c     if (t->flags & IS_VADEM)
t                 677 drivers/pcmcia/i82365.c     struct i82365_socket *t = &socket[sockets-ns];
t                 683 drivers/pcmcia/i82365.c 	       t->ioaddr, t->psock*0x40);
t                 728 drivers/pcmcia/i82365.c 	t[i].socket.features |= SS_CAP_PCCARD;
t                 729 drivers/pcmcia/i82365.c 	t[i].socket.map_size = 0x1000;
t                 730 drivers/pcmcia/i82365.c 	t[i].socket.irq_mask = mask;
t                 731 drivers/pcmcia/i82365.c 	t[i].cs_irq = isa_irq;
t                 924 drivers/pcmcia/i82365.c     struct i82365_socket *t = &socket[sock];
t                 935 drivers/pcmcia/i82365.c     reg = t->intr;
t                 945 drivers/pcmcia/i82365.c     if (t->flags & IS_CIRRUS) {
t                 961 drivers/pcmcia/i82365.c     } else if (t->flags & IS_VG_PWR) {
t                 977 drivers/pcmcia/i82365.c     } else if (t->flags & IS_DF_PWR) {
t                1008 drivers/pcmcia/i82365.c     if (t->flags & IS_CIRRUS) {
t                1015 drivers/pcmcia/i82365.c     reg = t->cs_irq << 4;
t                  79 drivers/pcmcia/omap_cf.c static void omap_cf_timer(struct timer_list *t)
t                  81 drivers/pcmcia/omap_cf.c 	struct omap_cf_socket	*cf = from_timer(cf, t, timer);
t                 237 drivers/pcmcia/pd6729.c static void pd6729_interrupt_wrapper(struct timer_list *t)
t                 239 drivers/pcmcia/pd6729.c 	struct pd6729_socket *socket = from_timer(socket, t, poll_timer);
t                 251 drivers/pcmcia/pd6729.c 	struct pd6729_socket *t;
t                 285 drivers/pcmcia/pd6729.c 	t = (socket->number) ? socket : socket + 1;
t                 286 drivers/pcmcia/pd6729.c 	indirect_write(t, PD67_EXT_INDEX, PD67_EXTERN_DATA);
t                 287 drivers/pcmcia/pd6729.c 	data = indirect_read16(t, PD67_EXT_DATA);
t                 108 drivers/pcmcia/sa11xx_base.h   unsigned int t = ((pcmcia_cycle_ns * cpu_clock_khz) / 6) - 1000000;
t                 109 drivers/pcmcia/sa11xx_base.h   return (t / 1000000) + (((t % 1000000) == 0) ? 0 : 1);
t                 464 drivers/pcmcia/soc_common.c static void soc_common_pcmcia_poll_event(struct timer_list *t)
t                 466 drivers/pcmcia/soc_common.c 	struct soc_pcmcia_socket *skt = from_timer(skt, t, poll_timer);
t                 539 drivers/pcmcia/yenta_socket.c static void yenta_interrupt_wrapper(struct timer_list *t)
t                 541 drivers/pcmcia/yenta_socket.c 	struct yenta_socket *socket = from_timer(socket, t, poll_timer);
t                1285 drivers/phy/qualcomm/phy-qcom-qmp.c 	const struct qmp_phy_init_tbl *t = tbl;
t                1287 drivers/phy/qualcomm/phy-qcom-qmp.c 	if (!t)
t                1290 drivers/phy/qualcomm/phy-qcom-qmp.c 	for (i = 0; i < num; i++, t++) {
t                1291 drivers/phy/qualcomm/phy-qcom-qmp.c 		if (t->in_layout)
t                1292 drivers/phy/qualcomm/phy-qcom-qmp.c 			writel(t->val, base + regs[t->offset]);
t                1294 drivers/phy/qualcomm/phy-qcom-qmp.c 			writel(t->val, base + t->offset);
t                  94 drivers/phy/ralink/phy-ralink-usb.c 	u32 t;
t                 119 drivers/phy/ralink/phy-ralink-usb.c 	regmap_read(phy->sysctl, RT_SYSC_REG_USB_PHY_CFG, &t);
t                 121 drivers/phy/ralink/phy-ralink-usb.c 		(t & UDEV_WAKEUP) ? ("enabled") : ("disabled"));
t                 122 drivers/phy/ralink/phy-ralink-usb.c 	if (t & USB_PHY_UTMI_8B60M)
t                 640 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	u32 t, t2;
t                 643 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, &t);
t                 661 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 		if (pullup ^ !t)
t                 665 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &t);
t                 673 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	*val = (t | t2 << 1) & 0x7;
t                 671 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 			u32 t = irq_get_trigger_type(virq);
t                 673 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 			if ((t & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
t                 211 drivers/pinctrl/pinctrl-lpc18xx.c #define LPC_P(port, pin, f0, f1, f2, f3, f4, f5, f6, f7, a, t)	\
t                 220 drivers/pinctrl/pinctrl-lpc18xx.c 	.type = TYPE_##t,					\
t                 223 drivers/pinctrl/pinctrl-lpc18xx.c #define LPC_N(pname, off, f0, f1, f2, f3, f4, f5, f6, f7, a, t)	\
t                 232 drivers/pinctrl/pinctrl-lpc18xx.c 	.type = TYPE_##t,					\
t                 319 drivers/pinctrl/pinctrl-mcp23s08.c 	struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
t                 323 drivers/pinctrl/pinctrl-mcp23s08.c 	spi_message_add_tail(&t[0], &m);
t                 324 drivers/pinctrl/pinctrl-mcp23s08.c 	spi_message_add_tail(&t[1], &m);
t                 336 drivers/pinctrl/pinctrl-mcp23s08.c 	struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
t                 341 drivers/pinctrl/pinctrl-mcp23s08.c 	spi_message_add_tail(&t[0], &m);
t                 342 drivers/pinctrl/pinctrl-mcp23s08.c 	spi_message_add_tail(&t[1], &m);
t                 343 drivers/pinctrl/pinctrl-mcp23s08.c 	spi_message_add_tail(&t[2], &m);
t                 199 drivers/pinctrl/sirf/pinctrl-atlas7.c #define PADCONF(pad, t, mr, pr, dsr, adr, mb, pb, dsb, adb)	\
t                 202 drivers/pinctrl/sirf/pinctrl-atlas7.c 		.type = t,					\
t                 396 drivers/platform/mellanox/mlxbf-tmfifo.c static void mlxbf_tmfifo_timer(struct timer_list *t)
t                 398 drivers/platform/mellanox/mlxbf-tmfifo.c 	struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
t                 348 drivers/platform/x86/acerhdf.c static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
t                 361 drivers/platform/x86/acerhdf.c 	*t = temp;
t                 179 drivers/platform/x86/dell-smbios-base.c 	u16 t = 0;
t                 222 drivers/platform/x86/dell-smbios-base.c 			t = da_tokens[i].tokenID;
t                 227 drivers/platform/x86/dell-smbios-base.c 		if (!t) {
t                 237 drivers/platform/x86/dell-smbios-base.c 			if (t >= token_blacklist[i].min &&
t                 238 drivers/platform/x86/dell-smbios-base.c 			    t <= token_blacklist[i].max)
t                 246 drivers/platform/x86/dell-smbios-base.c 			if (t < token_whitelist[i].min ||
t                 247 drivers/platform/x86/dell-smbios-base.c 			    t > token_whitelist[i].max)
t                 251 drivers/platform/x86/dell-smbios-base.c 				dev_dbg(d, "whitelisted token: %x\n", t);
t                 927 drivers/platform/x86/intel_ips.c static void monitor_timeout(struct timer_list *t)
t                 929 drivers/platform/x86/intel_ips.c 	struct ips_driver *ips = from_timer(ips, t, timer);
t                1369 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1374 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                1377 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_disclose_usertask(attr->attr.name, "set to %ld\n", t);
t                1380 drivers/platform/x86/thinkpad_acpi.c 	if (tpacpi_rfk_check_hwblock_state() && !!t)
t                1383 drivers/platform/x86/thinkpad_acpi.c 	res = tpacpi_rfkill_switches[id]->ops->set_status((!!t) ?
t                1467 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1469 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 0xffff, &t))
t                1472 drivers/platform/x86/thinkpad_acpi.c 	dbg_level = t;
t                1499 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1501 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                1504 drivers/platform/x86/thinkpad_acpi.c 	if (tpacpi_wlsw_emulstate != !!t) {
t                1505 drivers/platform/x86/thinkpad_acpi.c 		tpacpi_wlsw_emulstate = !!t;
t                1506 drivers/platform/x86/thinkpad_acpi.c 		tpacpi_rfk_update_hwblock_state(!t);	/* negative logic */
t                1522 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1524 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                1527 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_bluetooth_emulstate = !!t;
t                1542 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1544 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                1547 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_wwan_emulstate = !!t;
t                1562 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                1564 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                1567 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_uwb_emulstate = !!t;
t                2593 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                2605 drivers/platform/x86/thinkpad_acpi.c 	t = 0;
t                2618 drivers/platform/x86/thinkpad_acpi.c 		if (t == 0) {
t                2620 drivers/platform/x86/thinkpad_acpi.c 				t = 1000/poll_freq;
t                2622 drivers/platform/x86/thinkpad_acpi.c 				t = 100;	/* should never happen... */
t                2624 drivers/platform/x86/thinkpad_acpi.c 		t = msleep_interruptible(t);
t                2628 drivers/platform/x86/thinkpad_acpi.c 		if (t > 0 && !was_frozen)
t                2635 drivers/platform/x86/thinkpad_acpi.c 			t = 0;
t                2769 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                2774 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 1, &t))
t                2777 drivers/platform/x86/thinkpad_acpi.c 	if (t == 0)
t                2797 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                2800 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 0xffffffffUL, &t))
t                2806 drivers/platform/x86/thinkpad_acpi.c 	res = hotkey_user_mask_set(t);
t                2814 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_disclose_usertask("hotkey_mask", "set to 0x%08lx\n", t);
t                2891 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                2895 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 0xffffffffUL, &t) ||
t                2896 drivers/platform/x86/thinkpad_acpi.c 		((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
t                2903 drivers/platform/x86/thinkpad_acpi.c 	hotkey_source_mask = t;
t                2923 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
t                2942 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                2944 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 25, &t))
t                2950 drivers/platform/x86/thinkpad_acpi.c 	hotkey_poll_set_freq(t);
t                2955 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_disclose_usertask("hotkey_poll_freq", "set to %lu\n", t);
t                3075 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                3078 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, LAYFLAT_MODE, &t))
t                3081 drivers/platform/x86/thinkpad_acpi.c 	res = adaptive_keyboard_set_mode(t);
t                6289 drivers/platform/x86/thinkpad_acpi.c 	int t;
t                6293 drivers/platform/x86/thinkpad_acpi.c 	t = TP_EC_THERMAL_TMP0;
t                6299 drivers/platform/x86/thinkpad_acpi.c 			t = TP_EC_THERMAL_TMP8;
t                6306 drivers/platform/x86/thinkpad_acpi.c 			if (!acpi_ec_read(t + idx, &tmp))
t                6318 drivers/platform/x86/thinkpad_acpi.c 			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
t                6320 drivers/platform/x86/thinkpad_acpi.c 			*value = (t - 2732) * 100;
t                6328 drivers/platform/x86/thinkpad_acpi.c 			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
t                6330 drivers/platform/x86/thinkpad_acpi.c 			if (t > 127 || t < -127)
t                6331 drivers/platform/x86/thinkpad_acpi.c 				t = TP_EC_THERMAL_TMP_NA;
t                6332 drivers/platform/x86/thinkpad_acpi.c 			*value = t * 1000;
t                6371 drivers/platform/x86/thinkpad_acpi.c 	struct ibm_thermal_sensors_struct t;
t                6373 drivers/platform/x86/thinkpad_acpi.c 	n = thermal_get_sensors(&t);
t                6380 drivers/platform/x86/thinkpad_acpi.c 		if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
t                6381 drivers/platform/x86/thinkpad_acpi.c 			pr_cont(" %d", (int)(t.temp[i] / 1000));
t                6471 drivers/platform/x86/thinkpad_acpi.c 	u8 t, ta1, ta2;
t                6490 drivers/platform/x86/thinkpad_acpi.c 			if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
t                6491 drivers/platform/x86/thinkpad_acpi.c 				ta1 |= t;
t                6496 drivers/platform/x86/thinkpad_acpi.c 			if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
t                6497 drivers/platform/x86/thinkpad_acpi.c 				ta2 |= t;
t                6580 drivers/platform/x86/thinkpad_acpi.c 	struct ibm_thermal_sensors_struct t;
t                6582 drivers/platform/x86/thinkpad_acpi.c 	n = thermal_get_sensors(&t);
t                6590 drivers/platform/x86/thinkpad_acpi.c 			seq_printf(m, "%d ", t.temp[i] / 1000);
t                6591 drivers/platform/x86/thinkpad_acpi.c 		seq_printf(m, "%d\n", t.temp[i] / 1000);
t                8593 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                8596 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 2, &t))
t                8600 drivers/platform/x86/thinkpad_acpi.c 			"set fan mode to %lu\n", t);
t                8602 drivers/platform/x86/thinkpad_acpi.c 	switch (t) {
t                8736 drivers/platform/x86/thinkpad_acpi.c 	unsigned long t;
t                8738 drivers/platform/x86/thinkpad_acpi.c 	if (parse_strtoul(buf, 120, &t))
t                8744 drivers/platform/x86/thinkpad_acpi.c 	fan_watchdog_maxinterval = t;
t                8747 drivers/platform/x86/thinkpad_acpi.c 	tpacpi_disclose_usertask("fan_watchdog", "set to %lu\n", t);
t                9205 drivers/platform/x86/thinkpad_acpi.c static int mute_led_on_off(struct tp_led_table *t, bool state)
t                9210 drivers/platform/x86/thinkpad_acpi.c 	if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
t                9211 drivers/platform/x86/thinkpad_acpi.c 		pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
t                9215 drivers/platform/x86/thinkpad_acpi.c 	if (!acpi_evalf(hkey_handle, &output, t->name, "dd",
t                9216 drivers/platform/x86/thinkpad_acpi.c 			state ? t->on_value : t->off_value))
t                9219 drivers/platform/x86/thinkpad_acpi.c 	t->state = state;
t                9225 drivers/platform/x86/thinkpad_acpi.c 	struct tp_led_table *t;
t                9227 drivers/platform/x86/thinkpad_acpi.c 	t = &led_tables[whichled];
t                9228 drivers/platform/x86/thinkpad_acpi.c 	if (t->state < 0 || t->state == on)
t                9229 drivers/platform/x86/thinkpad_acpi.c 		return t->state;
t                9230 drivers/platform/x86/thinkpad_acpi.c 	return mute_led_on_off(t, on);
t                9266 drivers/platform/x86/thinkpad_acpi.c 		struct tp_led_table *t = &led_tables[i];
t                9267 drivers/platform/x86/thinkpad_acpi.c 		if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
t                9268 drivers/platform/x86/thinkpad_acpi.c 			t->state = -ENODEV;
t                9302 drivers/platform/x86/thinkpad_acpi.c 		struct tp_led_table *t = &led_tables[i];
t                9303 drivers/platform/x86/thinkpad_acpi.c 		if (t->state >= 0)
t                9304 drivers/platform/x86/thinkpad_acpi.c 			mute_led_on_off(t, t->state);
t                10084 drivers/platform/x86/thinkpad_acpi.c 	char t;
t                10104 drivers/platform/x86/thinkpad_acpi.c 	t = tpacpi_parse_fw_id(tp->bios_version_str,
t                10106 drivers/platform/x86/thinkpad_acpi.c 	if (t != 'E' && t != 'C')
t                10135 drivers/platform/x86/thinkpad_acpi.c 		t = tpacpi_parse_fw_id(ec_fw_string,
t                10137 drivers/platform/x86/thinkpad_acpi.c 		if (t != 'H') {
t                1920 drivers/power/supply/ab8500_charger.c 	int t = 10;
t                1952 drivers/power/supply/ab8500_charger.c 			t = 1;
t                1954 drivers/power/supply/ab8500_charger.c 	queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
t                  94 drivers/pps/clients/pps-gpio.c static void pps_gpio_echo_timer_callback(struct timer_list *t)
t                  98 drivers/pps/clients/pps-gpio.c 	info = from_timer(info, t, echo_timer);
t                  55 drivers/ptp/ptp_clock.c 	dst->t.sec = seconds;
t                  56 drivers/ptp/ptp_clock.c 	dst->t.nsec = remainder;
t                  89 drivers/ptp/ptp_sysfs.c 		       event.index, event.t.sec, event.t.nsec);
t                  58 drivers/pwm/pwm-vt8500.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                 148 drivers/regulator/tps6524x-regulator.c 	struct spi_transfer t[3];
t                 151 drivers/regulator/tps6524x-regulator.c 	memset(t, 0, sizeof(t));
t                 153 drivers/regulator/tps6524x-regulator.c 	t[0].tx_buf = &cmd;
t                 154 drivers/regulator/tps6524x-regulator.c 	t[0].len = 2;
t                 155 drivers/regulator/tps6524x-regulator.c 	t[0].bits_per_word = 12;
t                 156 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[0], &m);
t                 158 drivers/regulator/tps6524x-regulator.c 	t[1].rx_buf = &in;
t                 159 drivers/regulator/tps6524x-regulator.c 	t[1].len = 2;
t                 160 drivers/regulator/tps6524x-regulator.c 	t[1].bits_per_word = 16;
t                 161 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[1], &m);
t                 163 drivers/regulator/tps6524x-regulator.c 	t[2].rx_buf = &status;
t                 164 drivers/regulator/tps6524x-regulator.c 	t[2].len = 1;
t                 165 drivers/regulator/tps6524x-regulator.c 	t[2].bits_per_word = 4;
t                 166 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[2], &m);
t                 201 drivers/regulator/tps6524x-regulator.c 	struct spi_transfer t[3];
t                 204 drivers/regulator/tps6524x-regulator.c 	memset(t, 0, sizeof(t));
t                 206 drivers/regulator/tps6524x-regulator.c 	t[0].tx_buf = &cmd;
t                 207 drivers/regulator/tps6524x-regulator.c 	t[0].len = 2;
t                 208 drivers/regulator/tps6524x-regulator.c 	t[0].bits_per_word = 12;
t                 209 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[0], &m);
t                 211 drivers/regulator/tps6524x-regulator.c 	t[1].tx_buf = &out;
t                 212 drivers/regulator/tps6524x-regulator.c 	t[1].len = 2;
t                 213 drivers/regulator/tps6524x-regulator.c 	t[1].bits_per_word = 16;
t                 214 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[1], &m);
t                 216 drivers/regulator/tps6524x-regulator.c 	t[2].rx_buf = &status;
t                 217 drivers/regulator/tps6524x-regulator.c 	t[2].len = 1;
t                 218 drivers/regulator/tps6524x-regulator.c 	t[2].bits_per_word = 4;
t                 219 drivers/regulator/tps6524x-regulator.c 	spi_message_add_tail(&t[2], &m);
t                 195 drivers/remoteproc/remoteproc_debugfs.c 	struct fw_rsc_trace *t;
t                 231 drivers/remoteproc/remoteproc_debugfs.c 			t = rsc;
t                 233 drivers/remoteproc/remoteproc_debugfs.c 			seq_printf(seq, "  Device Address 0x%x\n", t->da);
t                 234 drivers/remoteproc/remoteproc_debugfs.c 			seq_printf(seq, "  Length 0x%x Bytes\n", t->len);
t                 235 drivers/remoteproc/remoteproc_debugfs.c 			seq_printf(seq, "  Reserved (should be zero) [%d]\n", t->reserved);
t                 236 drivers/remoteproc/remoteproc_debugfs.c 			seq_printf(seq, "  Name %s\n\n", t->name);
t                  72 drivers/rtc/dev.c static void rtc_uie_timer(struct timer_list *t)
t                  74 drivers/rtc/dev.c 	struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
t                 266 drivers/rtc/rtc-abx80x.c static int abx80x_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 285 drivers/rtc/rtc-abx80x.c 	t->time.tm_sec = bcd2bin(buf[0] & 0x7F);
t                 286 drivers/rtc/rtc-abx80x.c 	t->time.tm_min = bcd2bin(buf[1] & 0x7F);
t                 287 drivers/rtc/rtc-abx80x.c 	t->time.tm_hour = bcd2bin(buf[2] & 0x3F);
t                 288 drivers/rtc/rtc-abx80x.c 	t->time.tm_mday = bcd2bin(buf[3] & 0x3F);
t                 289 drivers/rtc/rtc-abx80x.c 	t->time.tm_mon = bcd2bin(buf[4] & 0x1F) - 1;
t                 290 drivers/rtc/rtc-abx80x.c 	t->time.tm_wday = buf[5] & 0x7;
t                 292 drivers/rtc/rtc-abx80x.c 	t->enabled = !!(irq_mask & ABX8XX_IRQ_AIE);
t                 293 drivers/rtc/rtc-abx80x.c 	t->pending = (buf[6] & ABX8XX_STATUS_AF) && t->enabled;
t                 298 drivers/rtc/rtc-abx80x.c static int abx80x_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 308 drivers/rtc/rtc-abx80x.c 	alarm[1] = bin2bcd(t->time.tm_sec);
t                 309 drivers/rtc/rtc-abx80x.c 	alarm[2] = bin2bcd(t->time.tm_min);
t                 310 drivers/rtc/rtc-abx80x.c 	alarm[3] = bin2bcd(t->time.tm_hour);
t                 311 drivers/rtc/rtc-abx80x.c 	alarm[4] = bin2bcd(t->time.tm_mday);
t                 312 drivers/rtc/rtc-abx80x.c 	alarm[5] = bin2bcd(t->time.tm_mon + 1);
t                 321 drivers/rtc/rtc-abx80x.c 	if (t->enabled) {
t                  33 drivers/rtc/rtc-au1xxx.c 	unsigned long t;
t                  35 drivers/rtc/rtc-au1xxx.c 	t = alchemy_rdsys(AU1000_SYS_TOYREAD);
t                  37 drivers/rtc/rtc-au1xxx.c 	rtc_time_to_tm(t, tm);
t                  44 drivers/rtc/rtc-au1xxx.c 	unsigned long t;
t                  46 drivers/rtc/rtc-au1xxx.c 	rtc_tm_to_time(tm, &t);
t                  48 drivers/rtc/rtc-au1xxx.c 	alchemy_wrsys(t, AU1000_SYS_TOYWRITE);
t                  67 drivers/rtc/rtc-au1xxx.c 	unsigned long t;
t                  70 drivers/rtc/rtc-au1xxx.c 	t = alchemy_rdsys(AU1000_SYS_CNTRCTRL);
t                  71 drivers/rtc/rtc-au1xxx.c 	if (!(t & CNTR_OK)) {
t                  82 drivers/rtc/rtc-au1xxx.c 		t = 0x00100000;
t                  83 drivers/rtc/rtc-au1xxx.c 		while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T0S) && --t)
t                  86 drivers/rtc/rtc-au1xxx.c 		if (!t) {
t                 164 drivers/rtc/rtc-bd70528.c static inline void tmday2rtc(struct rtc_time *t, struct bd70528_rtc_day *d)
t                 169 drivers/rtc/rtc-bd70528.c 	d->sec |= bin2bcd(t->tm_sec);
t                 170 drivers/rtc/rtc-bd70528.c 	d->min |= bin2bcd(t->tm_min);
t                 171 drivers/rtc/rtc-bd70528.c 	d->hour |= bin2bcd(t->tm_hour);
t                 174 drivers/rtc/rtc-bd70528.c static inline void tm2rtc(struct rtc_time *t, struct bd70528_rtc_data *r)
t                 185 drivers/rtc/rtc-bd70528.c 	tmday2rtc(t, &r->time);
t                 190 drivers/rtc/rtc-bd70528.c 	r->day |= bin2bcd(t->tm_mday);
t                 191 drivers/rtc/rtc-bd70528.c 	r->week |= bin2bcd(t->tm_wday);
t                 192 drivers/rtc/rtc-bd70528.c 	r->month |= bin2bcd(t->tm_mon + 1);
t                 193 drivers/rtc/rtc-bd70528.c 	r->year = bin2bcd(t->tm_year - 100);
t                 196 drivers/rtc/rtc-bd70528.c static inline void rtc2tm(struct bd70528_rtc_data *r, struct rtc_time *t)
t                 198 drivers/rtc/rtc-bd70528.c 	t->tm_sec = bcd2bin(r->time.sec & BD70528_MASK_RTC_SEC);
t                 199 drivers/rtc/rtc-bd70528.c 	t->tm_min = bcd2bin(r->time.min & BD70528_MASK_RTC_MINUTE);
t                 200 drivers/rtc/rtc-bd70528.c 	t->tm_hour = bcd2bin(r->time.hour & BD70528_MASK_RTC_HOUR);
t                 206 drivers/rtc/rtc-bd70528.c 		t->tm_hour %= 12;
t                 208 drivers/rtc/rtc-bd70528.c 			t->tm_hour += 12;
t                 210 drivers/rtc/rtc-bd70528.c 	t->tm_mday = bcd2bin(r->day & BD70528_MASK_RTC_DAY);
t                 211 drivers/rtc/rtc-bd70528.c 	t->tm_mon = bcd2bin(r->month & BD70528_MASK_RTC_MONTH) - 1;
t                 212 drivers/rtc/rtc-bd70528.c 	t->tm_year = 100 + bcd2bin(r->year & BD70528_MASK_RTC_YEAR);
t                 213 drivers/rtc/rtc-bd70528.c 	t->tm_wday = bcd2bin(r->week & BD70528_MASK_RTC_WEEK);
t                 288 drivers/rtc/rtc-bd70528.c static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
t                 306 drivers/rtc/rtc-bd70528.c 	tm2rtc(t, &rtc_data);
t                 324 drivers/rtc/rtc-bd70528.c static int bd70528_set_time(struct device *dev, struct rtc_time *t)
t                 330 drivers/rtc/rtc-bd70528.c 	ret = bd70528_set_time_locked(dev, t);
t                 335 drivers/rtc/rtc-bd70528.c static int bd70528_get_time(struct device *dev, struct rtc_time *t)
t                 351 drivers/rtc/rtc-bd70528.c 	rtc2tm(&rtc_data, t);
t                 432 drivers/rtc/rtc-bd70528.c 		struct rtc_time t;
t                 434 drivers/rtc/rtc-bd70528.c 		ret = bd70528_get_time(&pdev->dev, &t);
t                 437 drivers/rtc/rtc-bd70528.c 			ret = bd70528_set_time(&pdev->dev, &t);
t                  76 drivers/rtc/rtc-brcmstb-waketimer.c 		       struct wktmr_time *t)
t                  81 drivers/rtc/rtc-brcmstb-waketimer.c 		t->sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER);
t                  85 drivers/rtc/rtc-brcmstb-waketimer.c 	t->pre = timer->rate - tmp;
t                 223 drivers/rtc/rtc-cmos.c static int cmos_read_time(struct device *dev, struct rtc_time *t)
t                 236 drivers/rtc/rtc-cmos.c 	mc146818_get_time(t);
t                 240 drivers/rtc/rtc-cmos.c static int cmos_set_time(struct device *dev, struct rtc_time *t)
t                 248 drivers/rtc/rtc-cmos.c 	return mc146818_set_time(t);
t                 251 drivers/rtc/rtc-cmos.c static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 266 drivers/rtc/rtc-cmos.c 	t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
t                 267 drivers/rtc/rtc-cmos.c 	t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM);
t                 268 drivers/rtc/rtc-cmos.c 	t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM);
t                 272 drivers/rtc/rtc-cmos.c 		t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f;
t                 273 drivers/rtc/rtc-cmos.c 		if (!t->time.tm_mday)
t                 274 drivers/rtc/rtc-cmos.c 			t->time.tm_mday = -1;
t                 277 drivers/rtc/rtc-cmos.c 			t->time.tm_mon = CMOS_READ(cmos->mon_alrm);
t                 278 drivers/rtc/rtc-cmos.c 			if (!t->time.tm_mon)
t                 279 drivers/rtc/rtc-cmos.c 				t->time.tm_mon = -1;
t                 287 drivers/rtc/rtc-cmos.c 		if (((unsigned)t->time.tm_sec) < 0x60)
t                 288 drivers/rtc/rtc-cmos.c 			t->time.tm_sec = bcd2bin(t->time.tm_sec);
t                 290 drivers/rtc/rtc-cmos.c 			t->time.tm_sec = -1;
t                 291 drivers/rtc/rtc-cmos.c 		if (((unsigned)t->time.tm_min) < 0x60)
t                 292 drivers/rtc/rtc-cmos.c 			t->time.tm_min = bcd2bin(t->time.tm_min);
t                 294 drivers/rtc/rtc-cmos.c 			t->time.tm_min = -1;
t                 295 drivers/rtc/rtc-cmos.c 		if (((unsigned)t->time.tm_hour) < 0x24)
t                 296 drivers/rtc/rtc-cmos.c 			t->time.tm_hour = bcd2bin(t->time.tm_hour);
t                 298 drivers/rtc/rtc-cmos.c 			t->time.tm_hour = -1;
t                 301 drivers/rtc/rtc-cmos.c 			if (((unsigned)t->time.tm_mday) <= 0x31)
t                 302 drivers/rtc/rtc-cmos.c 				t->time.tm_mday = bcd2bin(t->time.tm_mday);
t                 304 drivers/rtc/rtc-cmos.c 				t->time.tm_mday = -1;
t                 307 drivers/rtc/rtc-cmos.c 				if (((unsigned)t->time.tm_mon) <= 0x12)
t                 308 drivers/rtc/rtc-cmos.c 					t->time.tm_mon = bcd2bin(t->time.tm_mon)-1;
t                 310 drivers/rtc/rtc-cmos.c 					t->time.tm_mon = -1;
t                 315 drivers/rtc/rtc-cmos.c 	t->enabled = !!(rtc_control & RTC_AIE);
t                 316 drivers/rtc/rtc-cmos.c 	t->pending = 0;
t                 379 drivers/rtc/rtc-cmos.c static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 392 drivers/rtc/rtc-cmos.c 		t_alrm = rtc_tm_to_time64(&t->time);
t                 416 drivers/rtc/rtc-cmos.c 		t_alrm = rtc_tm_to_time64(&t->time);
t                 435 drivers/rtc/rtc-cmos.c 		t_alrm = rtc_tm_to_time64(&t->time);
t                 446 drivers/rtc/rtc-cmos.c static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 456 drivers/rtc/rtc-cmos.c 	ret = cmos_validate_alarm(dev, t);
t                 460 drivers/rtc/rtc-cmos.c 	mon = t->time.tm_mon + 1;
t                 461 drivers/rtc/rtc-cmos.c 	mday = t->time.tm_mday;
t                 462 drivers/rtc/rtc-cmos.c 	hrs = t->time.tm_hour;
t                 463 drivers/rtc/rtc-cmos.c 	min = t->time.tm_min;
t                 464 drivers/rtc/rtc-cmos.c 	sec = t->time.tm_sec;
t                 498 drivers/rtc/rtc-cmos.c 		hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
t                 499 drivers/rtc/rtc-cmos.c 				    t->time.tm_sec);
t                 502 drivers/rtc/rtc-cmos.c 	if (t->enabled)
t                 507 drivers/rtc/rtc-cmos.c 	cmos->alarm_expires = rtc_tm_to_time64(&t->time);
t                 187 drivers/rtc/rtc-ds1307.c static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t                 264 drivers/rtc/rtc-ds1307.c 	t->tm_sec = bcd2bin(regs[DS1307_REG_SECS] & 0x7f);
t                 265 drivers/rtc/rtc-ds1307.c 	t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
t                 267 drivers/rtc/rtc-ds1307.c 	t->tm_hour = bcd2bin(tmp);
t                 268 drivers/rtc/rtc-ds1307.c 	t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
t                 269 drivers/rtc/rtc-ds1307.c 	t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
t                 271 drivers/rtc/rtc-ds1307.c 	t->tm_mon = bcd2bin(tmp) - 1;
t                 272 drivers/rtc/rtc-ds1307.c 	t->tm_year = bcd2bin(regs[DS1307_REG_YEAR]) + 100;
t                 276 drivers/rtc/rtc-ds1307.c 		t->tm_year += 100;
t                 280 drivers/rtc/rtc-ds1307.c 		"read", t->tm_sec, t->tm_min,
t                 281 drivers/rtc/rtc-ds1307.c 		t->tm_hour, t->tm_mday,
t                 282 drivers/rtc/rtc-ds1307.c 		t->tm_mon, t->tm_year, t->tm_wday);
t                 287 drivers/rtc/rtc-ds1307.c static int ds1307_set_time(struct device *dev, struct rtc_time *t)
t                 297 drivers/rtc/rtc-ds1307.c 		"write", t->tm_sec, t->tm_min,
t                 298 drivers/rtc/rtc-ds1307.c 		t->tm_hour, t->tm_mday,
t                 299 drivers/rtc/rtc-ds1307.c 		t->tm_mon, t->tm_year, t->tm_wday);
t                 301 drivers/rtc/rtc-ds1307.c 	if (t->tm_year < 100)
t                 305 drivers/rtc/rtc-ds1307.c 	if (t->tm_year > (chip->century_bit ? 299 : 199))
t                 308 drivers/rtc/rtc-ds1307.c 	if (t->tm_year > 199)
t                 312 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
t                 313 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
t                 314 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
t                 315 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
t                 316 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
t                 317 drivers/rtc/rtc-ds1307.c 	regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
t                 320 drivers/rtc/rtc-ds1307.c 	tmp = t->tm_year - 100;
t                 325 drivers/rtc/rtc-ds1307.c 	if (t->tm_year > 199 && chip->century_bit)
t                 373 drivers/rtc/rtc-ds1307.c static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 397 drivers/rtc/rtc-ds1307.c 	t->time.tm_sec = bcd2bin(regs[0] & 0x7f);
t                 398 drivers/rtc/rtc-ds1307.c 	t->time.tm_min = bcd2bin(regs[1] & 0x7f);
t                 399 drivers/rtc/rtc-ds1307.c 	t->time.tm_hour = bcd2bin(regs[2] & 0x3f);
t                 400 drivers/rtc/rtc-ds1307.c 	t->time.tm_mday = bcd2bin(regs[3] & 0x3f);
t                 403 drivers/rtc/rtc-ds1307.c 	t->enabled = !!(regs[7] & DS1337_BIT_A1IE);
t                 404 drivers/rtc/rtc-ds1307.c 	t->pending = !!(regs[8] & DS1337_BIT_A1I);
t                 408 drivers/rtc/rtc-ds1307.c 		"alarm read", t->time.tm_sec, t->time.tm_min,
t                 409 drivers/rtc/rtc-ds1307.c 		t->time.tm_hour, t->time.tm_mday,
t                 410 drivers/rtc/rtc-ds1307.c 		t->enabled, t->pending);
t                 415 drivers/rtc/rtc-ds1307.c static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 427 drivers/rtc/rtc-ds1307.c 		"alarm set", t->time.tm_sec, t->time.tm_min,
t                 428 drivers/rtc/rtc-ds1307.c 		t->time.tm_hour, t->time.tm_mday,
t                 429 drivers/rtc/rtc-ds1307.c 		t->enabled, t->pending);
t                 445 drivers/rtc/rtc-ds1307.c 	regs[0] = bin2bcd(t->time.tm_sec);
t                 446 drivers/rtc/rtc-ds1307.c 	regs[1] = bin2bcd(t->time.tm_min);
t                 447 drivers/rtc/rtc-ds1307.c 	regs[2] = bin2bcd(t->time.tm_hour);
t                 448 drivers/rtc/rtc-ds1307.c 	regs[3] = bin2bcd(t->time.tm_mday);
t                 467 drivers/rtc/rtc-ds1307.c 	if (t->enabled) {
t                 543 drivers/rtc/rtc-ds1307.c static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 564 drivers/rtc/rtc-ds1307.c 	t->enabled = !!(ctl[2] & RX8130_REG_CONTROL0_AIE);
t                 565 drivers/rtc/rtc-ds1307.c 	t->pending = !!(ctl[1] & RX8130_REG_FLAG_AF);
t                 568 drivers/rtc/rtc-ds1307.c 	t->time.tm_sec = -1;
t                 569 drivers/rtc/rtc-ds1307.c 	t->time.tm_min = bcd2bin(ald[0] & 0x7f);
t                 570 drivers/rtc/rtc-ds1307.c 	t->time.tm_hour = bcd2bin(ald[1] & 0x7f);
t                 571 drivers/rtc/rtc-ds1307.c 	t->time.tm_wday = -1;
t                 572 drivers/rtc/rtc-ds1307.c 	t->time.tm_mday = bcd2bin(ald[2] & 0x7f);
t                 573 drivers/rtc/rtc-ds1307.c 	t->time.tm_mon = -1;
t                 574 drivers/rtc/rtc-ds1307.c 	t->time.tm_year = -1;
t                 575 drivers/rtc/rtc-ds1307.c 	t->time.tm_yday = -1;
t                 576 drivers/rtc/rtc-ds1307.c 	t->time.tm_isdst = -1;
t                 579 drivers/rtc/rtc-ds1307.c 		__func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t                 580 drivers/rtc/rtc-ds1307.c 		t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled);
t                 585 drivers/rtc/rtc-ds1307.c static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 596 drivers/rtc/rtc-ds1307.c 		t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t                 597 drivers/rtc/rtc-ds1307.c 		t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
t                 598 drivers/rtc/rtc-ds1307.c 		t->enabled, t->pending);
t                 616 drivers/rtc/rtc-ds1307.c 	ald[0] = bin2bcd(t->time.tm_min);
t                 617 drivers/rtc/rtc-ds1307.c 	ald[1] = bin2bcd(t->time.tm_hour);
t                 618 drivers/rtc/rtc-ds1307.c 	ald[2] = bin2bcd(t->time.tm_mday);
t                 625 drivers/rtc/rtc-ds1307.c 	if (!t->enabled)
t                 686 drivers/rtc/rtc-ds1307.c static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 701 drivers/rtc/rtc-ds1307.c 	t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN);
t                 704 drivers/rtc/rtc-ds1307.c 	t->time.tm_sec = bcd2bin(regs[3] & 0x7f);
t                 705 drivers/rtc/rtc-ds1307.c 	t->time.tm_min = bcd2bin(regs[4] & 0x7f);
t                 706 drivers/rtc/rtc-ds1307.c 	t->time.tm_hour = bcd2bin(regs[5] & 0x3f);
t                 707 drivers/rtc/rtc-ds1307.c 	t->time.tm_wday = bcd2bin(regs[6] & 0x7) - 1;
t                 708 drivers/rtc/rtc-ds1307.c 	t->time.tm_mday = bcd2bin(regs[7] & 0x3f);
t                 709 drivers/rtc/rtc-ds1307.c 	t->time.tm_mon = bcd2bin(regs[8] & 0x1f) - 1;
t                 710 drivers/rtc/rtc-ds1307.c 	t->time.tm_year = -1;
t                 711 drivers/rtc/rtc-ds1307.c 	t->time.tm_yday = -1;
t                 712 drivers/rtc/rtc-ds1307.c 	t->time.tm_isdst = -1;
t                 716 drivers/rtc/rtc-ds1307.c 		t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t                 717 drivers/rtc/rtc-ds1307.c 		t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled,
t                 744 drivers/rtc/rtc-ds1307.c static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 753 drivers/rtc/rtc-ds1307.c 	wday = mcp794xx_alm_weekday(dev, &t->time);
t                 759 drivers/rtc/rtc-ds1307.c 		t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t                 760 drivers/rtc/rtc-ds1307.c 		t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
t                 761 drivers/rtc/rtc-ds1307.c 		t->enabled, t->pending);
t                 770 drivers/rtc/rtc-ds1307.c 	regs[3] = bin2bcd(t->time.tm_sec);
t                 771 drivers/rtc/rtc-ds1307.c 	regs[4] = bin2bcd(t->time.tm_min);
t                 772 drivers/rtc/rtc-ds1307.c 	regs[5] = bin2bcd(t->time.tm_hour);
t                 774 drivers/rtc/rtc-ds1307.c 	regs[7] = bin2bcd(t->time.tm_mday);
t                 775 drivers/rtc/rtc-ds1307.c 	regs[8] = bin2bcd(t->time.tm_mon + 1);
t                 789 drivers/rtc/rtc-ds1307.c 	if (!t->enabled)
t                  92 drivers/rtc/rtc-fm3130.c static int fm3130_get_time(struct device *dev, struct rtc_time *t)
t                 117 drivers/rtc/rtc-fm3130.c 	t->tm_sec = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
t                 118 drivers/rtc/rtc-fm3130.c 	t->tm_min = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
t                 120 drivers/rtc/rtc-fm3130.c 	t->tm_hour = bcd2bin(tmp);
t                 121 drivers/rtc/rtc-fm3130.c 	t->tm_wday = bcd2bin(fm3130->regs[FM3130_RTC_DAY] & 0x07) - 1;
t                 122 drivers/rtc/rtc-fm3130.c 	t->tm_mday = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
t                 124 drivers/rtc/rtc-fm3130.c 	t->tm_mon = bcd2bin(tmp) - 1;
t                 127 drivers/rtc/rtc-fm3130.c 	t->tm_year = bcd2bin(fm3130->regs[FM3130_RTC_YEARS]) + 100;
t                 131 drivers/rtc/rtc-fm3130.c 		"read", t->tm_sec, t->tm_min,
t                 132 drivers/rtc/rtc-fm3130.c 		t->tm_hour, t->tm_mday,
t                 133 drivers/rtc/rtc-fm3130.c 		t->tm_mon, t->tm_year, t->tm_wday);
t                 139 drivers/rtc/rtc-fm3130.c static int fm3130_set_time(struct device *dev, struct rtc_time *t)
t                 147 drivers/rtc/rtc-fm3130.c 		"write", t->tm_sec, t->tm_min,
t                 148 drivers/rtc/rtc-fm3130.c 		t->tm_hour, t->tm_mday,
t                 149 drivers/rtc/rtc-fm3130.c 		t->tm_mon, t->tm_year, t->tm_wday);
t                 152 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_SECONDS] = bin2bcd(t->tm_sec);
t                 153 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_MINUTES] = bin2bcd(t->tm_min);
t                 154 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_HOURS] = bin2bcd(t->tm_hour);
t                 155 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_DAY] = bin2bcd(t->tm_wday + 1);
t                 156 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_DATE] = bin2bcd(t->tm_mday);
t                 157 drivers/rtc/rtc-fm3130.c 	buf[FM3130_RTC_MONTHS] = bin2bcd(t->tm_mon + 1);
t                 160 drivers/rtc/rtc-fm3130.c 	tmp = t->tm_year - 100;
t                  76 drivers/rtc/rtc-ls1x.c #define ls1x_get_sec(t)		(((t) >> LS1X_SEC_OFFSET) & LS1X_SEC_MASK)
t                  77 drivers/rtc/rtc-ls1x.c #define ls1x_get_min(t)		(((t) >> LS1X_MIN_OFFSET) & LS1X_MIN_MASK)
t                  78 drivers/rtc/rtc-ls1x.c #define ls1x_get_hour(t)	(((t) >> LS1X_HOUR_OFFSET) & LS1X_HOUR_MASK)
t                  79 drivers/rtc/rtc-ls1x.c #define ls1x_get_day(t)		(((t) >> LS1X_DAY_OFFSET) & LS1X_DAY_MASK)
t                  80 drivers/rtc/rtc-ls1x.c #define ls1x_get_month(t)	(((t) >> LS1X_MONTH_OFFSET) & LS1X_MONTH_MASK)
t                  87 drivers/rtc/rtc-ls1x.c 	time64_t t;
t                  90 drivers/rtc/rtc-ls1x.c 	t = readl(SYS_TOYREAD1);
t                  93 drivers/rtc/rtc-ls1x.c 	t  = mktime64((t & LS1X_YEAR_MASK), ls1x_get_month(v),
t                  96 drivers/rtc/rtc-ls1x.c 	rtc_time64_to_tm(t, rtm);
t                 103 drivers/rtc/rtc-ls1x.c 	unsigned long v, t, c;
t                 123 drivers/rtc/rtc-ls1x.c 	t = rtm->tm_year + 1900;
t                 124 drivers/rtc/rtc-ls1x.c 	writel(t, SYS_TOYWRITE1);
t                 139 drivers/rtc/rtc-mrst.c static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 149 drivers/rtc/rtc-mrst.c 	t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
t                 150 drivers/rtc/rtc-mrst.c 	t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
t                 151 drivers/rtc/rtc-mrst.c 	t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
t                 156 drivers/rtc/rtc-mrst.c 	t->enabled = !!(rtc_control & RTC_AIE);
t                 157 drivers/rtc/rtc-mrst.c 	t->pending = 0;
t                 203 drivers/rtc/rtc-mrst.c static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 212 drivers/rtc/rtc-mrst.c 	hrs = t->time.tm_hour;
t                 213 drivers/rtc/rtc-mrst.c 	min = t->time.tm_min;
t                 214 drivers/rtc/rtc-mrst.c 	sec = t->time.tm_sec;
t                 232 drivers/rtc/rtc-mrst.c 	if (t->enabled)
t                 334 drivers/rtc/rtc-rs5c372.c 			int t = tmp & 0x3f;
t                 337 drivers/rtc/rtc-rs5c372.c 				t = (~t | (s8)0xc0) + 1;
t                 339 drivers/rtc/rtc-rs5c372.c 				t = t - 1;
t                 341 drivers/rtc/rtc-rs5c372.c 			tmp = t * 2;
t                 392 drivers/rtc/rtc-rs5c372.c static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 403 drivers/rtc/rtc-rs5c372.c 	t->time.tm_sec = 0;
t                 404 drivers/rtc/rtc-rs5c372.c 	t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
t                 405 drivers/rtc/rtc-rs5c372.c 	t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
t                 408 drivers/rtc/rtc-rs5c372.c 	t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
t                 409 drivers/rtc/rtc-rs5c372.c 	t->pending = !!(rs5c->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_AAFG);
t                 414 drivers/rtc/rtc-rs5c372.c static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 422 drivers/rtc/rtc-rs5c372.c 	if (t->time.tm_mday != -1
t                 423 drivers/rtc/rtc-rs5c372.c 			|| t->time.tm_mon != -1
t                 424 drivers/rtc/rtc-rs5c372.c 			|| t->time.tm_year != -1)
t                 444 drivers/rtc/rtc-rs5c372.c 	buf[0] = bin2bcd(t->time.tm_min);
t                 445 drivers/rtc/rtc-rs5c372.c 	buf[1] = rs5c_hr2reg(rs5c, t->time.tm_hour);
t                 457 drivers/rtc/rtc-rs5c372.c 	if (t->enabled) {
t                 249 drivers/rtc/rtc-rx8010.c static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 265 drivers/rtc/rtc-rx8010.c 	t->time.tm_sec = 0;
t                 266 drivers/rtc/rtc-rx8010.c 	t->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
t                 267 drivers/rtc/rtc-rx8010.c 	t->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
t                 270 drivers/rtc/rtc-rx8010.c 		t->time.tm_mday = bcd2bin(alarmvals[2] & 0x7f);
t                 272 drivers/rtc/rtc-rx8010.c 	t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t                 273 drivers/rtc/rtc-rx8010.c 	t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
t                 278 drivers/rtc/rtc-rx8010.c static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 305 drivers/rtc/rtc-rx8010.c 	alarmvals[0] = bin2bcd(t->time.tm_min);
t                 306 drivers/rtc/rtc-rx8010.c 	alarmvals[1] = bin2bcd(t->time.tm_hour);
t                 307 drivers/rtc/rtc-rx8010.c 	alarmvals[2] = bin2bcd(t->time.tm_mday);
t                 331 drivers/rtc/rtc-rx8010.c 	if (t->enabled) {
t                 281 drivers/rtc/rtc-rx8025.c static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 303 drivers/rtc/rtc-rx8025.c 	t->time.tm_sec = 0;
t                 304 drivers/rtc/rtc-rx8025.c 	t->time.tm_min = bcd2bin(ald[0] & 0x7f);
t                 306 drivers/rtc/rtc-rx8025.c 		t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
t                 308 drivers/rtc/rtc-rx8025.c 		t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
t                 311 drivers/rtc/rtc-rx8025.c 	dev_dbg(dev, "%s: date: %ptRr\n", __func__, &t->time);
t                 312 drivers/rtc/rtc-rx8025.c 	t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE);
t                 313 drivers/rtc/rtc-rx8025.c 	t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled;
t                 318 drivers/rtc/rtc-rx8025.c static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 332 drivers/rtc/rtc-rx8025.c 	if (t->time.tm_sec) {
t                 333 drivers/rtc/rtc-rx8025.c 		time64_t alarm_time = rtc_tm_to_time64(&t->time);
t                 335 drivers/rtc/rtc-rx8025.c 		alarm_time += 60 - t->time.tm_sec;
t                 336 drivers/rtc/rtc-rx8025.c 		rtc_time64_to_tm(alarm_time, &t->time);
t                 339 drivers/rtc/rtc-rx8025.c 	ald[0] = bin2bcd(t->time.tm_min);
t                 341 drivers/rtc/rtc-rx8025.c 		ald[1] = bin2bcd(t->time.tm_hour);
t                 343 drivers/rtc/rtc-rx8025.c 		ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
t                 344 drivers/rtc/rtc-rx8025.c 			| bin2bcd((t->time.tm_hour + 11) % 12 + 1);
t                 359 drivers/rtc/rtc-rx8025.c 	if (t->enabled) {
t                 153 drivers/rtc/rtc-st-lpc.c static int st_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
t                 163 drivers/rtc/rtc-st-lpc.c 	alarm_secs = rtc_tm_to_time64(&t->time);
t                 165 drivers/rtc/rtc-st-lpc.c 	memcpy(&rtc->alarm, t, sizeof(struct rtc_wkalrm));
t                 172 drivers/rtc/rtc-st-lpc.c 	st_rtc_alarm_irq_enable(dev, t->enabled);
t                 109 drivers/rtc/rtc-test.c static void test_rtc_alarm_handler(struct timer_list *t)
t                 111 drivers/rtc/rtc-test.c 	struct rtc_test_data *rtd = from_timer(rtd, t, alarm);
t                1544 drivers/s390/block/dasd.c static void dasd_device_timeout(struct timer_list *t)
t                1549 drivers/s390/block/dasd.c 	device = from_timer(device, t, timer);
t                2704 drivers/s390/block/dasd.c static void dasd_block_timeout(struct timer_list *t)
t                2709 drivers/s390/block/dasd.c 	block = from_timer(block, t, timer);
t                 285 drivers/s390/char/con3215.c static void raw3215_timeout(struct timer_list *t)
t                 287 drivers/s390/char/con3215.c 	struct raw3215_info *raw = from_timer(raw, t, timer);
t                 208 drivers/s390/char/con3270.c con3270_update(struct timer_list *t)
t                 210 drivers/s390/char/con3270.c 	struct con3270 *cp = from_timer(cp, t, timer);
t                 662 drivers/s390/char/sclp.c 	struct sclp_register *t;
t                 667 drivers/s390/char/sclp.c 		t = list_entry(l, struct sclp_register, list);
t                 668 drivers/s390/char/sclp.c 		*receive_mask |= t->receive_mask;
t                 669 drivers/s390/char/sclp.c 		*send_mask |= t->send_mask;
t                 289 drivers/s390/char/sclp_tty.c 	struct sclp_buffer *t;
t                 297 drivers/s390/char/sclp_tty.c 		t = list_entry(l, struct sclp_buffer, list);
t                 298 drivers/s390/char/sclp_tty.c 		count += sclp_chars_in_buffer(t);
t                  36 drivers/s390/char/tape_core.c static void tape_long_busy_timeout(struct timer_list *t);
t                 871 drivers/s390/char/tape_core.c static void tape_long_busy_timeout(struct timer_list *t)
t                 873 drivers/s390/char/tape_core.c 	struct tape_device *device = from_timer(device, t, lb_timeout);
t                  36 drivers/s390/char/tape_std.c tape_std_assign_timeout(struct timer_list *t)
t                  38 drivers/s390/char/tape_std.c 	struct tape_request *	request = from_timer(request, t, timer);
t                 365 drivers/s390/char/tty3270.c tty3270_update(struct timer_list *t)
t                 367 drivers/s390/char/tty3270.c 	struct tty3270 *tp = from_timer(tp, t, timer);
t                 699 drivers/s390/cio/chp.c 	enum cfg_task_t t = cfg_none;
t                 702 drivers/s390/cio/chp.c 		t = cfg_get_task(*chpid);
t                 703 drivers/s390/cio/chp.c 		if (t != cfg_none)
t                 707 drivers/s390/cio/chp.c 	return t;
t                 715 drivers/s390/cio/chp.c 	enum cfg_task_t t;
t                 719 drivers/s390/cio/chp.c 	t = chp_cfg_fetch_task(&chpid);
t                 722 drivers/s390/cio/chp.c 	switch (t) {
t                 750 drivers/s390/cio/chp.c 	if (t == cfg_get_task(chpid))
t                 792 drivers/s390/cio/chp.c 	enum cfg_task_t t;
t                 795 drivers/s390/cio/chp.c 	t = chp_cfg_fetch_task(&chpid);
t                 798 drivers/s390/cio/chp.c 	return t == cfg_none;
t                 138 drivers/s390/cio/device.h void ccw_device_timeout(struct timer_list *t);
t                  98 drivers/s390/cio/device_fsm.c ccw_device_timeout(struct timer_list *t)
t                 100 drivers/s390/cio/device_fsm.c 	struct ccw_device_private *priv = from_timer(priv, t, timer);
t                  98 drivers/s390/cio/eadm_sch.c static void eadm_subchannel_timeout(struct timer_list *t)
t                 100 drivers/s390/cio/eadm_sch.c 	struct eadm_private *private = from_timer(private, t, timer);
t                 391 drivers/s390/cio/qdio.h void qdio_outbound_timer(struct timer_list *t);
t                 871 drivers/s390/cio/qdio_main.c void qdio_outbound_timer(struct timer_list *t)
t                 873 drivers/s390/cio/qdio_main.c 	struct qdio_q *q = from_timer(q, t, u.out.timer);
t                 385 drivers/s390/crypto/ap_bus.c void ap_request_timeout(struct timer_list *t)
t                 387 drivers/s390/crypto/ap_bus.c 	struct ap_queue *aq = from_timer(aq, t, timeout);
t                 182 drivers/s390/crypto/pkey_api.c 	struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
t                 193 drivers/s390/crypto/pkey_api.c 		*pkeysize = t->bitsize;
t                 309 drivers/s390/crypto/pkey_api.c 	struct protaeskeytoken *t;
t                 316 drivers/s390/crypto/pkey_api.c 		t = (struct protaeskeytoken *)key;
t                 317 drivers/s390/crypto/pkey_api.c 		protkey->len = t->len;
t                 318 drivers/s390/crypto/pkey_api.c 		protkey->type = t->keytype;
t                 319 drivers/s390/crypto/pkey_api.c 		memcpy(protkey->protkey, t->protkey,
t                 497 drivers/s390/crypto/pkey_api.c 		struct secaeskeytoken *t = (struct secaeskeytoken *)key;
t                 505 drivers/s390/crypto/pkey_api.c 			*ksize = (enum pkey_key_size) t->bitsize;
t                 508 drivers/s390/crypto/pkey_api.c 				   ZCRYPT_CEX3C, t->mkvp, 0, 1);
t                 514 drivers/s390/crypto/pkey_api.c 					   ZCRYPT_CEX3C, 0, t->mkvp, 1);
t                 525 drivers/s390/crypto/pkey_api.c 		struct cipherkeytoken *t = (struct cipherkeytoken *)key;
t                 534 drivers/s390/crypto/pkey_api.c 			if (!t->plfver && t->wpllen == 512)
t                 536 drivers/s390/crypto/pkey_api.c 			else if (!t->plfver && t->wpllen == 576)
t                 538 drivers/s390/crypto/pkey_api.c 			else if (!t->plfver && t->wpllen == 640)
t                 543 drivers/s390/crypto/pkey_api.c 				   ZCRYPT_CEX6, t->mkvp0, 0, 1);
t                 549 drivers/s390/crypto/pkey_api.c 					   ZCRYPT_CEX6, 0, t->mkvp0, 1);
t                 644 drivers/s390/crypto/pkey_api.c 			struct secaeskeytoken *t = (struct secaeskeytoken *)key;
t                 647 drivers/s390/crypto/pkey_api.c 				cur_mkvp = t->mkvp;
t                 649 drivers/s390/crypto/pkey_api.c 				old_mkvp = t->mkvp;
t                 651 drivers/s390/crypto/pkey_api.c 			struct cipherkeytoken *t = (struct cipherkeytoken *)key;
t                 655 drivers/s390/crypto/pkey_api.c 				cur_mkvp = t->mkvp0;
t                 657 drivers/s390/crypto/pkey_api.c 				old_mkvp = t->mkvp0;
t                  56 drivers/s390/crypto/zcrypt_ccamisc.c 	struct secaeskeytoken *t = (struct secaeskeytoken *) token;
t                  60 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->type != TOKTYPE_CCA_INTERNAL) {
t                  63 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
t                  66 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->version != TOKVER_CCA_AES) {
t                  69 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->version, TOKVER_CCA_AES);
t                  72 drivers/s390/crypto/zcrypt_ccamisc.c 	if (keybitsize > 0 && t->bitsize != keybitsize) {
t                  75 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->bitsize, keybitsize);
t                  96 drivers/s390/crypto/zcrypt_ccamisc.c 	struct cipherkeytoken *t = (struct cipherkeytoken *) token;
t                 101 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->type != TOKTYPE_CCA_INTERNAL) {
t                 104 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
t                 107 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->version != TOKVER_CCA_VLSC) {
t                 110 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->version, TOKVER_CCA_VLSC);
t                 113 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->algtype != 0x02) {
t                 116 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->algtype);
t                 119 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->keytype != 0x0001) {
t                 122 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->keytype);
t                 125 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->plfver != 0x00 && t->plfver != 0x01) {
t                 128 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->plfver);
t                 131 drivers/s390/crypto/zcrypt_ccamisc.c 	if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
t                 134 drivers/s390/crypto/zcrypt_ccamisc.c 			    __func__, (int) t->wpllen);
t                 140 drivers/s390/crypto/zcrypt_ccamisc.c 			if (t->wpllen != (t->plfver ? 640 : 512))
t                 144 drivers/s390/crypto/zcrypt_ccamisc.c 			if (t->wpllen != (t->plfver ? 640 : 576))
t                 148 drivers/s390/crypto/zcrypt_ccamisc.c 			if (t->wpllen != 640)
t                 162 drivers/s390/crypto/zcrypt_ccamisc.c 	if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
t                 786 drivers/s390/crypto/zcrypt_ccamisc.c 	struct cipherkeytoken *t;
t                 840 drivers/s390/crypto/zcrypt_ccamisc.c 		t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
t                 841 drivers/s390/crypto/zcrypt_ccamisc.c 		t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
t                 842 drivers/s390/crypto/zcrypt_ccamisc.c 		t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
t                 891 drivers/s390/crypto/zcrypt_ccamisc.c 	t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
t                 893 drivers/s390/crypto/zcrypt_ccamisc.c 		if (*keybufsize >= t->len)
t                 894 drivers/s390/crypto/zcrypt_ccamisc.c 			memcpy(keybuf, t, t->len);
t                 898 drivers/s390/crypto/zcrypt_ccamisc.c 	*keybufsize = t->len;
t                 963 drivers/s390/crypto/zcrypt_ccamisc.c 	struct cipherkeytoken *t;
t                1051 drivers/s390/crypto/zcrypt_ccamisc.c 	t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
t                1052 drivers/s390/crypto/zcrypt_ccamisc.c 	memcpy(key_token, t, t->len);
t                1053 drivers/s390/crypto/zcrypt_ccamisc.c 	*key_token_size = t->len;
t                1070 drivers/s390/crypto/zcrypt_ccamisc.c 	struct cipherkeytoken *t;
t                1086 drivers/s390/crypto/zcrypt_ccamisc.c 		t = (struct cipherkeytoken *) token;
t                1087 drivers/s390/crypto/zcrypt_ccamisc.c 		t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
t                1088 drivers/s390/crypto/zcrypt_ccamisc.c 		t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
t                 133 drivers/s390/net/fsm.c fsm_expire_timer(struct timer_list *t)
t                 135 drivers/s390/net/fsm.c 	fsm_timer *this = from_timer(this, t, tl);
t                 820 drivers/s390/net/lcs.c lcs_lancmd_timeout(struct timer_list *t)
t                 822 drivers/s390/net/lcs.c 	struct lcs_reply *reply = from_timer(reply, t, timer);
t                 612 drivers/s390/scsi/zfcp_erp.c void zfcp_erp_timeout_handler(struct timer_list *t)
t                 614 drivers/s390/scsi/zfcp_erp.c 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
t                 620 drivers/s390/scsi/zfcp_erp.c static void zfcp_erp_memwait_handler(struct timer_list *t)
t                 622 drivers/s390/scsi/zfcp_erp.c 	struct zfcp_erp_action *act = from_timer(act, t, timer);
t                  84 drivers/s390/scsi/zfcp_ext.h extern void zfcp_erp_timeout_handler(struct timer_list *t);
t                  35 drivers/s390/scsi/zfcp_fsf.c static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
t                  37 drivers/s390/scsi/zfcp_fsf.c 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
t                 202 drivers/scsi/aic7xxx/aic79xx_core.c static void		ahd_stat_timer(struct timer_list *t);
t                8830 drivers/scsi/aic7xxx/aic79xx_core.c ahd_stat_timer(struct timer_list *t)
t                8832 drivers/scsi/aic7xxx/aic79xx_core.c 	struct	ahd_softc *ahd = from_timer(ahd, t, stat_timer);
t                1136 drivers/scsi/aic94xx/aic94xx_hwi.c 	struct asd_dma_tok t = ascb->dma_scb;
t                1141 drivers/scsi/aic94xx/aic94xx_hwi.c 	seq->next_scb = t;
t                 376 drivers/scsi/aic94xx/aic94xx_hwi.h void asd_ascb_timedout(struct timer_list *t);
t                 859 drivers/scsi/aic94xx/aic94xx_scb.c void asd_ascb_timedout(struct timer_list *t)
t                 861 drivers/scsi/aic94xx/aic94xx_scb.c 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
t                 532 drivers/scsi/aic94xx/aic94xx_task.c 	struct sas_task *t = task;
t                 550 drivers/scsi/aic94xx/aic94xx_task.c 		a->uldd_task = t;
t                 551 drivers/scsi/aic94xx/aic94xx_task.c 		t->lldd_task = a;
t                 555 drivers/scsi/aic94xx/aic94xx_task.c 		t = a->uldd_task;
t                 557 drivers/scsi/aic94xx/aic94xx_task.c 		if (t->task_proto & SAS_PROTOCOL_STP)
t                 558 drivers/scsi/aic94xx/aic94xx_task.c 			t->task_proto = SAS_PROTOCOL_STP;
t                 559 drivers/scsi/aic94xx/aic94xx_task.c 		switch (t->task_proto) {
t                 562 drivers/scsi/aic94xx/aic94xx_task.c 			res = asd_build_ata_ascb(a, t, gfp_flags);
t                 565 drivers/scsi/aic94xx/aic94xx_task.c 			res = asd_build_smp_ascb(a, t, gfp_flags);
t                 568 drivers/scsi/aic94xx/aic94xx_task.c 			res = asd_build_ssp_ascb(a, t, gfp_flags);
t                 572 drivers/scsi/aic94xx/aic94xx_task.c 				   t->task_proto);
t                 579 drivers/scsi/aic94xx/aic94xx_task.c 		spin_lock_irqsave(&t->task_state_lock, flags);
t                 580 drivers/scsi/aic94xx/aic94xx_task.c 		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
t                 581 drivers/scsi/aic94xx/aic94xx_task.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                 599 drivers/scsi/aic94xx/aic94xx_task.c 			t = a->uldd_task;
t                 600 drivers/scsi/aic94xx/aic94xx_task.c 			spin_lock_irqsave(&t->task_state_lock, flags);
t                 601 drivers/scsi/aic94xx/aic94xx_task.c 			t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                 602 drivers/scsi/aic94xx/aic94xx_task.c 			spin_unlock_irqrestore(&t->task_state_lock, flags);
t                 603 drivers/scsi/aic94xx/aic94xx_task.c 			switch (t->task_proto) {
t                 616 drivers/scsi/aic94xx/aic94xx_task.c 			t->lldd_task = NULL;
t                  20 drivers/scsi/aic94xx/aic94xx_tmf.c 				void (*timed_out)(struct timer_list *t))
t                  71 drivers/scsi/aic94xx/aic94xx_tmf.c static void asd_clear_nexus_timedout(struct timer_list *t)
t                  73 drivers/scsi/aic94xx/aic94xx_tmf.c 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
t                 245 drivers/scsi/aic94xx/aic94xx_tmf.c static void asd_tmf_timedout(struct timer_list *t)
t                 247 drivers/scsi/aic94xx/aic94xx_tmf.c 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
t                 128 drivers/scsi/arcmsr/arcmsr_hba.c static void arcmsr_request_device_map(struct timer_list *t);
t                3673 drivers/scsi/arcmsr/arcmsr_hba.c static void arcmsr_set_iop_datetime(struct timer_list *t)
t                3675 drivers/scsi/arcmsr/arcmsr_hba.c 	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
t                3965 drivers/scsi/arcmsr/arcmsr_hba.c static void arcmsr_request_device_map(struct timer_list *t)
t                3967 drivers/scsi/arcmsr/arcmsr_hba.c 	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
t                2326 drivers/scsi/arm/fas216.c static void fas216_eh_timer(struct timer_list *t)
t                2328 drivers/scsi/arm/fas216.c 	FAS216_Info *info = from_timer(info, t, eh_timer);
t                5249 drivers/scsi/be2iscsi/be_main.c static void beiscsi_hw_tpe_check(struct timer_list *t)
t                5251 drivers/scsi/be2iscsi/be_main.c 	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
t                5266 drivers/scsi/be2iscsi/be_main.c static void beiscsi_hw_health_check(struct timer_list *t)
t                5268 drivers/scsi/be2iscsi/be_main.c 	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
t                  74 drivers/scsi/bfa/bfa_defs.h 	u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
t                  76 drivers/scsi/bfa/bfa_defs.h 	t += (i);      \
t                  77 drivers/scsi/bfa/bfa_defs.h 	(m)[0] = (t >> 16) & 0xFF;                              \
t                  78 drivers/scsi/bfa/bfa_defs.h 	(m)[1] = (t >> 8) & 0xFF;                               \
t                  79 drivers/scsi/bfa/bfa_defs.h 	(m)[2] = t & 0xFF;                                      \
t                6787 drivers/scsi/bfa/bfa_ioc.c 	u32 t;
t                6797 drivers/scsi/bfa/bfa_ioc.c 		t = readl(pci_bar + FLI_RDDATA_REG);
t                 686 drivers/scsi/bfa/bfad.c bfad_bfa_tmo(struct timer_list *t)
t                 688 drivers/scsi/bfa/bfad.c 	struct bfad_s	      *bfad = from_timer(bfad, t, hal_tmo);
t                 309 drivers/scsi/bfa/bfad_drv.h void		bfad_bfa_tmo(struct timer_list *t);
t                 839 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_destroy_timer(struct timer_list *t)
t                 841 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer);
t                  17 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_upld_timer(struct timer_list *t);
t                  18 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_ofld_timer(struct timer_list *t);
t                  30 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_upld_timer(struct timer_list *t)
t                  33 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
t                  43 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_ofld_timer(struct timer_list *t)
t                  46 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
t                 861 drivers/scsi/bnx2i/bnx2i.h extern void bnx2i_ep_ofld_timer(struct timer_list *t);
t                 685 drivers/scsi/bnx2i/bnx2i_hwi.c void bnx2i_ep_ofld_timer(struct timer_list *t)
t                 687 drivers/scsi/bnx2i/bnx2i_hwi.c 	struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer);
t                3739 drivers/scsi/csiostor/csio_hw.c csio_hw_mb_timer(struct timer_list *t)
t                3741 drivers/scsi/csiostor/csio_hw.c 	struct csio_mbm *mbm = from_timer(mbm, t, timer);
t                4108 drivers/scsi/csiostor/csio_hw.c csio_mgmt_tmo_handler(struct timer_list *t)
t                4110 drivers/scsi/csiostor/csio_hw.c 	struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
t                 548 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static void act_open_retry_timer(struct timer_list *t)
t                 550 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
t                 817 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                 820 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_atid(t, atid);
t                 906 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void csk_act_open_retry_timer(struct timer_list *t)
t                 909 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
t                 970 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                 972 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_atid(t, atid);
t                1016 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1018 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1037 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1039 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1076 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1079 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1124 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1126 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1149 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1151 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1171 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1173 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1274 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1279 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1372 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1375 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1436 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1443 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1528 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1530 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                1548 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct tid_info *t = lldi->tids;
t                1551 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk = lookup_tid(t, tid);
t                 897 drivers/scsi/cxlflash/superpipe.c 	struct lun_access *lun_access, *t;
t                 936 drivers/scsi/cxlflash/superpipe.c 	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
t                 998 drivers/scsi/cxlflash/superpipe.c 	struct lun_access *lun_access, *t;
t                1028 drivers/scsi/cxlflash/superpipe.c 	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
t                 398 drivers/scsi/dc395x.c static void waiting_timeout(struct timer_list *t);
t                 837 drivers/scsi/dc395x.c static void waiting_timeout(struct timer_list *t)
t                 840 drivers/scsi/dc395x.c 	struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
t                1117 drivers/scsi/esas2r/esas2r.h void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
t                1384 drivers/scsi/esas2r/esas2r.h static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
t                1387 drivers/scsi/esas2r/esas2r.h 	return (u16)(uintptr_t)(t - a->targetdb);
t                 944 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_target *t = NULL;
t                 974 drivers/scsi/esas2r/esas2r_disc.c 				t = esas2r_targ_db_add_pthru(a,
t                 983 drivers/scsi/esas2r/esas2r_disc.c 				if (t)
t                 984 drivers/scsi/esas2r/esas2r_disc.c 					memcpy(&t->sas_addr, &dc->sas_addr,
t                 985 drivers/scsi/esas2r/esas2r_disc.c 					       sizeof(t->sas_addr));
t                1048 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_target *t;
t                1055 drivers/scsi/esas2r/esas2r_disc.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                1056 drivers/scsi/esas2r/esas2r_disc.c 		if (t->new_target_state != TS_NOT_PRESENT)
t                1059 drivers/scsi/esas2r/esas2r_disc.c 		t->new_target_state = TS_INVALID;
t                1065 drivers/scsi/esas2r/esas2r_disc.c 						       esas2r_targ_get_id(t,
t                1087 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_target *t = dc->curr_targ;
t                1089 drivers/scsi/esas2r/esas2r_disc.c 	if (t >= a->targetdb_end) {
t                1093 drivers/scsi/esas2r/esas2r_disc.c 	} else if (t->new_target_state == TS_PRESENT) {
t                1094 drivers/scsi/esas2r/esas2r_disc.c 		struct atto_vda_ae_lu *luevt = &t->lu_event;
t                1100 drivers/scsi/esas2r/esas2r_disc.c 		t->new_target_state = TS_INVALID;
t                1104 drivers/scsi/esas2r/esas2r_disc.c 		dc->curr_virt_id = esas2r_targ_get_id(t, a);
t                1161 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_target *t;
t                1172 drivers/scsi/esas2r/esas2r_disc.c 			t = a->targetdb + rq->target_id;
t                1174 drivers/scsi/esas2r/esas2r_disc.c 			if (t->target_state == TS_PRESENT)
t                1176 drivers/scsi/esas2r/esas2r_disc.c 					t->virt_targ_id);
t                 705 drivers/scsi/esas2r/esas2r_int.c 	struct esas2r_target *t = a->targetdb + target;
t                 709 drivers/scsi/esas2r/esas2r_int.c 	if (cplen > sizeof(t->lu_event))
t                 710 drivers/scsi/esas2r/esas2r_int.c 		cplen = sizeof(t->lu_event);
t                 717 drivers/scsi/esas2r/esas2r_int.c 	t->new_target_state = TS_INVALID;
t                 720 drivers/scsi/esas2r/esas2r_int.c 		t->new_target_state = TS_NOT_PRESENT;
t                 727 drivers/scsi/esas2r/esas2r_int.c 			t->new_target_state = TS_NOT_PRESENT;
t                 732 drivers/scsi/esas2r/esas2r_int.c 			t->new_target_state = TS_PRESENT;
t                 737 drivers/scsi/esas2r/esas2r_int.c 	if (t->new_target_state != TS_INVALID) {
t                 738 drivers/scsi/esas2r/esas2r_int.c 		memcpy(&t->lu_event, &ae->lu, cplen);
t                  48 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_target *t = NULL;
t                  59 drivers/scsi/esas2r/esas2r_io.c 		t = a->targetdb + rq->target_id;
t                  61 drivers/scsi/esas2r/esas2r_io.c 		if (unlikely(t >= a->targetdb_end
t                  62 drivers/scsi/esas2r/esas2r_io.c 			     || !(t->flags & TF_USED))) {
t                  66 drivers/scsi/esas2r/esas2r_io.c 			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
t                  73 drivers/scsi/esas2r/esas2r_io.c 			if (unlikely(t->target_state != TS_PRESENT &&
t                 529 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_target *t = a->targetdb + rq->target_id;
t                 541 drivers/scsi/esas2r/esas2r_io.c 	    && t->target_state == TS_PRESENT
t                 542 drivers/scsi/esas2r/esas2r_io.c 	    && !(t->flags & TF_PASS_THRU)) {
t                 594 drivers/scsi/esas2r/esas2r_io.c 			startlba = t->inter_block - (lbalo & (t->inter_block -
t                 596 drivers/scsi/esas2r/esas2r_io.c 			sgc->length = startlba * t->block_size;
t                 599 drivers/scsi/esas2r/esas2r_io.c 			if ((lbalo & (t->inter_block - 1)) == 0)
t                 641 drivers/scsi/esas2r/esas2r_io.c 			if (len > t->inter_byte)
t                 642 drivers/scsi/esas2r/esas2r_io.c 				sgc->length = t->inter_byte;
t                 401 drivers/scsi/esas2r/esas2r_ioctl.c 	struct esas2r_target *t;
t                 538 drivers/scsi/esas2r/esas2r_ioctl.c 		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
t                 541 drivers/scsi/esas2r/esas2r_ioctl.c 		if (t == NULL) {
t                 548 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->target_id = esas2r_targ_get_id(t, a);
t                 557 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + rq->target_id;
t                 559 drivers/scsi/esas2r/esas2r_ioctl.c 		if (t >= a->targetdb_end
t                 560 drivers/scsi/esas2r/esas2r_ioctl.c 		    || t->target_state != TS_PRESENT
t                 561 drivers/scsi/esas2r/esas2r_ioctl.c 		    || t->sas_addr == 0) {
t                 567 drivers/scsi/esas2r/esas2r_ioctl.c 		*(u64 *)gda->sas_addr = t->sas_addr;
t                 576 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + rq->target_id;
t                 578 drivers/scsi/esas2r/esas2r_ioctl.c 		if (t >= a->targetdb_end
t                 579 drivers/scsi/esas2r/esas2r_ioctl.c 		    || t->target_state != TS_PRESENT
t                 580 drivers/scsi/esas2r/esas2r_ioctl.c 		    || !(t->flags & TF_PASS_THRU)) {
t                 587 drivers/scsi/esas2r/esas2r_ioctl.c 				       t->phys_targ_id)) {
t                1061 drivers/scsi/esas2r/esas2r_ioctl.c 		struct esas2r_target *t;
t                1081 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + (u16)gda->target_id;
t                1083 drivers/scsi/esas2r/esas2r_ioctl.c 		if (t->target_state != TS_PRESENT) {
t                1086 drivers/scsi/esas2r/esas2r_ioctl.c 			if (t->sas_addr == 0) {
t                1089 drivers/scsi/esas2r/esas2r_ioctl.c 				*(u64 *)gda->address = t->sas_addr;
t                1157 drivers/scsi/esas2r/esas2r_ioctl.c 		struct esas2r_target *t;
t                1177 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + (u16)gdi->target_id;
t                1185 drivers/scsi/esas2r/esas2r_ioctl.c 		if (t->target_state != TS_PRESENT) {
t                 652 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_target *t;
t                 688 drivers/scsi/esas2r/esas2r_main.c 	for (t = a->targetdb; t < a->targetdb_end; t++)
t                 689 drivers/scsi/esas2r/esas2r_main.c 		if (t->buffered_target_state == TS_PRESENT) {
t                 692 drivers/scsi/esas2r/esas2r_main.c 				   (u16)(uintptr_t)(t - a->targetdb));
t                1569 drivers/scsi/esas2r/esas2r_main.c static void esas2r_timer_callback(struct timer_list *t);
t                1581 drivers/scsi/esas2r/esas2r_main.c static void esas2r_timer_callback(struct timer_list *t)
t                1583 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_adapter *a = from_timer(a, t, timer);
t                  48 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                  50 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                  51 drivers/scsi/esas2r/esas2r_targdb.c 		memset(t, 0, sizeof(struct esas2r_target));
t                  53 drivers/scsi/esas2r/esas2r_targdb.c 		t->target_state = TS_NOT_PRESENT;
t                  54 drivers/scsi/esas2r/esas2r_targdb.c 		t->buffered_target_state = TS_NOT_PRESENT;
t                  55 drivers/scsi/esas2r/esas2r_targdb.c 		t->new_target_state = TS_INVALID;
t                  61 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                  64 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                  65 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->target_state != TS_PRESENT)
t                  69 drivers/scsi/esas2r/esas2r_targdb.c 		esas2r_targ_db_remove(a, t);
t                  73 drivers/scsi/esas2r/esas2r_targdb.c 			esas2r_trace("remove id:%d", esas2r_targ_get_id(t,
t                  75 drivers/scsi/esas2r/esas2r_targdb.c 			esas2r_target_state_changed(a, esas2r_targ_get_id(t,
t                  84 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                  94 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                  98 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->buffered_target_state != t->target_state)
t                  99 drivers/scsi/esas2r/esas2r_targdb.c 			state = t->buffered_target_state = t->target_state;
t                 105 drivers/scsi/esas2r/esas2r_targdb.c 					     t,
t                 110 drivers/scsi/esas2r/esas2r_targdb.c 						    esas2r_targ_get_id(t,
t                 123 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 133 drivers/scsi/esas2r/esas2r_targdb.c 	t = a->targetdb + dc->curr_virt_id;
t                 135 drivers/scsi/esas2r/esas2r_targdb.c 	if (t->target_state == TS_PRESENT) {
t                 142 drivers/scsi/esas2r/esas2r_targdb.c 			      t,
t                 156 drivers/scsi/esas2r/esas2r_targdb.c 	t->block_size = dc->block_size;
t                 157 drivers/scsi/esas2r/esas2r_targdb.c 	t->inter_byte = dc->interleave;
t                 158 drivers/scsi/esas2r/esas2r_targdb.c 	t->inter_block = dc->interleave / dc->block_size;
t                 159 drivers/scsi/esas2r/esas2r_targdb.c 	t->virt_targ_id = dc->curr_virt_id;
t                 160 drivers/scsi/esas2r/esas2r_targdb.c 	t->phys_targ_id = ESAS2R_TARG_ID_INV;
t                 162 drivers/scsi/esas2r/esas2r_targdb.c 	t->flags &= ~TF_PASS_THRU;
t                 163 drivers/scsi/esas2r/esas2r_targdb.c 	t->flags |= TF_USED;
t                 165 drivers/scsi/esas2r/esas2r_targdb.c 	t->identifier_len = 0;
t                 167 drivers/scsi/esas2r/esas2r_targdb.c 	t->target_state = TS_PRESENT;
t                 169 drivers/scsi/esas2r/esas2r_targdb.c 	return t;
t                 177 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 189 drivers/scsi/esas2r/esas2r_targdb.c 	t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
t                 191 drivers/scsi/esas2r/esas2r_targdb.c 	if (t == NULL) {
t                 192 drivers/scsi/esas2r/esas2r_targdb.c 		t = a->targetdb + dc->curr_virt_id;
t                 194 drivers/scsi/esas2r/esas2r_targdb.c 		if (ident_len > sizeof(t->identifier)
t                 195 drivers/scsi/esas2r/esas2r_targdb.c 		    || t->target_state == TS_PRESENT) {
t                 201 drivers/scsi/esas2r/esas2r_targdb.c 	esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
t                 205 drivers/scsi/esas2r/esas2r_targdb.c 	t->block_size = 0;
t                 206 drivers/scsi/esas2r/esas2r_targdb.c 	t->inter_byte = 0;
t                 207 drivers/scsi/esas2r/esas2r_targdb.c 	t->inter_block = 0;
t                 208 drivers/scsi/esas2r/esas2r_targdb.c 	t->virt_targ_id = dc->curr_virt_id;
t                 209 drivers/scsi/esas2r/esas2r_targdb.c 	t->phys_targ_id = dc->curr_phys_id;
t                 210 drivers/scsi/esas2r/esas2r_targdb.c 	t->identifier_len = ident_len;
t                 212 drivers/scsi/esas2r/esas2r_targdb.c 	memcpy(t->identifier, ident, ident_len);
t                 214 drivers/scsi/esas2r/esas2r_targdb.c 	t->flags |= TF_PASS_THRU | TF_USED;
t                 216 drivers/scsi/esas2r/esas2r_targdb.c 	t->target_state = TS_PRESENT;
t                 218 drivers/scsi/esas2r/esas2r_targdb.c 	return t;
t                 221 drivers/scsi/esas2r/esas2r_targdb.c void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
t                 225 drivers/scsi/esas2r/esas2r_targdb.c 	t->target_state = TS_NOT_PRESENT;
t                 227 drivers/scsi/esas2r/esas2r_targdb.c 	esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
t                 235 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 237 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++)
t                 238 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->sas_addr == *sas_addr)
t                 239 drivers/scsi/esas2r/esas2r_targdb.c 			return t;
t                 248 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 250 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                 251 drivers/scsi/esas2r/esas2r_targdb.c 		if (ident_len == t->identifier_len
t                 252 drivers/scsi/esas2r/esas2r_targdb.c 		    && memcmp(&t->identifier[0], identifier,
t                 254 drivers/scsi/esas2r/esas2r_targdb.c 			return t;
t                 265 drivers/scsi/esas2r/esas2r_targdb.c 		struct esas2r_target *t = a->targetdb + id;
t                 267 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->target_state == TS_PRESENT)
t                 279 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 281 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++) {
t                 282 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->target_state != TS_PRESENT)
t                 285 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->virt_targ_id == virt_id)
t                 286 drivers/scsi/esas2r/esas2r_targdb.c 			return t;
t                 295 drivers/scsi/esas2r/esas2r_targdb.c 	struct esas2r_target *t;
t                 299 drivers/scsi/esas2r/esas2r_targdb.c 	for (t = a->targetdb; t < a->targetdb_end; t++)
t                 300 drivers/scsi/esas2r/esas2r_targdb.c 		if (t->target_state == TS_PRESENT)
t                 897 drivers/scsi/fcoe/fcoe_ctlr.c 	unsigned long t;
t                 974 drivers/scsi/fcoe/fcoe_ctlr.c 			t = ntohl(fka->fd_fka_period);
t                 975 drivers/scsi/fcoe/fcoe_ctlr.c 			if (t >= FCOE_CTLR_MIN_FKA)
t                 976 drivers/scsi/fcoe/fcoe_ctlr.c 				fcf->fka_period = msecs_to_jiffies(t);
t                1780 drivers/scsi/fcoe/fcoe_ctlr.c static void fcoe_ctlr_timeout(struct timer_list *t)
t                1782 drivers/scsi/fcoe/fcoe_ctlr.c 	struct fcoe_ctlr *fip = from_timer(fip, t, timer);
t                 446 drivers/scsi/fcoe/fcoe_transport.c void fcoe_queue_timer(struct timer_list *t)
t                 448 drivers/scsi/fcoe/fcoe_transport.c 	struct fcoe_port *port = from_timer(port, t, timer);
t                 417 drivers/scsi/fnic/fnic_main.c static void fnic_notify_timer(struct timer_list *t)
t                 419 drivers/scsi/fnic/fnic_main.c 	struct fnic *fnic = from_timer(fnic, t, notify_timer);
t                 426 drivers/scsi/fnic/fnic_main.c static void fnic_fip_notify_timer(struct timer_list *t)
t                 428 drivers/scsi/fnic/fnic_main.c 	struct fnic *fnic = from_timer(fnic, t, fip_timer);
t                 185 drivers/scsi/gdth.c #define INDEX_OK(i,t)   ((i)<ARRAY_SIZE(t))
t                1580 drivers/scsi/gdth.c     u8 b, t, l, firsttime;
t                1600 drivers/scsi/gdth.c             t = nscp->device->id;
t                1604 drivers/scsi/gdth.c                     (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
t                1608 drivers/scsi/gdth.c             b = t = l = 0;
t                1628 drivers/scsi/gdth.c                         b, t, l));
t                1631 drivers/scsi/gdth.c                     if (b == 0 && t == 0 && l == 0) {
t                1636 drivers/scsi/gdth.c                     if (b == 0 && ((t == 0 && l == 1) ||
t                1637 drivers/scsi/gdth.c                          (t == 1 && l == 0))) {
t                1649 drivers/scsi/gdth.c                     if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
t                1660 drivers/scsi/gdth.c                 (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
t                1669 drivers/scsi/gdth.c                 if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
t                1691 drivers/scsi/gdth.c             if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
t                1695 drivers/scsi/gdth.c                 ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
t                1696 drivers/scsi/gdth.c         } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
t                1698 drivers/scsi/gdth.c                     nscp->cmnd[0], b, t, l));
t                1717 drivers/scsi/gdth.c                 if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
t                1720 drivers/scsi/gdth.c                              nscp->cmnd[0], t));
t                1721 drivers/scsi/gdth.c                     ha->hdr[t].media_changed = FALSE;
t                1738 drivers/scsi/gdth.c                 if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
t                1747 drivers/scsi/gdth.c                     nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
t                1750 drivers/scsi/gdth.c                     if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
t                1759 drivers/scsi/gdth.c                 if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
t                1769 drivers/scsi/gdth.c                 if (ha->hdr[t].media_changed) {
t                1772 drivers/scsi/gdth.c                              nscp->cmnd[0], t));
t                1773 drivers/scsi/gdth.c                     ha->hdr[t].media_changed = FALSE;
t                1782 drivers/scsi/gdth.c                 } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
t                1873 drivers/scsi/gdth.c     u8 t;
t                1880 drivers/scsi/gdth.c     t  = scp->device->id;
t                1882 drivers/scsi/gdth.c            scp->cmnd[0],t));
t                1891 drivers/scsi/gdth.c         TRACE2(("Test/Verify/Start hdrive %d\n",t));
t                1896 drivers/scsi/gdth.c                 t,ha->hdr[t].devtype));
t                1897 drivers/scsi/gdth.c         inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
t                1901 drivers/scsi/gdth.c         if ((ha->hdr[t].devtype & 1) ||
t                1902 drivers/scsi/gdth.c             (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
t                1908 drivers/scsi/gdth.c         snprintf(inq.product, sizeof(inq.product), "Host Drive  #%02d",t);
t                1914 drivers/scsi/gdth.c         TRACE2(("Request sense hdrive %d\n",t));
t                1924 drivers/scsi/gdth.c         TRACE2(("Mode sense hdrive %d\n",t));
t                1927 drivers/scsi/gdth.c         mpd.hd.dev_par     = (ha->hdr[t].devtype&2) ? 0x80:0;
t                1936 drivers/scsi/gdth.c         TRACE2(("Read capacity hdrive %d\n",t));
t                1937 drivers/scsi/gdth.c         if (ha->hdr[t].size > (u64)0xffffffff)
t                1940 drivers/scsi/gdth.c             rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
t                1950 drivers/scsi/gdth.c             TRACE2(("Read capacity (16) hdrive %d\n",t));
t                1951 drivers/scsi/gdth.c             rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
t                2143 drivers/scsi/gdth.c     u8 t,l;
t                2146 drivers/scsi/gdth.c     t = scp->device->id;
t                2150 drivers/scsi/gdth.c            scp->cmnd[0],b,t,l));
t                2196 drivers/scsi/gdth.c             cmdp->u.raw64.target     = t;
t                2212 drivers/scsi/gdth.c             cmdp->u.raw.target     = t;
t                2647 drivers/scsi/gdth.c     u8 b, t;
t                2724 drivers/scsi/gdth.c         t = scp->device->id;
t                2726 drivers/scsi/gdth.c             ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
t                2752 drivers/scsi/gdth.c                     ha->hdr[t].cluster_type = (u8)ha->info;
t                2753 drivers/scsi/gdth.c                     if (!(ha->hdr[t].cluster_type & 
t                2757 drivers/scsi/gdth.c                         if (ha->hdr[t].cluster_type & 
t                2767 drivers/scsi/gdth.c                         ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
t                2768 drivers/scsi/gdth.c                         ha->hdr[t].media_changed = TRUE;
t                2770 drivers/scsi/gdth.c                         ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
t                2771 drivers/scsi/gdth.c                         ha->hdr[t].media_changed = TRUE;
t                2781 drivers/scsi/gdth.c                     ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
t                2783 drivers/scsi/gdth.c                     ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
t                2808 drivers/scsi/gdth.c                     (ha->hdr[t].cluster_type & 
t                2811 drivers/scsi/gdth.c                     ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
t                2827 drivers/scsi/gdth.c                     ha->dvr.eu.sync.hostdrive = t;
t                3278 drivers/scsi/gdth.c 	u8 b, t;
t                3284 drivers/scsi/gdth.c 	t = scp->device->id;
t                3297 drivers/scsi/gdth.c 	    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
t                3361 drivers/scsi/gdth.c     u8 b, t;
t                3369 drivers/scsi/gdth.c     t = sd->id;
t                3370 drivers/scsi/gdth.c     TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
t                3372 drivers/scsi/gdth.c     if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
t                3377 drivers/scsi/gdth.c         ip[0] = ha->hdr[t].heads;
t                3378 drivers/scsi/gdth.c         ip[1] = ha->hdr[t].secs;
t                 566 drivers/scsi/gdth_proc.c     u8 b, t;
t                 575 drivers/scsi/gdth_proc.c         t = scp->device->id;
t                 576 drivers/scsi/gdth_proc.c         if (!SPECIAL_SCP(scp) && t == (u8)id && 
t                 891 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
t                 893 drivers/scsi/hisi_sas/hisi_sas_main.c 	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
t                1160 drivers/scsi/hisi_sas/hisi_sas_main.c static void hisi_sas_tmf_timedout(struct timer_list *t)
t                1162 drivers/scsi/hisi_sas/hisi_sas_main.c 	struct sas_task_slow *slow = from_timer(slow, t, timer);
t                 794 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static void start_phys_v1_hw(struct timer_list *t)
t                 796 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
t                 727 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static void link_timeout_disable_link(struct timer_list *t);
t                1324 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static void link_timeout_enable_link(struct timer_list *t)
t                1326 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
t                1345 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static void link_timeout_disable_link(struct timer_list *t)
t                1347 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
t                2568 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
t                2570 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer);
t                3356 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 		struct tasklet_struct *t = &cq->tasklet;
t                3367 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
t                2438 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		struct tasklet_struct *t = &cq->tasklet;
t                2452 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
t                 359 drivers/scsi/hpsa_cmd.h #define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\
t                 361 drivers/scsi/hpsa_cmd.h 				((t) & 0x07))
t                1383 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_timeout(struct timer_list *t)
t                1385 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
t                3683 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_adisc_timeout(struct timer_list *t)
t                3685 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
t                 732 drivers/scsi/ibmvscsi/ibmvfc.h #define tgt_dbg(t, fmt, ...)			\
t                 733 drivers/scsi/ibmvscsi/ibmvfc.h 	DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
t                 735 drivers/scsi/ibmvscsi/ibmvfc.h #define tgt_info(t, fmt, ...)		\
t                 736 drivers/scsi/ibmvscsi/ibmvfc.h 	dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
t                 738 drivers/scsi/ibmvscsi/ibmvfc.h #define tgt_err(t, fmt, ...)		\
t                 739 drivers/scsi/ibmvscsi/ibmvfc.h 	dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
t                 741 drivers/scsi/ibmvscsi/ibmvfc.h #define tgt_log(t, level, fmt, ...) \
t                 743 drivers/scsi/ibmvscsi/ibmvfc.h 		if ((t)->vhost->log_level >= level) \
t                 744 drivers/scsi/ibmvscsi/ibmvfc.h 			tgt_err(t, fmt, ##__VA_ARGS__); \
t                 827 drivers/scsi/ibmvscsi/ibmvscsi.c static void ibmvscsi_timeout(struct timer_list *t)
t                 829 drivers/scsi/ibmvscsi/ibmvscsi.c 	struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
t                2678 drivers/scsi/ipr.c static void ipr_timeout(struct timer_list *t)
t                2680 drivers/scsi/ipr.c 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
t                2711 drivers/scsi/ipr.c static void ipr_oper_timeout(struct timer_list *t)
t                2713 drivers/scsi/ipr.c 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
t                5452 drivers/scsi/ipr.c static void ipr_abort_timeout(struct timer_list *t)
t                5454 drivers/scsi/ipr.c 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
t                8275 drivers/scsi/ipr.c static void ipr_reset_timer_done(struct timer_list *t)
t                8277 drivers/scsi/ipr.c 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
t                9877 drivers/scsi/ipr.c 	struct ipr_interrupts *t;
t                9881 drivers/scsi/ipr.c 	t = &ioa_cfg->regs;
t                9884 drivers/scsi/ipr.c 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
t                9885 drivers/scsi/ipr.c 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
t                9886 drivers/scsi/ipr.c 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
t                9887 drivers/scsi/ipr.c 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
t                9888 drivers/scsi/ipr.c 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
t                9889 drivers/scsi/ipr.c 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
t                9890 drivers/scsi/ipr.c 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
t                9891 drivers/scsi/ipr.c 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
t                9892 drivers/scsi/ipr.c 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
t                9893 drivers/scsi/ipr.c 	t->ioarrin_reg = base + p->ioarrin_reg;
t                9894 drivers/scsi/ipr.c 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
t                9895 drivers/scsi/ipr.c 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
t                9896 drivers/scsi/ipr.c 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
t                9897 drivers/scsi/ipr.c 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
t                9898 drivers/scsi/ipr.c 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
t                9899 drivers/scsi/ipr.c 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
t                9902 drivers/scsi/ipr.c 		t->init_feedback_reg = base + p->init_feedback_reg;
t                9903 drivers/scsi/ipr.c 		t->dump_addr_reg = base + p->dump_addr_reg;
t                9904 drivers/scsi/ipr.c 		t->dump_data_reg = base + p->dump_data_reg;
t                9905 drivers/scsi/ipr.c 		t->endian_swap_reg = base + p->endian_swap_reg;
t                 961 drivers/scsi/isci/host.c static void phy_startup_timeout(struct timer_list *t)
t                 963 drivers/scsi/isci/host.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                1595 drivers/scsi/isci/host.c static void controller_timeout(struct timer_list *t)
t                1597 drivers/scsi/isci/host.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                1740 drivers/scsi/isci/host.c static void power_control_timeout(struct timer_list *t)
t                1742 drivers/scsi/isci/host.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                 501 drivers/scsi/isci/isci.h void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
t                 318 drivers/scsi/isci/phy.c static void phy_sata_timeout(struct timer_list *t)
t                 320 drivers/scsi/isci/phy.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                 772 drivers/scsi/isci/port.c static void port_timeout(struct timer_list *t)
t                 774 drivers/scsi/isci/port.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                 321 drivers/scsi/isci/port_config.c static void mpc_agent_timeout(struct timer_list *t)
t                 324 drivers/scsi/isci/port_config.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                 656 drivers/scsi/isci/port_config.c static void apc_agent_timeout(struct timer_list *t)
t                 659 drivers/scsi/isci/port_config.c 	struct sci_timer *tmr = from_timer(tmr, t, timer);
t                1288 drivers/scsi/libfc/fc_fcp.c static void fc_lun_reset_send(struct timer_list *t)
t                1290 drivers/scsi/libfc/fc_fcp.c 	struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
t                1421 drivers/scsi/libfc/fc_fcp.c static void fc_fcp_timeout(struct timer_list *t)
t                1423 drivers/scsi/libfc/fc_fcp.c 	struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
t                1786 drivers/scsi/libiscsi.c static void iscsi_tmf_timedout(struct timer_list *t)
t                1788 drivers/scsi/libiscsi.c 	struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
t                2082 drivers/scsi/libiscsi.c static void iscsi_check_transport_timeouts(struct timer_list *t)
t                2084 drivers/scsi/libiscsi.c 	struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
t                  31 drivers/scsi/libsas/sas_expander.c static void smp_task_timedout(struct timer_list *t)
t                  33 drivers/scsi/libsas/sas_expander.c 	struct sas_task_slow *slow = from_timer(slow, t, timer);
t                 220 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_sli4_poll_hbtimer(struct timer_list *t);
t                 287 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_poll_timeout(struct timer_list *t);
t                 359 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_mbox_timeout(struct timer_list *t);
t                3257 drivers/scsi/lpfc/lpfc_ct.c lpfc_delayed_disc_tmo(struct timer_list *t)
t                3259 drivers/scsi/lpfc/lpfc_ct.c 	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
t                3416 drivers/scsi/lpfc/lpfc_els.c lpfc_els_retry_delay(struct timer_list *t)
t                3418 drivers/scsi/lpfc/lpfc_els.c 	struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
t                7856 drivers/scsi/lpfc/lpfc_els.c lpfc_els_timeout(struct timer_list *t)
t                7858 drivers/scsi/lpfc/lpfc_els.c 	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
t                9571 drivers/scsi/lpfc/lpfc_els.c lpfc_fabric_block_timeout(struct timer_list *t)
t                9573 drivers/scsi/lpfc/lpfc_els.c 	struct lpfc_hba  *phba = from_timer(phba, t, fabric_block_timer);
t                5783 drivers/scsi/lpfc/lpfc_hbadisc.c lpfc_disc_timeout(struct timer_list *t)
t                5785 drivers/scsi/lpfc/lpfc_hbadisc.c 	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
t                1149 drivers/scsi/lpfc/lpfc_init.c lpfc_hb_timeout(struct timer_list *t)
t                1155 drivers/scsi/lpfc/lpfc_init.c 	phba = from_timer(phba, t, hb_tmofunc);
t                1183 drivers/scsi/lpfc/lpfc_init.c lpfc_rrq_timeout(struct timer_list *t)
t                1188 drivers/scsi/lpfc/lpfc_init.c 	phba = from_timer(phba, t, rrq_tmr);
t                2693 drivers/scsi/lpfc/lpfc_init.c 	int t;
t                2696 drivers/scsi/lpfc/lpfc_init.c 	t = 16;
t                2698 drivers/scsi/lpfc/lpfc_init.c 		HashWorkingPointer[t] =
t                2700 drivers/scsi/lpfc/lpfc_init.c 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
t                2702 drivers/scsi/lpfc/lpfc_init.c 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
t                2703 drivers/scsi/lpfc/lpfc_init.c 	} while (++t <= 79);
t                2704 drivers/scsi/lpfc/lpfc_init.c 	t = 0;
t                2712 drivers/scsi/lpfc/lpfc_init.c 		if (t < 20) {
t                2714 drivers/scsi/lpfc/lpfc_init.c 		} else if (t < 40) {
t                2716 drivers/scsi/lpfc/lpfc_init.c 		} else if (t < 60) {
t                2721 drivers/scsi/lpfc/lpfc_init.c 		TEMP += S(5, A) + E + HashWorkingPointer[t];
t                2727 drivers/scsi/lpfc/lpfc_init.c 	} while (++t <= 79);
t                2763 drivers/scsi/lpfc/lpfc_init.c 	int t;
t                2774 drivers/scsi/lpfc/lpfc_init.c 	for (t = 0; t < 7; t++)
t                2775 drivers/scsi/lpfc/lpfc_init.c 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
t                4637 drivers/scsi/lpfc/lpfc_init.c lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
t                4639 drivers/scsi/lpfc/lpfc_init.c 	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
t                4470 drivers/scsi/lpfc/lpfc_scsi.c void lpfc_poll_timeout(struct timer_list *t)
t                4472 drivers/scsi/lpfc/lpfc_scsi.c 	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
t                3291 drivers/scsi/lpfc/lpfc_sli.c void lpfc_poll_eratt(struct timer_list *t)
t                3297 drivers/scsi/lpfc/lpfc_sli.c 	phba = from_timer(phba, t, eratt_poll);
t                7789 drivers/scsi/lpfc/lpfc_sli.c lpfc_mbox_timeout(struct timer_list *t)
t                7791 drivers/scsi/lpfc/lpfc_sli.c 	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
t                14321 drivers/scsi/lpfc/lpfc_sli.c void lpfc_sli4_poll_hbtimer(struct timer_list *t)
t                14323 drivers/scsi/lpfc/lpfc_sli.c 	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
t                3363 drivers/scsi/megaraid/megaraid_mbox.c 	uint8_t		t;
t                3368 drivers/scsi/megaraid/megaraid_mbox.c 	for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
t                3369 drivers/scsi/megaraid/megaraid_mbox.c 		adapter->device_ids[adapter->max_channel][t] =
t                3370 drivers/scsi/megaraid/megaraid_mbox.c 			(t < adapter->init_id) ?  t : t - 1;
t                3378 drivers/scsi/megaraid/megaraid_mbox.c 		for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
t                3379 drivers/scsi/megaraid/megaraid_mbox.c 			adapter->device_ids[c][t] = (c << 8) | t;
t                3847 drivers/scsi/megaraid/megaraid_mbox.c megaraid_sysfs_get_ldmap_timeout(struct timer_list *t)
t                3849 drivers/scsi/megaraid/megaraid_mbox.c 	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
t                 783 drivers/scsi/megaraid/megaraid_mm.c lld_timedout(struct timer_list *t)
t                 785 drivers/scsi/megaraid/megaraid_mm.c 	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
t                2252 drivers/scsi/megaraid/megaraid_sas_base.c static void megasas_sriov_heartbeat_handler(struct timer_list *t);
t                2648 drivers/scsi/megaraid/megaraid_sas_base.c static void megasas_sriov_heartbeat_handler(struct timer_list *t)
t                2651 drivers/scsi/megaraid/megaraid_sas_base.c 		from_timer(instance, t, sriov_heartbeat_timer);
t                 238 drivers/scsi/mesh.c static void dumplog(struct mesh_state *ms, int t)
t                 240 drivers/scsi/mesh.c 	struct mesh_target *tp = &ms->tgts[t];
t                 253 drivers/scsi/mesh.c 		       t, lp->bs1, lp->bs0, lp->phase);
t                 307 drivers/scsi/mesh.c 	int t;
t                 326 drivers/scsi/mesh.c 	for (t = 0; t < 8; ++t) {
t                 327 drivers/scsi/mesh.c 		tp = &ms->tgts[t];
t                 331 drivers/scsi/mesh.c 		       t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
t                 408 drivers/scsi/mesh.c 	int t, id;
t                 460 drivers/scsi/mesh.c 		for (t = 100; t > 0; --t) {
t                 515 drivers/scsi/mesh.c 	for (t = 230; t > 0; --t) {
t                 534 drivers/scsi/mesh.c 		for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
t                 861 drivers/scsi/mesh.c 	int b, t, prev;
t                 937 drivers/scsi/mesh.c 	for (t = 0; t < 8; ++t)
t                 938 drivers/scsi/mesh.c 		if ((b & (1 << t)) != 0 && t != ms->host->this_id)
t                 940 drivers/scsi/mesh.c 	if (b != (1 << t) + (1 << ms->host->this_id)) {
t                 950 drivers/scsi/mesh.c 	ms->conn_tgt = t;
t                 951 drivers/scsi/mesh.c 	tp = &ms->tgts[t];
t                 953 drivers/scsi/mesh.c 	if (ALLOW_DEBUG(t)) {
t                 954 drivers/scsi/mesh.c 		printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
t                 960 drivers/scsi/mesh.c 		printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
t                1324 drivers/scsi/mesh.c 	int t, nb;
t                1328 drivers/scsi/mesh.c 		t = 50;
t                1329 drivers/scsi/mesh.c 		while (t > 0 && in_8(&mr->fifo_count) != 0
t                1331 drivers/scsi/mesh.c 			--t;
t                1448 drivers/scsi/mesh.c 	int seq, n, t;
t                1495 drivers/scsi/mesh.c 		t = 30;		/* wait up to 30us */
t                1496 drivers/scsi/mesh.c 		while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
t                1568 drivers/scsi/mesh.c 			t = 230;		/* wait up to 230us */
t                1570 drivers/scsi/mesh.c 				if (--t < 0) {
t                1269 drivers/scsi/mvsas/mv_sas.c static void mvs_tmf_timedout(struct timer_list *t)
t                1271 drivers/scsi/mvsas/mv_sas.c 	struct sas_task_slow *slow = from_timer(slow, t, timer);
t                1939 drivers/scsi/mvsas/mv_sas.c static void mvs_sig_time_out(struct timer_list *t)
t                1941 drivers/scsi/mvsas/mv_sas.c 	struct mvs_phy *phy = from_timer(phy, t, timer);
t                 789 drivers/scsi/ncr53c8xx.c 	int c, h, t, u, v;
t                 794 drivers/scsi/ncr53c8xx.c 	t = NO_TARGET;
t                 801 drivers/scsi/ncr53c8xx.c 			t = ALL_TARGETS;
t                 805 drivers/scsi/ncr53c8xx.c 			if (t != target)
t                 806 drivers/scsi/ncr53c8xx.c 				t = (target == v) ? v : NO_TARGET;
t                 815 drivers/scsi/ncr53c8xx.c 				(t == ALL_TARGETS || t == target) &&
t                 820 drivers/scsi/ncr53c8xx.c 			t = ALL_TARGETS;
t                8088 drivers/scsi/ncr53c8xx.c static void ncr53c8xx_timeout(struct timer_list *t)
t                8090 drivers/scsi/ncr53c8xx.c 	struct ncb *np = from_timer(np, t, timer);
t                1524 drivers/scsi/pm8001/pm8001_hwi.c 		struct sas_task *t = (struct sas_task *)pm8001_dev;
t                1532 drivers/scsi/pm8001/pm8001_hwi.c 		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
t                1536 drivers/scsi/pm8001/pm8001_hwi.c 		spin_lock_irqsave(&t->task_state_lock, flags1);
t                1537 drivers/scsi/pm8001/pm8001_hwi.c 		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
t                1538 drivers/scsi/pm8001/pm8001_hwi.c 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1542 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1548 drivers/scsi/pm8001/pm8001_hwi.c 			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
t                1555 drivers/scsi/pm8001/pm8001_hwi.c 		ts = &t->task_status;
t                1562 drivers/scsi/pm8001/pm8001_hwi.c 		spin_lock_irqsave(&t->task_state_lock, flags1);
t                1563 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                1564 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                1565 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_state_flags |= SAS_TASK_STATE_DONE;
t                1566 drivers/scsi/pm8001/pm8001_hwi.c 		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                1567 drivers/scsi/pm8001/pm8001_hwi.c 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1571 drivers/scsi/pm8001/pm8001_hwi.c 				t, pw->handler, ts->resp, ts->stat));
t                1572 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1575 drivers/scsi/pm8001/pm8001_hwi.c 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1576 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1579 drivers/scsi/pm8001/pm8001_hwi.c 			t->task_done(t);
t                1584 drivers/scsi/pm8001/pm8001_hwi.c 		struct sas_task *t = (struct sas_task *)pm8001_dev;
t                1594 drivers/scsi/pm8001/pm8001_hwi.c 		ret = pm8001_query_task(t);
t                1613 drivers/scsi/pm8001/pm8001_hwi.c 		spin_lock_irqsave(&t->task_state_lock, flags1);
t                1615 drivers/scsi/pm8001/pm8001_hwi.c 		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
t                1616 drivers/scsi/pm8001/pm8001_hwi.c 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1619 drivers/scsi/pm8001/pm8001_hwi.c 				(void)pm8001_abort_task(t);
t                1623 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags1);
t                1629 drivers/scsi/pm8001/pm8001_hwi.c 			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
t                1635 drivers/scsi/pm8001/pm8001_hwi.c 				(void)pm8001_abort_task(t);
t                1646 drivers/scsi/pm8001/pm8001_hwi.c 			ret = pm8001_abort_task(t);
t                1675 drivers/scsi/pm8001/pm8001_hwi.c 			t = NULL;
t                1676 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
t                1856 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                1878 drivers/scsi/pm8001/pm8001_hwi.c 	t = ccb->task;
t                1883 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                1885 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                1891 drivers/scsi/pm8001/pm8001_hwi.c 			"%016llx", SAS_ADDR(t->dev->sas_addr)));
t                1905 drivers/scsi/pm8001/pm8001_hwi.c 			sas_ssp_task_response(pm8001_ha->dev, t, iu);
t                1974 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task)
t                2044 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task)
t                2085 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2086 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2087 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2088 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2089 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2090 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2094 drivers/scsi/pm8001/pm8001_hwi.c 			t, status, ts->resp, ts->stat));
t                2095 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2097 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2098 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2100 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_done(t);
t                2107 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                2120 drivers/scsi/pm8001/pm8001_hwi.c 	t = ccb->task;
t                2125 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                2127 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                2143 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
t                2180 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task)
t                2223 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
t                2273 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2274 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2275 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2276 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2277 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2278 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2282 drivers/scsi/pm8001/pm8001_hwi.c 			t, event, ts->resp, ts->stat));
t                2283 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2285 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2286 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2288 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_done(t);
t                2296 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                2325 drivers/scsi/pm8001/pm8001_hwi.c 		t = ccb->task;
t                2333 drivers/scsi/pm8001/pm8001_hwi.c 	if (t) {
t                2334 drivers/scsi/pm8001/pm8001_hwi.c 		if (t->dev && (t->dev->lldd_dev))
t                2335 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_dev = t->dev->lldd_dev;
t                2343 drivers/scsi/pm8001/pm8001_hwi.c 		&& unlikely(!t || !t->lldd_task || !t->dev)) {
t                2349 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                2358 drivers/scsi/pm8001/pm8001_hwi.c 		if (!((t->dev->parent) &&
t                2359 drivers/scsi/pm8001/pm8001_hwi.c 			(dev_is_expander(t->dev->parent->dev_type)))) {
t                2392 drivers/scsi/pm8001/pm8001_hwi.c 				"%016llx", SAS_ADDR(t->dev->sas_addr)));
t                2411 drivers/scsi/pm8001/pm8001_hwi.c 				sas_free_task(t);
t                2424 drivers/scsi/pm8001/pm8001_hwi.c 			if (t->ata_task.dma_xfer == 0 &&
t                2425 drivers/scsi/pm8001/pm8001_hwi.c 			    t->data_dir == DMA_FROM_DEVICE) {
t                2429 drivers/scsi/pm8001/pm8001_hwi.c 			} else if (t->ata_task.use_ncq) {
t                2514 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2520 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2530 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2536 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2554 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2560 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2618 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2623 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2638 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2643 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2662 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2663 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2664 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2665 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2666 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2667 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2671 drivers/scsi/pm8001/pm8001_hwi.c 			t, status, ts->resp, ts->stat));
t                2672 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2674 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2675 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2682 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                2697 drivers/scsi/pm8001/pm8001_hwi.c 		t = ccb->task;
t                2718 drivers/scsi/pm8001/pm8001_hwi.c 	t = ccb->task;
t                2723 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                2725 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                2778 drivers/scsi/pm8001/pm8001_hwi.c 		if (!t->uldd_task) {
t                2784 drivers/scsi/pm8001/pm8001_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2882 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2883 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2884 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2885 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2886 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2887 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2891 drivers/scsi/pm8001/pm8001_hwi.c 			t, event, ts->resp, ts->stat));
t                2892 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2894 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2895 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2903 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                2917 drivers/scsi/pm8001/pm8001_hwi.c 	t = ccb->task;
t                2918 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                2923 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                3080 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                3081 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                3082 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                3083 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                3084 drivers/scsi/pm8001/pm8001_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                3085 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                3089 drivers/scsi/pm8001/pm8001_hwi.c 			t, status, ts->resp, ts->stat));
t                3090 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                3092 drivers/scsi/pm8001/pm8001_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                3093 drivers/scsi/pm8001/pm8001_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                3095 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_done(t);
t                3722 drivers/scsi/pm8001/pm8001_hwi.c 	struct sas_task *t;
t                3743 drivers/scsi/pm8001/pm8001_hwi.c 	t = ccb->task;
t                3746 drivers/scsi/pm8001/pm8001_hwi.c 	if (!t)	{
t                3751 drivers/scsi/pm8001/pm8001_hwi.c 	ts = &t->task_status;
t                3767 drivers/scsi/pm8001/pm8001_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                3768 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                3769 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                3770 drivers/scsi/pm8001/pm8001_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                3771 drivers/scsi/pm8001/pm8001_hwi.c 	spin_unlock_irqrestore(&t->task_state_lock, flags);
t                3772 drivers/scsi/pm8001/pm8001_hwi.c 	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                3777 drivers/scsi/pm8001/pm8001_hwi.c 		sas_free_task(t);
t                3781 drivers/scsi/pm8001/pm8001_hwi.c 		t->task_done(t);
t                 385 drivers/scsi/pm8001/pm8001_sas.c 	struct sas_task *t = task;
t                 391 drivers/scsi/pm8001/pm8001_sas.c 		struct task_status_struct *tsm = &t->task_status;
t                 395 drivers/scsi/pm8001/pm8001_sas.c 			t->task_done(t);
t                 400 drivers/scsi/pm8001/pm8001_sas.c 		struct task_status_struct *ts = &t->task_status;
t                 403 drivers/scsi/pm8001/pm8001_sas.c 		t->task_done(t);
t                 409 drivers/scsi/pm8001/pm8001_sas.c 		dev = t->dev;
t                 413 drivers/scsi/pm8001/pm8001_sas.c 			if (sas_protocol_ata(t->task_proto)) {
t                 414 drivers/scsi/pm8001/pm8001_sas.c 				struct task_status_struct *ts = &t->task_status;
t                 419 drivers/scsi/pm8001/pm8001_sas.c 				t->task_done(t);
t                 423 drivers/scsi/pm8001/pm8001_sas.c 				struct task_status_struct *ts = &t->task_status;
t                 426 drivers/scsi/pm8001/pm8001_sas.c 				t->task_done(t);
t                 435 drivers/scsi/pm8001/pm8001_sas.c 		if (!sas_protocol_ata(t->task_proto)) {
t                 436 drivers/scsi/pm8001/pm8001_sas.c 			if (t->num_scatter) {
t                 438 drivers/scsi/pm8001/pm8001_sas.c 					t->scatter,
t                 439 drivers/scsi/pm8001/pm8001_sas.c 					t->num_scatter,
t                 440 drivers/scsi/pm8001/pm8001_sas.c 					t->data_dir);
t                 447 drivers/scsi/pm8001/pm8001_sas.c 			n_elem = t->num_scatter;
t                 450 drivers/scsi/pm8001/pm8001_sas.c 		t->lldd_task = ccb;
t                 453 drivers/scsi/pm8001/pm8001_sas.c 		ccb->task = t;
t                 455 drivers/scsi/pm8001/pm8001_sas.c 		switch (t->task_proto) {
t                 473 drivers/scsi/pm8001/pm8001_sas.c 				t->task_proto);
t                 484 drivers/scsi/pm8001/pm8001_sas.c 		spin_lock(&t->task_state_lock);
t                 485 drivers/scsi/pm8001/pm8001_sas.c 		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
t                 486 drivers/scsi/pm8001/pm8001_sas.c 		spin_unlock(&t->task_state_lock);
t                 496 drivers/scsi/pm8001/pm8001_sas.c 	if (!sas_protocol_ata(t->task_proto))
t                 498 drivers/scsi/pm8001/pm8001_sas.c 			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t                 499 drivers/scsi/pm8001/pm8001_sas.c 				t->data_dir);
t                 688 drivers/scsi/pm8001/pm8001_sas.c static void pm8001_tmf_timedout(struct timer_list *t)
t                 690 drivers/scsi/pm8001/pm8001_sas.c 	struct sas_task_slow *slow = from_timer(slow, t, timer);
t                1544 drivers/scsi/pm8001/pm80xx_hwi.c 	struct sas_task *t;
t                1565 drivers/scsi/pm8001/pm80xx_hwi.c 	t = ccb->task;
t                1570 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                1572 drivers/scsi/pm8001/pm80xx_hwi.c 	ts = &t->task_status;
t                1578 drivers/scsi/pm8001/pm80xx_hwi.c 			":%016llx", SAS_ADDR(t->dev->sas_addr)));
t                1593 drivers/scsi/pm8001/pm80xx_hwi.c 			sas_ssp_task_response(pm8001_ha->dev, t, iu);
t                1675 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task)
t                1744 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task)
t                1785 drivers/scsi/pm8001/pm80xx_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                1786 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                1787 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                1788 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                1789 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                1790 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                1794 drivers/scsi/pm8001/pm80xx_hwi.c 			t, status, ts->resp, ts->stat));
t                1795 drivers/scsi/pm8001/pm80xx_hwi.c 		if (t->slow_task)
t                1796 drivers/scsi/pm8001/pm80xx_hwi.c 			complete(&t->slow_task->completion);
t                1797 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1799 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                1800 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1802 drivers/scsi/pm8001/pm80xx_hwi.c 		t->task_done(t);
t                1809 drivers/scsi/pm8001/pm80xx_hwi.c 	struct sas_task *t;
t                1821 drivers/scsi/pm8001/pm80xx_hwi.c 	t = ccb->task;
t                1826 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                1828 drivers/scsi/pm8001/pm80xx_hwi.c 	ts = &t->task_status;
t                1844 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
t                1885 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task)
t                1927 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
t                1984 drivers/scsi/pm8001/pm80xx_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                1985 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                1986 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                1987 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                1988 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                1989 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                1993 drivers/scsi/pm8001/pm80xx_hwi.c 			t, event, ts->resp, ts->stat));
t                1994 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1996 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                1997 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                1999 drivers/scsi/pm8001/pm80xx_hwi.c 		t->task_done(t);
t                2007 drivers/scsi/pm8001/pm80xx_hwi.c 	struct sas_task *t;
t                2035 drivers/scsi/pm8001/pm80xx_hwi.c 		t = ccb->task;
t                2043 drivers/scsi/pm8001/pm80xx_hwi.c 	if (t) {
t                2044 drivers/scsi/pm8001/pm80xx_hwi.c 		if (t->dev && (t->dev->lldd_dev))
t                2045 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_dev = t->dev->lldd_dev;
t                2053 drivers/scsi/pm8001/pm80xx_hwi.c 		&& unlikely(!t || !t->lldd_task || !t->dev)) {
t                2059 drivers/scsi/pm8001/pm80xx_hwi.c 	ts = &t->task_status;
t                2068 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!((t->dev->parent) &&
t                2069 drivers/scsi/pm8001/pm80xx_hwi.c 			(dev_is_expander(t->dev->parent->dev_type)))) {
t                2103 drivers/scsi/pm8001/pm80xx_hwi.c 				"%016llx", SAS_ADDR(t->dev->sas_addr)));
t                2122 drivers/scsi/pm8001/pm80xx_hwi.c 				sas_free_task(t);
t                2135 drivers/scsi/pm8001/pm80xx_hwi.c 			if (t->ata_task.dma_xfer == 0 &&
t                2136 drivers/scsi/pm8001/pm80xx_hwi.c 			    t->data_dir == DMA_FROM_DEVICE) {
t                2140 drivers/scsi/pm8001/pm80xx_hwi.c 			} else if (t->ata_task.use_ncq) {
t                2229 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2235 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2245 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2251 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2267 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2273 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2331 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2336 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2351 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2356 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2375 drivers/scsi/pm8001/pm80xx_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2376 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2377 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2378 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2379 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2380 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2384 drivers/scsi/pm8001/pm80xx_hwi.c 			t, status, ts->resp, ts->stat));
t                2385 drivers/scsi/pm8001/pm80xx_hwi.c 		if (t->slow_task)
t                2386 drivers/scsi/pm8001/pm80xx_hwi.c 			complete(&t->slow_task->completion);
t                2387 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2389 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2390 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2397 drivers/scsi/pm8001/pm80xx_hwi.c 	struct sas_task *t;
t                2412 drivers/scsi/pm8001/pm80xx_hwi.c 		t = ccb->task;
t                2433 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev)) {
t                2439 drivers/scsi/pm8001/pm80xx_hwi.c 	ts = &t->task_status;
t                2496 drivers/scsi/pm8001/pm80xx_hwi.c 		if (!t->uldd_task) {
t                2502 drivers/scsi/pm8001/pm80xx_hwi.c 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2613 drivers/scsi/pm8001/pm80xx_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2614 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2615 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2616 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2617 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2618 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2622 drivers/scsi/pm8001/pm80xx_hwi.c 			t, event, ts->resp, ts->stat));
t                2623 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2625 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2626 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
t                2635 drivers/scsi/pm8001/pm80xx_hwi.c 	struct sas_task *t;
t                2651 drivers/scsi/pm8001/pm80xx_hwi.c 	t = ccb->task;
t                2652 drivers/scsi/pm8001/pm80xx_hwi.c 	ts = &t->task_status;
t                2657 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely(!t || !t->lldd_task || !t->dev))
t                2674 drivers/scsi/pm8001/pm80xx_hwi.c 						(&t->smp_task.smp_resp))));
t                2834 drivers/scsi/pm8001/pm80xx_hwi.c 	spin_lock_irqsave(&t->task_state_lock, flags);
t                2835 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t                2836 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t                2837 drivers/scsi/pm8001/pm80xx_hwi.c 	t->task_state_flags |= SAS_TASK_STATE_DONE;
t                2838 drivers/scsi/pm8001/pm80xx_hwi.c 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
t                2839 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2843 drivers/scsi/pm8001/pm80xx_hwi.c 			t, status, ts->resp, ts->stat));
t                2844 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2846 drivers/scsi/pm8001/pm80xx_hwi.c 		spin_unlock_irqrestore(&t->task_state_lock, flags);
t                2847 drivers/scsi/pm8001/pm80xx_hwi.c 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
t                2849 drivers/scsi/pm8001/pm80xx_hwi.c 		t->task_done(t);
t                 545 drivers/scsi/pmcraid.c static void pmcraid_bist_done(struct timer_list *t)
t                 547 drivers/scsi/pmcraid.c 	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
t                 602 drivers/scsi/pmcraid.c static void pmcraid_reset_alert_done(struct timer_list *t)
t                 604 drivers/scsi/pmcraid.c 	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
t                 686 drivers/scsi/pmcraid.c static void pmcraid_timeout_handler(struct timer_list *t)
t                 688 drivers/scsi/pmcraid.c 	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
t                 733 drivers/scsi/qla1280.c static void qla1280_mailbox_timeout(struct timer_list *t)
t                 735 drivers/scsi/qla1280.c 	struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
t                3150 drivers/scsi/qla2xxx/qla_gs.c 	fc_port_t *fcport, *conflict, *t;
t                3159 drivers/scsi/qla2xxx/qla_gs.c 		list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
t                3169 drivers/scsi/qla2xxx/qla_gs.c 			list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
t                3221 drivers/scsi/qla2xxx/qla_gs.c 			list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
t                  49 drivers/scsi/qla2xxx/qla_init.c qla2x00_sp_timeout(struct timer_list *t)
t                  51 drivers/scsi/qla2xxx/qla_init.c 	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
t                1320 drivers/scsi/qla2xxx/qla_os.c qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
t                1350 drivers/scsi/qla2xxx/qla_os.c 			match = cmd->device->id == t;
t                1353 drivers/scsi/qla2xxx/qla_os.c 			match = (cmd->device->id == t &&
t                3462 drivers/scsi/qla2xxx/qla_os.c 		struct task_struct *t = ha->dpc_thread;
t                3465 drivers/scsi/qla2xxx/qla_os.c 		kthread_stop(t);
t                3622 drivers/scsi/qla2xxx/qla_os.c 		struct task_struct *t = ha->dpc_thread;
t                3629 drivers/scsi/qla2xxx/qla_os.c 		kthread_stop(t);
t                6489 drivers/scsi/qla2xxx/qla_os.c 	struct task_struct *t = ha->dpc_thread;
t                6491 drivers/scsi/qla2xxx/qla_os.c 	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
t                6492 drivers/scsi/qla2xxx/qla_os.c 		wake_up_process(t);
t                6530 drivers/scsi/qla2xxx/qla_os.c qla2x00_timer(struct timer_list *t)
t                6532 drivers/scsi/qla2xxx/qla_os.c 	scsi_qla_host_t *vha = from_timer(vha, t, timer);
t                 288 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt_sess_op *u, *t;
t                 294 drivers/scsi/qla2xxx/qla_target.c 	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
t                 672 drivers/scsi/qla2xxx/qla_target.c 	fc_port_t *t;
t                 676 drivers/scsi/qla2xxx/qla_target.c 		t = e->u.nack.fcport;
t                 677 drivers/scsi/qla2xxx/qla_target.c 		flush_work(&t->del_work);
t                 678 drivers/scsi/qla2xxx/qla_target.c 		flush_work(&t->free_work);
t                 680 drivers/scsi/qla2xxx/qla_target.c 		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
t                 682 drivers/scsi/qla2xxx/qla_target.c 		if (t) {
t                 684 drivers/scsi/qla2xxx/qla_target.c 			    "%s create sess success %p", __func__, t);
t                3974 drivers/scsi/qla4xxx/ql4_os.c static void qla4xxx_timer(struct timer_list *t);
t                4520 drivers/scsi/qla4xxx/ql4_os.c static void qla4xxx_timer(struct timer_list *t)
t                4522 drivers/scsi/qla4xxx/ql4_os.c 	struct scsi_qla_host *ha = from_timer(ha, t, timer);
t                 171 drivers/scsi/scsi_priv.h static inline void scsi_autopm_get_target(struct scsi_target *t) {}
t                 172 drivers/scsi/scsi_priv.h static inline void scsi_autopm_put_target(struct scsi_target *t) {}
t                  13 drivers/scsi/scsi_sas_internal.h 	struct scsi_transport_template t;
t                  41 drivers/scsi/scsi_sas_internal.h #define to_sas_internal(tmpl)	container_of(tmpl, struct sas_internal, t)
t                 314 drivers/scsi/scsi_transport_fc.c 	struct scsi_transport_template t;
t                 343 drivers/scsi/scsi_transport_fc.c #define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
t                1958 drivers/scsi/scsi_transport_fc.c 	return &i->t.host_attrs.ac == cont;
t                1977 drivers/scsi/scsi_transport_fc.c 	return &i->t.target_attrs.ac == cont;
t                2162 drivers/scsi/scsi_transport_fc.c 	i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
t                2163 drivers/scsi/scsi_transport_fc.c 	i->t.target_attrs.ac.class = &fc_transport_class.class;
t                2164 drivers/scsi/scsi_transport_fc.c 	i->t.target_attrs.ac.match = fc_target_match;
t                2165 drivers/scsi/scsi_transport_fc.c 	i->t.target_size = sizeof(struct fc_starget_attrs);
t                2166 drivers/scsi/scsi_transport_fc.c 	transport_container_register(&i->t.target_attrs);
t                2168 drivers/scsi/scsi_transport_fc.c 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
t                2169 drivers/scsi/scsi_transport_fc.c 	i->t.host_attrs.ac.class = &fc_host_class.class;
t                2170 drivers/scsi/scsi_transport_fc.c 	i->t.host_attrs.ac.match = fc_host_match;
t                2171 drivers/scsi/scsi_transport_fc.c 	i->t.host_size = sizeof(struct fc_host_attrs);
t                2173 drivers/scsi/scsi_transport_fc.c 		i->t.host_attrs.statistics = &fc_statistics_group;
t                2174 drivers/scsi/scsi_transport_fc.c 	transport_container_register(&i->t.host_attrs);
t                2189 drivers/scsi/scsi_transport_fc.c 	i->t.create_work_queue = 1;
t                2191 drivers/scsi/scsi_transport_fc.c 	i->t.user_scan = fc_user_scan;
t                2290 drivers/scsi/scsi_transport_fc.c 	return &i->t;
t                2294 drivers/scsi/scsi_transport_fc.c void fc_release_transport(struct scsi_transport_template *t)
t                2296 drivers/scsi/scsi_transport_fc.c 	struct fc_internal *i = to_fc_internal(t);
t                2298 drivers/scsi/scsi_transport_fc.c 	transport_container_unregister(&i->t.target_attrs);
t                2299 drivers/scsi/scsi_transport_fc.c 	transport_container_unregister(&i->t.host_attrs);
t                  80 drivers/scsi/scsi_transport_iscsi.c 	struct scsi_transport_template t;
t                 102 drivers/scsi/scsi_transport_iscsi.c 	container_of(tmpl, struct iscsi_internal, t)
t                 309 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = iface->transport;			\
t                 310 drivers/scsi/scsi_transport_iscsi.c 	return t->get_iface_param(iface, param_type, param, buf);	\
t                 430 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = iface->transport;
t                 615 drivers/scsi/scsi_transport_iscsi.c 	return t->attr_is_visible(param_type, param);
t                 821 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = fnode_sess->transport;		\
t                 822 drivers/scsi/scsi_transport_iscsi.c 	return t->get_flashnode_param(fnode_sess, param, buf);		\
t                 921 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = fnode_sess->transport;
t                 995 drivers/scsi/scsi_transport_iscsi.c 	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
t                1034 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = fnode_conn->transport;		\
t                1035 drivers/scsi/scsi_transport_iscsi.c 	return t->get_flashnode_param(fnode_sess, param, buf);		\
t                1122 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = fnode_conn->transport;
t                1182 drivers/scsi/scsi_transport_iscsi.c 	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
t                3763 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = conn->transport;			\
t                3764 drivers/scsi/scsi_transport_iscsi.c 	return t->get_conn_param(conn, param, buf);			\
t                3808 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = conn->transport;			\
t                3818 drivers/scsi/scsi_transport_iscsi.c 	if (!ep && t->ep_connect) {					\
t                3824 drivers/scsi/scsi_transport_iscsi.c 		rc = t->get_ep_param(ep, param, buf);			\
t                3826 drivers/scsi/scsi_transport_iscsi.c 		rc = t->get_conn_param(conn, param, buf);		\
t                3878 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = conn->transport;
t                3946 drivers/scsi/scsi_transport_iscsi.c 	return t->attr_is_visible(ISCSI_PARAM, param);
t                3964 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = session->transport;			\
t                3968 drivers/scsi/scsi_transport_iscsi.c 	return t->get_session_param(session, param, buf);		\
t                4145 drivers/scsi/scsi_transport_iscsi.c 	struct iscsi_transport *t = session->transport;
t                4241 drivers/scsi/scsi_transport_iscsi.c 	return t->attr_is_visible(ISCSI_PARAM, param);
t                4436 drivers/scsi/scsi_transport_iscsi.c         return &priv->t.host_attrs.ac == cont;
t                4457 drivers/scsi/scsi_transport_iscsi.c 	priv->t.user_scan = iscsi_user_scan;
t                4458 drivers/scsi/scsi_transport_iscsi.c 	priv->t.create_work_queue = 1;
t                4471 drivers/scsi/scsi_transport_iscsi.c 	priv->t.host_attrs.ac.class = &iscsi_host_class.class;
t                4472 drivers/scsi/scsi_transport_iscsi.c 	priv->t.host_attrs.ac.match = iscsi_host_match;
t                4473 drivers/scsi/scsi_transport_iscsi.c 	priv->t.host_attrs.ac.grp = &iscsi_host_group;
t                4474 drivers/scsi/scsi_transport_iscsi.c 	priv->t.host_size = sizeof(struct iscsi_cls_host);
t                4475 drivers/scsi/scsi_transport_iscsi.c 	transport_container_register(&priv->t.host_attrs);
t                4494 drivers/scsi/scsi_transport_iscsi.c 	return &priv->t;
t                4523 drivers/scsi/scsi_transport_iscsi.c 	transport_container_unregister(&priv->t.host_attrs);
t                 272 drivers/scsi/scsi_transport_sas.c 	return &i->t.host_attrs.ac == cont;
t                1751 drivers/scsi/scsi_transport_sas.c 	i->t.user_scan = sas_user_scan;
t                1753 drivers/scsi/scsi_transport_sas.c 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
t                1754 drivers/scsi/scsi_transport_sas.c 	i->t.host_attrs.ac.class = &sas_host_class.class;
t                1755 drivers/scsi/scsi_transport_sas.c 	i->t.host_attrs.ac.match = sas_host_match;
t                1756 drivers/scsi/scsi_transport_sas.c 	transport_container_register(&i->t.host_attrs);
t                1757 drivers/scsi/scsi_transport_sas.c 	i->t.host_size = sizeof(struct sas_host_attrs);
t                1842 drivers/scsi/scsi_transport_sas.c 	return &i->t;
t                1850 drivers/scsi/scsi_transport_sas.c void sas_release_transport(struct scsi_transport_template *t)
t                1852 drivers/scsi/scsi_transport_sas.c 	struct sas_internal *i = to_sas_internal(t);
t                1854 drivers/scsi/scsi_transport_sas.c 	transport_container_unregister(&i->t.host_attrs);
t                  60 drivers/scsi/scsi_transport_spi.c 	struct scsi_transport_template t;
t                  64 drivers/scsi/scsi_transport_spi.c #define to_spi_internal(tmpl)	container_of(tmpl, struct spi_internal, t)
t                1410 drivers/scsi/scsi_transport_spi.c 	return &i->t.target_attrs.ac == cont;
t                1566 drivers/scsi/scsi_transport_spi.c 	i->t.target_attrs.ac.class = &spi_transport_class.class;
t                1567 drivers/scsi/scsi_transport_spi.c 	i->t.target_attrs.ac.grp = &target_attribute_group;
t                1568 drivers/scsi/scsi_transport_spi.c 	i->t.target_attrs.ac.match = spi_target_match;
t                1569 drivers/scsi/scsi_transport_spi.c 	transport_container_register(&i->t.target_attrs);
t                1570 drivers/scsi/scsi_transport_spi.c 	i->t.target_size = sizeof(struct spi_transport_attrs);
t                1571 drivers/scsi/scsi_transport_spi.c 	i->t.host_attrs.ac.class = &spi_host_class.class;
t                1572 drivers/scsi/scsi_transport_spi.c 	i->t.host_attrs.ac.grp = &host_attribute_group;
t                1573 drivers/scsi/scsi_transport_spi.c 	i->t.host_attrs.ac.match = spi_host_match;
t                1574 drivers/scsi/scsi_transport_spi.c 	transport_container_register(&i->t.host_attrs);
t                1575 drivers/scsi/scsi_transport_spi.c 	i->t.host_size = sizeof(struct spi_host_attrs);
t                1578 drivers/scsi/scsi_transport_spi.c 	return &i->t;
t                1582 drivers/scsi/scsi_transport_spi.c void spi_release_transport(struct scsi_transport_template *t)
t                1584 drivers/scsi/scsi_transport_spi.c 	struct spi_internal *i = to_spi_internal(t);
t                1586 drivers/scsi/scsi_transport_spi.c 	transport_container_unregister(&i->t.target_attrs);
t                1587 drivers/scsi/scsi_transport_spi.c 	transport_container_unregister(&i->t.host_attrs);
t                  31 drivers/scsi/scsi_transport_srp.c 	struct scsi_transport_template t;
t                  42 drivers/scsi/scsi_transport_srp.c #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
t                 660 drivers/scsi/scsi_transport_srp.c 	return &i->t.host_attrs.ac == cont;
t                 816 drivers/scsi/scsi_transport_srp.c 	i->t.host_size = sizeof(struct srp_host_attrs);
t                 817 drivers/scsi/scsi_transport_srp.c 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
t                 818 drivers/scsi/scsi_transport_srp.c 	i->t.host_attrs.ac.class = &srp_host_class.class;
t                 819 drivers/scsi/scsi_transport_srp.c 	i->t.host_attrs.ac.match = srp_host_match;
t                 821 drivers/scsi/scsi_transport_srp.c 	transport_container_register(&i->t.host_attrs);
t                 848 drivers/scsi/scsi_transport_srp.c 	return &i->t;
t                 856 drivers/scsi/scsi_transport_srp.c void srp_release_transport(struct scsi_transport_template *t)
t                 858 drivers/scsi/scsi_transport_srp.c 	struct srp_internal *i = to_srp_internal(t);
t                 860 drivers/scsi/scsi_transport_srp.c 	transport_container_unregister(&i->t.host_attrs);
t                3207 drivers/scsi/smartpqi/smartpqi_init.c static void pqi_heartbeat_timer_handler(struct timer_list *t)
t                3211 drivers/scsi/smartpqi/smartpqi_init.c 	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
t                 110 drivers/scsi/snic/snic_disc.h #define snic_tgt_to_shost(t)	\
t                 111 drivers/scsi/snic/snic_disc.h 	dev_to_shost(t->dev.parent)
t                 315 drivers/scsi/st.c #define st_printk(prefix, t, fmt, a...) \
t                 316 drivers/scsi/st.c 	sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
t                 318 drivers/scsi/st.c #define DEBC_printk(t, fmt, a...) \
t                 319 drivers/scsi/st.c 	if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
t                 321 drivers/scsi/st.c #define DEBC_printk(t, fmt, a...)
t                 663 drivers/scsi/storvsc_drv.c 	int ret, t;
t                 710 drivers/scsi/storvsc_drv.c 	t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
t                 711 drivers/scsi/storvsc_drv.c 	if (t == 0) {
t                 755 drivers/scsi/storvsc_drv.c 	int ret, t;
t                 771 drivers/scsi/storvsc_drv.c 	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
t                 772 drivers/scsi/storvsc_drv.c 	if (t == 0)
t                1479 drivers/scsi/storvsc_drv.c 	int ret, t;
t                1504 drivers/scsi/storvsc_drv.c 	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
t                1505 drivers/scsi/storvsc_drv.c 	if (t == 0)
t                 555 drivers/scsi/sym53c8xx_2/sym_glue.c static void sym53c8xx_timer(struct timer_list *t)
t                 557 drivers/scsi/sym53c8xx_2/sym_glue.c 	struct sym_hcb *np = from_timer(np, t, s.timer);
t                 897 drivers/scsi/sym53c8xx_2/sym_glue.c 	int t, l;
t                 916 drivers/scsi/sym53c8xx_2/sym_glue.c 		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
t                 917 drivers/scsi/sym53c8xx_2/sym_glue.c 			if (!((uc->target >> t) & 1))
t                 919 drivers/scsi/sym53c8xx_2/sym_glue.c 			tp = &np->target[t];
t                 261 drivers/scsi/ufs/ufshci.h #define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
t                 306 drivers/scsi/ufs/ufshci.h #define INT_AGGR_TIMEOUT_VAL(t)		(((t) & 0xFF) << 0)
t                  73 drivers/soc/fsl/qbman/qman_test_api.c 	u64 t = qm_fd_addr_get64(fd);
t                  74 drivers/soc/fsl/qbman/qman_test_api.c 	int z = t >> 40;
t                  78 drivers/soc/fsl/qbman/qman_test_api.c 	t <<= 1;
t                  80 drivers/soc/fsl/qbman/qman_test_api.c 		t |= 1;
t                  81 drivers/soc/fsl/qbman/qman_test_api.c 	qm_fd_addr_set64(fd, t);
t                  18 drivers/soc/mediatek/mtk-cmdq-helper.c static void cmdq_client_timeout(struct timer_list *t)
t                  20 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = from_timer(client, t, timer);
t                 116 drivers/spi/spi-altera.c 	struct spi_device *spi, struct spi_transfer *t)
t                 120 drivers/spi/spi-altera.c 	hw->tx = t->tx_buf;
t                 121 drivers/spi/spi-altera.c 	hw->rx = t->rx_buf;
t                 123 drivers/spi/spi-altera.c 	hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
t                 124 drivers/spi/spi-altera.c 	hw->len = t->len / hw->bytes_per_word;
t                 146 drivers/spi/spi-altera.c 	return t->len;
t                  54 drivers/spi/spi-au1550.c 	int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
t                 221 drivers/spi/spi-au1550.c static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
t                 227 drivers/spi/spi-au1550.c 	if (t) {
t                 228 drivers/spi/spi-au1550.c 		bpw = t->bits_per_word;
t                 229 drivers/spi/spi-au1550.c 		hz = t->speed_hz;
t                 305 drivers/spi/spi-au1550.c static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
t                 312 drivers/spi/spi-au1550.c 	hw->len = t->len;
t                 316 drivers/spi/spi-au1550.c 	hw->tx = t->tx_buf;
t                 317 drivers/spi/spi-au1550.c 	hw->rx = t->rx_buf;
t                 318 drivers/spi/spi-au1550.c 	dma_tx_addr = t->tx_dma;
t                 319 drivers/spi/spi-au1550.c 	dma_rx_addr = t->rx_dma;
t                 329 drivers/spi/spi-au1550.c 	if (t->tx_buf) {
t                 330 drivers/spi/spi-au1550.c 		if (t->tx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
t                 332 drivers/spi/spi-au1550.c 					(void *)t->tx_buf,
t                 333 drivers/spi/spi-au1550.c 					t->len, DMA_TO_DEVICE);
t                 339 drivers/spi/spi-au1550.c 	if (t->rx_buf) {
t                 340 drivers/spi/spi-au1550.c 		if (t->rx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
t                 342 drivers/spi/spi-au1550.c 					(void *)t->rx_buf,
t                 343 drivers/spi/spi-au1550.c 					t->len, DMA_FROM_DEVICE);
t                 348 drivers/spi/spi-au1550.c 		if (t->len > hw->dma_rx_tmpbuf_size) {
t                 352 drivers/spi/spi-au1550.c 			ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
t                 360 drivers/spi/spi-au1550.c 			t->len, DMA_FROM_DEVICE);
t                 363 drivers/spi/spi-au1550.c 	if (!t->tx_buf) {
t                 365 drivers/spi/spi-au1550.c 				t->len, DMA_BIDIRECTIONAL);
t                 371 drivers/spi/spi-au1550.c 				    t->len, DDMA_FLAGS_IE);
t                 376 drivers/spi/spi-au1550.c 				      t->len, DDMA_FLAGS_IE);
t                 396 drivers/spi/spi-au1550.c 	if (!t->rx_buf) {
t                 398 drivers/spi/spi-au1550.c 		dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
t                 402 drivers/spi/spi-au1550.c 	if (t->rx_buf && t->rx_dma == 0 )
t                 403 drivers/spi/spi-au1550.c 		dma_unmap_single(hw->dev, dma_rx_addr, t->len,
t                 405 drivers/spi/spi-au1550.c 	if (t->tx_buf && t->tx_dma == 0 )
t                 406 drivers/spi/spi-au1550.c 		dma_unmap_single(hw->dev, dma_tx_addr, t->len,
t                 503 drivers/spi/spi-au1550.c static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
t                 508 drivers/spi/spi-au1550.c 	hw->tx = t->tx_buf;
t                 509 drivers/spi/spi-au1550.c 	hw->rx = t->rx_buf;
t                 510 drivers/spi/spi-au1550.c 	hw->len = t->len;
t                 636 drivers/spi/spi-au1550.c static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 639 drivers/spi/spi-au1550.c 	return hw->txrx_bufs(spi, t);
t                 169 drivers/spi/spi-axi-spi-engine.c 	unsigned int t;
t                 174 drivers/spi/spi-axi-spi-engine.c 	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
t                 175 drivers/spi/spi-axi-spi-engine.c 	while (t) {
t                 176 drivers/spi/spi-axi-spi-engine.c 		unsigned int n = min(t, 256U);
t                 179 drivers/spi/spi-axi-spi-engine.c 		t -= n;
t                 910 drivers/spi/spi-bcm-qspi.c 	struct spi_transfer t[2];
t                 915 drivers/spi/spi-bcm-qspi.c 	memset(t, 0, sizeof(t));
t                 923 drivers/spi/spi-bcm-qspi.c 	t[0].tx_buf = cmd;
t                 924 drivers/spi/spi-bcm-qspi.c 	t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
t                 925 drivers/spi/spi-bcm-qspi.c 	t[0].bits_per_word = spi->bits_per_word;
t                 926 drivers/spi/spi-bcm-qspi.c 	t[0].tx_nbits = op->cmd.buswidth;
t                 929 drivers/spi/spi-bcm-qspi.c 	ret = bcm_qspi_transfer_one(master, spi, &t[0]);
t                 935 drivers/spi/spi-bcm-qspi.c 		t[1].rx_buf = op->data.buf.in;
t                 936 drivers/spi/spi-bcm-qspi.c 		t[1].len = op->data.nbytes;
t                 937 drivers/spi/spi-bcm-qspi.c 		t[1].rx_nbits =  op->data.buswidth;
t                 938 drivers/spi/spi-bcm-qspi.c 		t[1].bits_per_word = spi->bits_per_word;
t                 939 drivers/spi/spi-bcm-qspi.c 		ret = bcm_qspi_transfer_one(master, spi, &t[1]);
t                 155 drivers/spi/spi-bcm63xx-hsspi.c static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
t                 160 drivers/spi/spi-bcm63xx-hsspi.c 	int pending = t->len;
t                 162 drivers/spi/spi-bcm63xx-hsspi.c 	const u8 *tx = t->tx_buf;
t                 163 drivers/spi/spi-bcm63xx-hsspi.c 	u8 *rx = t->rx_buf;
t                 165 drivers/spi/spi-bcm63xx-hsspi.c 	bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
t                 178 drivers/spi/spi-bcm63xx-hsspi.c 	if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
t                 179 drivers/spi/spi-bcm63xx-hsspi.c 	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
t                 264 drivers/spi/spi-bcm63xx-hsspi.c 	struct spi_transfer *t;
t                 287 drivers/spi/spi-bcm63xx-hsspi.c 	list_for_each_entry(t, &msg->transfers, transfer_list) {
t                 288 drivers/spi/spi-bcm63xx-hsspi.c 		status = bcm63xx_hsspi_do_txrx(spi, t);
t                 292 drivers/spi/spi-bcm63xx-hsspi.c 		msg->actual_length += t->len;
t                 294 drivers/spi/spi-bcm63xx-hsspi.c 		if (t->delay_usecs)
t                 295 drivers/spi/spi-bcm63xx-hsspi.c 			udelay(t->delay_usecs);
t                 297 drivers/spi/spi-bcm63xx-hsspi.c 		if (t->cs_change)
t                 196 drivers/spi/spi-bcm63xx.c 				      struct spi_transfer *t)
t                 207 drivers/spi/spi-bcm63xx.c 		if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
t                 220 drivers/spi/spi-bcm63xx.c 		clk_cfg, t->speed_hz);
t                 233 drivers/spi/spi-bcm63xx.c 	struct spi_transfer *t = first;
t                 241 drivers/spi/spi-bcm63xx.c 		t->tx_buf, t->rx_buf, t->len);
t                 243 drivers/spi/spi-bcm63xx.c 	if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
t                 244 drivers/spi/spi-bcm63xx.c 		prepend_len = t->len;
t                 248 drivers/spi/spi-bcm63xx.c 		if (t->tx_buf) {
t                 250 drivers/spi/spi-bcm63xx.c 			memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
t                 253 drivers/spi/spi-bcm63xx.c 			if (t != first)
t                 257 drivers/spi/spi-bcm63xx.c 		if (t->rx_buf) {
t                 260 drivers/spi/spi-bcm63xx.c 			if (t == first)
t                 264 drivers/spi/spi-bcm63xx.c 		len += t->len;
t                 266 drivers/spi/spi-bcm63xx.c 		t = list_entry(t->transfer_list.next, struct spi_transfer,
t                 308 drivers/spi/spi-bcm63xx.c 	t = first;
t                 311 drivers/spi/spi-bcm63xx.c 		if (t->rx_buf)
t                 312 drivers/spi/spi-bcm63xx.c 			memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
t                 314 drivers/spi/spi-bcm63xx.c 		if (t != first || prepend_len == 0)
t                 315 drivers/spi/spi-bcm63xx.c 			len += t->len;
t                 317 drivers/spi/spi-bcm63xx.c 		t = list_entry(t->transfer_list.next, struct spi_transfer,
t                 328 drivers/spi/spi-bcm63xx.c 	struct spi_transfer *t, *first = NULL;
t                 340 drivers/spi/spi-bcm63xx.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 342 drivers/spi/spi-bcm63xx.c 			first = t;
t                 345 drivers/spi/spi-bcm63xx.c 		total_len += t->len;
t                 347 drivers/spi/spi-bcm63xx.c 		if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
t                 350 drivers/spi/spi-bcm63xx.c 		else if (can_use_prepend && t->tx_buf)
t                 364 drivers/spi/spi-bcm63xx.c 		if (t->speed_hz != first->speed_hz) {
t                 371 drivers/spi/spi-bcm63xx.c 		if (t->delay_usecs) {
t                 377 drivers/spi/spi-bcm63xx.c 		if (t->cs_change ||
t                 378 drivers/spi/spi-bcm63xx.c 		    list_is_last(&t->transfer_list, &m->transfers)) {
t                  61 drivers/spi/spi-bitbang.c 	struct spi_transfer	*t,
t                  64 drivers/spi/spi-bitbang.c 	unsigned		bits = t->bits_per_word;
t                  65 drivers/spi/spi-bitbang.c 	unsigned		count = t->len;
t                  66 drivers/spi/spi-bitbang.c 	const u8		*tx = t->tx_buf;
t                  67 drivers/spi/spi-bitbang.c 	u8			*rx = t->rx_buf;
t                  79 drivers/spi/spi-bitbang.c 	return t->len - count;
t                  89 drivers/spi/spi-bitbang.c 	struct spi_transfer	*t,
t                  92 drivers/spi/spi-bitbang.c 	unsigned		bits = t->bits_per_word;
t                  93 drivers/spi/spi-bitbang.c 	unsigned		count = t->len;
t                  94 drivers/spi/spi-bitbang.c 	const u16		*tx = t->tx_buf;
t                  95 drivers/spi/spi-bitbang.c 	u16			*rx = t->rx_buf;
t                 107 drivers/spi/spi-bitbang.c 	return t->len - count;
t                 117 drivers/spi/spi-bitbang.c 	struct spi_transfer	*t,
t                 120 drivers/spi/spi-bitbang.c 	unsigned		bits = t->bits_per_word;
t                 121 drivers/spi/spi-bitbang.c 	unsigned		count = t->len;
t                 122 drivers/spi/spi-bitbang.c 	const u32		*tx = t->tx_buf;
t                 123 drivers/spi/spi-bitbang.c 	u32			*rx = t->rx_buf;
t                 135 drivers/spi/spi-bitbang.c 	return t->len - count;
t                 138 drivers/spi/spi-bitbang.c int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
t                 144 drivers/spi/spi-bitbang.c 	if (t) {
t                 145 drivers/spi/spi-bitbang.c 		bits_per_word = t->bits_per_word;
t                 146 drivers/spi/spi-bitbang.c 		hz = t->speed_hz;
t                 220 drivers/spi/spi-bitbang.c static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 230 drivers/spi/spi-bitbang.c 		err = bitbang->set_line_direction(spi, !!(t->tx_buf));
t                 238 drivers/spi/spi-bitbang.c 		flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX;
t                 239 drivers/spi/spi-bitbang.c 		return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
t                 241 drivers/spi/spi-bitbang.c 	return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
t                 300 drivers/spi/spi-coldfire-qspi.c 				struct spi_transfer *t)
t                 305 drivers/spi/spi-coldfire-qspi.c 	qmr |= t->bits_per_word << 10;
t                 310 drivers/spi/spi-coldfire-qspi.c 	qmr |= mcfqspi_qmr_baud(t->speed_hz);
t                 314 drivers/spi/spi-coldfire-qspi.c 	if (t->bits_per_word == 8)
t                 315 drivers/spi/spi-coldfire-qspi.c 		mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
t                 317 drivers/spi/spi-coldfire-qspi.c 		mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
t                 318 drivers/spi/spi-coldfire-qspi.c 				       t->rx_buf);
t                 271 drivers/spi/spi-davinci.c 		struct spi_transfer *t)
t                 285 drivers/spi/spi-davinci.c 	if (t) {
t                 286 drivers/spi/spi-davinci.c 		bits_per_word = t->bits_per_word;
t                 287 drivers/spi/spi-davinci.c 		hz = t->speed_hz;
t                 571 drivers/spi/spi-davinci.c static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 590 drivers/spi/spi-davinci.c 	dspi->tx = t->tx_buf;
t                 591 drivers/spi/spi-davinci.c 	dspi->rx = t->rx_buf;
t                 592 drivers/spi/spi-davinci.c 	dspi->wcount = t->len / data_type;
t                 602 drivers/spi/spi-davinci.c 	if (!davinci_spi_can_dma(spi->master, spi, t)) {
t                 631 drivers/spi/spi-davinci.c 				t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
t                 636 drivers/spi/spi-davinci.c 		if (!t->tx_buf) {
t                 642 drivers/spi/spi-davinci.c 			t->tx_sg.sgl = t->rx_sg.sgl;
t                 643 drivers/spi/spi-davinci.c 			t->tx_sg.nents = t->rx_sg.nents;
t                 647 drivers/spi/spi-davinci.c 				t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
t                 683 drivers/spi/spi-davinci.c 	if (davinci_spi_can_dma(spi->master, spi, t))
t                 705 drivers/spi/spi-davinci.c 	return t->len;
t                 114 drivers/spi/spi-efm32.c 		struct spi_transfer *t)
t                 118 drivers/spi/spi-efm32.c 	unsigned bpw = t->bits_per_word ?: spi->bits_per_word;
t                 119 drivers/spi/spi-efm32.c 	unsigned speed = t->speed_hz ?: spi->max_speed_hz;
t                 181 drivers/spi/spi-efm32.c static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 191 drivers/spi/spi-efm32.c 	ddata->tx_buf = t->tx_buf;
t                 192 drivers/spi/spi-efm32.c 	ddata->rx_buf = t->rx_buf;
t                 194 drivers/spi/spi-efm32.c 		t->len * DIV_ROUND_UP(t->bits_per_word, 8);
t                 208 drivers/spi/spi-efm32.c 	ret = t->len - max(ddata->tx_len, ddata->rx_len);
t                  97 drivers/spi/spi-falcon.c int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
t                 102 drivers/spi/spi-falcon.c 	const u8 *txp = t->tx_buf;
t                 103 drivers/spi/spi-falcon.c 	u8 *rxp = t->rx_buf;
t                 104 drivers/spi/spi-falcon.c 	unsigned int bytelen = ((8 * t->len + 7) / 8);
t                 358 drivers/spi/spi-falcon.c 	struct spi_transfer *t;
t                 367 drivers/spi/spi-falcon.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 368 drivers/spi/spi-falcon.c 		if (list_is_last(&t->transfer_list, &m->transfers))
t                 372 drivers/spi/spi-falcon.c 		ret = falcon_sflash_xfer(m->spi, t, spi_flags);
t                 378 drivers/spi/spi-falcon.c 		m->actual_length += t->len;
t                 380 drivers/spi/spi-falcon.c 		WARN_ON(t->delay_usecs || t->cs_change);
t                 101 drivers/spi/spi-fsl-cpm.c 		     struct spi_transfer *t, bool is_dma_mapped)
t                 114 drivers/spi/spi-fsl-cpm.c 	if (!t->tx_buf) {
t                 119 drivers/spi/spi-fsl-cpm.c 	if (!t->rx_buf) {
t                 127 drivers/spi/spi-fsl-cpm.c 		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
t                 133 drivers/spi/spi-fsl-cpm.c 	} else if (t->tx_buf) {
t                 134 drivers/spi/spi-fsl-cpm.c 		mspi->tx_dma = t->tx_dma;
t                 138 drivers/spi/spi-fsl-cpm.c 		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
t                 144 drivers/spi/spi-fsl-cpm.c 	} else if (t->rx_buf) {
t                 145 drivers/spi/spi-fsl-cpm.c 		mspi->rx_dma = t->rx_dma;
t                 151 drivers/spi/spi-fsl-cpm.c 	mspi->xfer_in_progress = t;
t                 152 drivers/spi/spi-fsl-cpm.c 	mspi->count = t->len;
t                 161 drivers/spi/spi-fsl-cpm.c 		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
t                 169 drivers/spi/spi-fsl-cpm.c 	struct spi_transfer *t = mspi->xfer_in_progress;
t                 172 drivers/spi/spi-fsl-cpm.c 		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
t                 174 drivers/spi/spi-fsl-cpm.c 		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
t                  23 drivers/spi/spi-fsl-cpm.h 			    struct spi_transfer *t, bool is_dma_mapped);
t                  31 drivers/spi/spi-fsl-cpm.h 				   struct spi_transfer *t,
t                 152 drivers/spi/spi-fsl-espi.c 	struct spi_transfer *t, *first;
t                 163 drivers/spi/spi-fsl-espi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 164 drivers/spi/spi-fsl-espi.c 		if (first->bits_per_word != t->bits_per_word ||
t                 165 drivers/spi/spi-fsl-espi.c 		    first->speed_hz != t->speed_hz) {
t                 185 drivers/spi/spi-fsl-espi.c 	struct spi_transfer *t;
t                 198 drivers/spi/spi-fsl-espi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 200 drivers/spi/spi-fsl-espi.c 			if (!t->tx_buf || t->rx_buf ||
t                 201 drivers/spi/spi-fsl-espi.c 			    t->len > FSL_ESPI_FIFO_SIZE)
t                 203 drivers/spi/spi-fsl-espi.c 			rxskip = t->len;
t                 205 drivers/spi/spi-fsl-espi.c 			if (t->tx_buf || !t->rx_buf)
t                 324 drivers/spi/spi-fsl-espi.c 					struct spi_transfer *t)
t                 327 drivers/spi/spi-fsl-espi.c 	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
t                 328 drivers/spi/spi-fsl-espi.c 	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
t                 352 drivers/spi/spi-fsl-espi.c static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 355 drivers/spi/spi-fsl-espi.c 	unsigned int rx_len = t->len;
t                 363 drivers/spi/spi-fsl-espi.c 	spcom |= SPCOM_TRANLEN(t->len - 1);
t                 368 drivers/spi/spi-fsl-espi.c 		rx_len = t->len - espi->rxskip;
t                 369 drivers/spi/spi-fsl-espi.c 		if (t->rx_nbits == SPI_NBITS_DUAL)
t                 440 drivers/spi/spi-fsl-espi.c 	struct spi_transfer *t, trans = {};
t                 447 drivers/spi/spi-fsl-espi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 448 drivers/spi/spi-fsl-espi.c 		if (t->delay_usecs > delay_usecs)
t                 449 drivers/spi/spi-fsl-espi.c 			delay_usecs = t->delay_usecs;
t                 450 drivers/spi/spi-fsl-espi.c 		if (t->rx_nbits > rx_nbits)
t                 451 drivers/spi/spi-fsl-espi.c 			rx_nbits = t->rx_nbits;
t                 454 drivers/spi/spi-fsl-espi.c 	t = list_first_entry(&m->transfers, struct spi_transfer,
t                 458 drivers/spi/spi-fsl-espi.c 	trans.speed_hz = t->speed_hz;
t                 459 drivers/spi/spi-fsl-espi.c 	trans.bits_per_word = t->bits_per_word;
t                 107 drivers/spi/spi-fsl-lib.h 		struct spi_transfer *t, unsigned int len);
t                 436 drivers/spi/spi-fsl-lpspi.c 				     struct spi_transfer *t)
t                 441 drivers/spi/spi-fsl-lpspi.c 	if (t == NULL)
t                 445 drivers/spi/spi-fsl-lpspi.c 	fsl_lpspi->config.bpw = t->bits_per_word;
t                 446 drivers/spi/spi-fsl-lpspi.c 	fsl_lpspi->config.speed_hz = t->speed_hz;
t                 466 drivers/spi/spi-fsl-lpspi.c 	if (t->len <= fsl_lpspi->txfifosize)
t                 467 drivers/spi/spi-fsl-lpspi.c 		fsl_lpspi->watermark = t->len;
t                 471 drivers/spi/spi-fsl-lpspi.c 	if (fsl_lpspi_can_dma(controller, spi, t))
t                 707 drivers/spi/spi-fsl-lpspi.c 				  struct spi_transfer *t)
t                 713 drivers/spi/spi-fsl-lpspi.c 	fsl_lpspi->tx_buf = t->tx_buf;
t                 714 drivers/spi/spi-fsl-lpspi.c 	fsl_lpspi->rx_buf = t->rx_buf;
t                 715 drivers/spi/spi-fsl-lpspi.c 	fsl_lpspi->remain = t->len;
t                 733 drivers/spi/spi-fsl-lpspi.c 				  struct spi_transfer *t)
t                 740 drivers/spi/spi-fsl-lpspi.c 	ret = fsl_lpspi_setup_transfer(controller, spi, t);
t                 748 drivers/spi/spi-fsl-lpspi.c 		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
t                 750 drivers/spi/spi-fsl-lpspi.c 		ret = fsl_lpspi_pio_transfer(controller, t);
t                 226 drivers/spi/spi-fsl-spi.c 					struct spi_transfer *t)
t                 236 drivers/spi/spi-fsl-spi.c 	if (t) {
t                 237 drivers/spi/spi-fsl-spi.c 		bits_per_word = t->bits_per_word;
t                 238 drivers/spi/spi-fsl-spi.c 		hz = t->speed_hz;
t                 291 drivers/spi/spi-fsl-spi.c 				struct spi_transfer *t, unsigned int len)
t                 308 drivers/spi/spi-fsl-spi.c static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
t                 313 drivers/spi/spi-fsl-spi.c 	unsigned int len = t->len;
t                 319 drivers/spi/spi-fsl-spi.c 	if (t->bits_per_word)
t                 320 drivers/spi/spi-fsl-spi.c 		bits_per_word = t->bits_per_word;
t                 335 drivers/spi/spi-fsl-spi.c 	mpc8xxx_spi->tx = t->tx_buf;
t                 336 drivers/spi/spi-fsl-spi.c 	mpc8xxx_spi->rx = t->rx_buf;
t                 341 drivers/spi/spi-fsl-spi.c 		ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
t                 343 drivers/spi/spi-fsl-spi.c 		ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
t                 363 drivers/spi/spi-fsl-spi.c 	struct spi_transfer *t, *first;
t                 373 drivers/spi/spi-fsl-spi.c 		list_for_each_entry(t, &m->transfers, transfer_list) {
t                 374 drivers/spi/spi-fsl-spi.c 			if (t->len < 256 || t->bits_per_word != 8)
t                 376 drivers/spi/spi-fsl-spi.c 			if ((t->len & 3) == 0)
t                 377 drivers/spi/spi-fsl-spi.c 				t->bits_per_word = 32;
t                 378 drivers/spi/spi-fsl-spi.c 			else if ((t->len & 1) == 0)
t                 379 drivers/spi/spi-fsl-spi.c 				t->bits_per_word = 16;
t                 385 drivers/spi/spi-fsl-spi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 387 drivers/spi/spi-fsl-spi.c 			first = t;
t                 388 drivers/spi/spi-fsl-spi.c 		cs_change = t->cs_change;
t                 389 drivers/spi/spi-fsl-spi.c 		if (first->speed_hz != t->speed_hz) {
t                 399 drivers/spi/spi-fsl-spi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 400 drivers/spi/spi-fsl-spi.c 		if (cs_change || last_bpw != t->bits_per_word)
t                 401 drivers/spi/spi-fsl-spi.c 			status = fsl_spi_setup_transfer(spi, t);
t                 404 drivers/spi/spi-fsl-spi.c 		last_bpw = t->bits_per_word;
t                 410 drivers/spi/spi-fsl-spi.c 		cs_change = t->cs_change;
t                 411 drivers/spi/spi-fsl-spi.c 		if (t->len)
t                 412 drivers/spi/spi-fsl-spi.c 			status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
t                 417 drivers/spi/spi-fsl-spi.c 		m->actual_length += t->len;
t                 419 drivers/spi/spi-fsl-spi.c 		if (t->delay_usecs)
t                 420 drivers/spi/spi-fsl-spi.c 			udelay(t->delay_usecs);
t                 568 drivers/spi/spi-imx.c 				       struct spi_transfer *t)
t                 571 drivers/spi/spi-imx.c 	u32 clk = t->speed_hz, delay;
t                 585 drivers/spi/spi-imx.c 	ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk);
t                 698 drivers/spi/spi-imx.c 				 struct spi_transfer *t)
t                 703 drivers/spi/spi-imx.c 	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
t                 803 drivers/spi/spi-imx.c 				 struct spi_transfer *t)
t                 809 drivers/spi/spi-imx.c 	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk)
t                 879 drivers/spi/spi-imx.c 				struct spi_transfer *t)
t                 884 drivers/spi/spi-imx.c 	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
t                1195 drivers/spi/spi-imx.c 				 struct spi_transfer *t)
t                1199 drivers/spi/spi-imx.c 	if (!t)
t                1202 drivers/spi/spi-imx.c 	spi_imx->bits_per_word = t->bits_per_word;
t                1232 drivers/spi/spi-imx.c 	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
t                1240 drivers/spi/spi-imx.c 		spi_imx->slave_burst = t->len;
t                1243 drivers/spi/spi-imx.c 	spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
t                  97 drivers/spi/spi-jcore.c 			  struct spi_transfer *t)
t                 111 drivers/spi/spi-jcore.c 	jcore_spi_baudrate(hw, t->speed_hz);
t                 114 drivers/spi/spi-jcore.c 	tx = t->tx_buf;
t                 115 drivers/spi/spi-jcore.c 	rx = t->rx_buf;
t                 116 drivers/spi/spi-jcore.c 	len = t->len;
t                 430 drivers/spi/spi-lantiq-ssc.c 			      struct spi_device *spidev, struct spi_transfer *t)
t                 432 drivers/spi/spi-lantiq-ssc.c 	unsigned int speed_hz = t->speed_hz;
t                 433 drivers/spi/spi-lantiq-ssc.c 	unsigned int bits_per_word = t->bits_per_word;
t                 449 drivers/spi/spi-lantiq-ssc.c 	if (t->tx_buf)
t                 454 drivers/spi/spi-lantiq-ssc.c 	if (t->rx_buf)
t                 679 drivers/spi/spi-lantiq-ssc.c 			  struct spi_transfer *t)
t                 685 drivers/spi/spi-lantiq-ssc.c 	spi->tx = t->tx_buf;
t                 686 drivers/spi/spi-lantiq-ssc.c 	spi->rx = t->rx_buf;
t                 688 drivers/spi/spi-lantiq-ssc.c 	if (t->tx_buf) {
t                 689 drivers/spi/spi-lantiq-ssc.c 		spi->tx_todo = t->len;
t                 696 drivers/spi/spi-lantiq-ssc.c 		spi->rx_todo = t->len;
t                 705 drivers/spi/spi-lantiq-ssc.c 	return t->len;
t                 769 drivers/spi/spi-lantiq-ssc.c 				   struct spi_transfer *t)
t                 773 drivers/spi/spi-lantiq-ssc.c 	hw_setup_transfer(spi, spidev, t);
t                 775 drivers/spi/spi-lantiq-ssc.c 	return transfer_start(spi, spidev, t);
t                 101 drivers/spi/spi-lp8841-rtc.c 			    struct spi_transfer *t)
t                 104 drivers/spi/spi-lp8841-rtc.c 	unsigned		count = t->len;
t                 105 drivers/spi/spi-lp8841-rtc.c 	const u8		*tx = t->tx_buf;
t                 106 drivers/spi/spi-lp8841-rtc.c 	u8			*rx = t->rx_buf;
t                  80 drivers/spi/spi-mpc512x-psc.c 					  struct spi_transfer *t)
t                  84 drivers/spi/spi-mpc512x-psc.c 	cs->speed_hz = (t && t->speed_hz)
t                  85 drivers/spi/spi-mpc512x-psc.c 	    ? t->speed_hz : spi->max_speed_hz;
t                  86 drivers/spi/spi-mpc512x-psc.c 	cs->bits_per_word = (t && t->bits_per_word)
t                  87 drivers/spi/spi-mpc512x-psc.c 	    ? t->bits_per_word : spi->bits_per_word;
t                 150 drivers/spi/spi-mpc512x-psc.c 					 struct spi_transfer *t)
t                 154 drivers/spi/spi-mpc512x-psc.c 	size_t tx_len = t->len;
t                 155 drivers/spi/spi-mpc512x-psc.c 	size_t rx_len = t->len;
t                 156 drivers/spi/spi-mpc512x-psc.c 	u8 *tx_buf = (u8 *)t->tx_buf;
t                 157 drivers/spi/spi-mpc512x-psc.c 	u8 *rx_buf = (u8 *)t->rx_buf;
t                 159 drivers/spi/spi-mpc512x-psc.c 	if (!tx_buf && !rx_buf && t->len)
t                 183 drivers/spi/spi-mpc512x-psc.c 				if (tx_len == EOFBYTE && t->cs_change)
t                 295 drivers/spi/spi-mpc512x-psc.c 	struct spi_transfer *t;
t                 300 drivers/spi/spi-mpc512x-psc.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 301 drivers/spi/spi-mpc512x-psc.c 		status = mpc512x_psc_spi_transfer_setup(spi, t);
t                 307 drivers/spi/spi-mpc512x-psc.c 		cs_change = t->cs_change;
t                 309 drivers/spi/spi-mpc512x-psc.c 		status = mpc512x_psc_spi_transfer_rxtx(spi, t);
t                 312 drivers/spi/spi-mpc512x-psc.c 		m->actual_length += t->len;
t                 314 drivers/spi/spi-mpc512x-psc.c 		if (t->delay_usecs)
t                 315 drivers/spi/spi-mpc512x-psc.c 			udelay(t->delay_usecs);
t                  59 drivers/spi/spi-mpc52xx-psc.c 		struct spi_transfer *t)
t                  63 drivers/spi/spi-mpc52xx-psc.c 	cs->speed_hz = (t && t->speed_hz)
t                  64 drivers/spi/spi-mpc52xx-psc.c 			? t->speed_hz : spi->max_speed_hz;
t                  65 drivers/spi/spi-mpc52xx-psc.c 	cs->bits_per_word = (t && t->bits_per_word)
t                  66 drivers/spi/spi-mpc52xx-psc.c 			? t->bits_per_word : spi->bits_per_word;
t                 127 drivers/spi/spi-mpc52xx-psc.c 						struct spi_transfer *t)
t                 134 drivers/spi/spi-mpc52xx-psc.c 	unsigned char *rx_buf = (unsigned char *)t->rx_buf;
t                 135 drivers/spi/spi-mpc52xx-psc.c 	unsigned char *tx_buf = (unsigned char *)t->tx_buf;
t                 141 drivers/spi/spi-mpc52xx-psc.c 	if (!t->tx_buf && !t->rx_buf && t->len)
t                 146 drivers/spi/spi-mpc52xx-psc.c 	while (rb < t->len) {
t                 147 drivers/spi/spi-mpc52xx-psc.c 		if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
t                 151 drivers/spi/spi-mpc52xx-psc.c 			send_at_once = t->len - sb;
t                 152 drivers/spi/spi-mpc52xx-psc.c 			rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
t                 174 drivers/spi/spi-mpc52xx-psc.c 		if (t->len - rb == 1) {
t                 210 drivers/spi/spi-mpc52xx-psc.c 		struct spi_transfer *t = NULL;
t                 221 drivers/spi/spi-mpc52xx-psc.c 		list_for_each_entry (t, &m->transfers, transfer_list) {
t                 222 drivers/spi/spi-mpc52xx-psc.c 			if (t->bits_per_word || t->speed_hz) {
t                 223 drivers/spi/spi-mpc52xx-psc.c 				status = mpc52xx_psc_spi_transfer_setup(spi, t);
t                 230 drivers/spi/spi-mpc52xx-psc.c 			cs_change = t->cs_change;
t                 232 drivers/spi/spi-mpc52xx-psc.c 			status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
t                 235 drivers/spi/spi-mpc52xx-psc.c 			m->actual_length += t->len;
t                 237 drivers/spi/spi-mpc52xx-psc.c 			if (t->delay_usecs)
t                 238 drivers/spi/spi-mpc52xx-psc.c 				udelay(t->delay_usecs);
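
The mpc512x-psc and mpc52xx-psc entries above outline the message-walking skeleton that many of the older drivers in this index share: iterate the message's transfer list, apply any per-transfer setup, run the I/O, add t->len to actual_length, then honour delay_usecs and cs_change. A minimal sketch of that loop follows, assuming hypothetical my_setup()/my_rxtx() helpers in place of the driver-specific ones:

#include <linux/spi/spi.h>
#include <linux/list.h>
#include <linux/delay.h>

/* Hypothetical per-driver hooks, standing in for e.g.
 * mpc512x_psc_spi_transfer_setup() and _rxtx() above. */
static int my_setup(struct spi_device *spi, struct spi_transfer *t);
static int my_rxtx(struct spi_device *spi, struct spi_transfer *t);

static int my_handle_message(struct spi_device *spi, struct spi_message *m)
{
	struct spi_transfer *t;
	int status;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		status = my_setup(spi, t);	/* speed_hz / bits_per_word */
		if (status < 0)
			return status;

		status = my_rxtx(spi, t);	/* shift t->len bytes */
		if (status < 0)
			return status;

		m->actual_length += t->len;	/* book-keeping for the core */

		if (t->delay_usecs)		/* legacy inter-transfer delay */
			udelay(t->delay_usecs);
	}
	return 0;
}
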
t                 253 drivers/spi/spi-mt7621.c 	struct spi_transfer *t = NULL;
t                 258 drivers/spi/spi-mt7621.c 	list_for_each_entry(t, &m->transfers, transfer_list)
t                 259 drivers/spi/spi-mt7621.c 		if (t->speed_hz < speed)
t                 260 drivers/spi/spi-mt7621.c 			speed = t->speed_hz;
t                 271 drivers/spi/spi-mt7621.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 272 drivers/spi/spi-mt7621.c 		if ((t->rx_buf) && (t->tx_buf)) {
t                 282 drivers/spi/spi-mt7621.c 		} else if (t->rx_buf) {
t                 283 drivers/spi/spi-mt7621.c 			mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
t                 284 drivers/spi/spi-mt7621.c 		} else if (t->tx_buf) {
t                 285 drivers/spi/spi-mt7621.c 			mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
t                 287 drivers/spi/spi-mt7621.c 		m->actual_length += t->len;
t                  25 drivers/spi/spi-mxic.c #define HC_CFG_TYPE(s, t)	((t) << (23 + ((s) * 2)))
t                 450 drivers/spi/spi-mxic.c 				 struct spi_transfer *t)
t                 456 drivers/spi/spi-mxic.c 	if (t->rx_buf && t->tx_buf) {
t                 464 drivers/spi/spi-mxic.c 	ret = mxic_spi_set_freq(mxic, t->speed_hz);
t                 468 drivers/spi/spi-mxic.c 	if (t->tx_buf) {
t                 473 drivers/spi/spi-mxic.c 	} else if (t->rx_buf) {
t                 481 drivers/spi/spi-mxic.c 	       OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
t                 484 drivers/spi/spi-mxic.c 	ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
t                  65 drivers/spi/spi-mxs.c 				  const struct spi_transfer *t)
t                  69 drivers/spi/spi-mxs.c 	const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
t                 366 drivers/spi/spi-mxs.c 	struct spi_transfer *t;
t                 376 drivers/spi/spi-mxs.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 378 drivers/spi/spi-mxs.c 		trace_spi_transfer_start(m, t);
t                 380 drivers/spi/spi-mxs.c 		status = mxs_spi_setup_transfer(m->spi, t);
t                 385 drivers/spi/spi-mxs.c 		flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
t                 397 drivers/spi/spi-mxs.c 		if (t->len < 32) {
t                 402 drivers/spi/spi-mxs.c 			if (t->tx_buf)
t                 404 drivers/spi/spi-mxs.c 						(void *)t->tx_buf,
t                 405 drivers/spi/spi-mxs.c 						t->len, flag | TXRX_WRITE);
t                 406 drivers/spi/spi-mxs.c 			if (t->rx_buf)
t                 408 drivers/spi/spi-mxs.c 						t->rx_buf, t->len,
t                 415 drivers/spi/spi-mxs.c 			if (t->tx_buf)
t                 417 drivers/spi/spi-mxs.c 						(void *)t->tx_buf, t->len,
t                 419 drivers/spi/spi-mxs.c 			if (t->rx_buf)
t                 421 drivers/spi/spi-mxs.c 						t->rx_buf, t->len,
t                 425 drivers/spi/spi-mxs.c 		trace_spi_transfer_stop(m, t);
t                 432 drivers/spi/spi-mxs.c 		m->actual_length += t->len;
t                 167 drivers/spi/spi-npcm-pspi.c 				     struct spi_transfer *t)
t                 171 drivers/spi/spi-npcm-pspi.c 	priv->tx_buf = t->tx_buf;
t                 172 drivers/spi/spi-npcm-pspi.c 	priv->rx_buf = t->rx_buf;
t                 173 drivers/spi/spi-npcm-pspi.c 	priv->tx_bytes = t->len;
t                 174 drivers/spi/spi-npcm-pspi.c 	priv->rx_bytes = t->len;
t                 181 drivers/spi/spi-npcm-pspi.c 	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
t                 182 drivers/spi/spi-npcm-pspi.c 		npcm_pspi_set_transfer_size(priv, t->bits_per_word);
t                 183 drivers/spi/spi-npcm-pspi.c 		priv->bits_per_word = t->bits_per_word;
t                 186 drivers/spi/spi-npcm-pspi.c 	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
t                 187 drivers/spi/spi-npcm-pspi.c 		npcm_pspi_set_baudrate(priv, t->speed_hz);
t                 188 drivers/spi/spi-npcm-pspi.c 		priv->speed_hz = t->speed_hz;
t                 249 drivers/spi/spi-npcm-pspi.c 				  struct spi_transfer *t)
t                 254 drivers/spi/spi-npcm-pspi.c 	npcm_pspi_setup_transfer(spi, t);
t                  80 drivers/spi/spi-oc-tiny.c 				   struct spi_transfer *t)
t                  85 drivers/spi/spi-oc-tiny.c 	if (t) {
t                  86 drivers/spi/spi-oc-tiny.c 		if (t->speed_hz && t->speed_hz != hw->speed_hz)
t                  87 drivers/spi/spi-oc-tiny.c 			baud = tiny_spi_baud(spi, t->speed_hz);
t                 120 drivers/spi/spi-oc-tiny.c static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 123 drivers/spi/spi-oc-tiny.c 	const u8 *txp = t->tx_buf;
t                 124 drivers/spi/spi-oc-tiny.c 	u8 *rxp = t->rx_buf;
t                 129 drivers/spi/spi-oc-tiny.c 		hw->len = t->len;
t                 130 drivers/spi/spi-oc-tiny.c 		hw->txp = t->tx_buf;
t                 131 drivers/spi/spi-oc-tiny.c 		hw->rxp = t->rx_buf;
t                 136 drivers/spi/spi-oc-tiny.c 		if (t->len > 1) {
t                 155 drivers/spi/spi-oc-tiny.c 		for (i = 1; i < t->len; i++) {
t                 158 drivers/spi/spi-oc-tiny.c 			if (rxp || (i != t->len - 1))
t                 168 drivers/spi/spi-oc-tiny.c 	return t->len;
t                 234 drivers/spi/spi-omap-100k.c 		struct spi_transfer *t)
t                 240 drivers/spi/spi-omap-100k.c 	if (t != NULL)
t                 241 drivers/spi/spi-omap-100k.c 		word_len = t->bits_per_word;
t                 294 drivers/spi/spi-omap-100k.c 	struct spi_transfer *t = NULL;
t                 298 drivers/spi/spi-omap-100k.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 299 drivers/spi/spi-omap-100k.c 		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
t                 303 drivers/spi/spi-omap-100k.c 		status = omap1_spi100k_setup_transfer(spi, t);
t                 312 drivers/spi/spi-omap-100k.c 		if (t->len) {
t                 315 drivers/spi/spi-omap-100k.c 			count = omap1_spi100k_txrx_pio(spi, t);
t                 318 drivers/spi/spi-omap-100k.c 			if (count != t->len) {
t                 324 drivers/spi/spi-omap-100k.c 		if (t->delay_usecs)
t                 325 drivers/spi/spi-omap-100k.c 			udelay(t->delay_usecs);
t                 329 drivers/spi/spi-omap-100k.c 		if (t->cs_change) {
t                 205 drivers/spi/spi-omap-uwire.c static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
t                 207 drivers/spi/spi-omap-uwire.c 	unsigned	len = t->len;
t                 208 drivers/spi/spi-omap-uwire.c 	unsigned	bits = t->bits_per_word;
t                 213 drivers/spi/spi-omap-uwire.c 	if (!t->tx_buf && !t->rx_buf)
t                 219 drivers/spi/spi-omap-uwire.c 	if (t->tx_buf) {
t                 220 drivers/spi/spi-omap-uwire.c 		const u8	*buf = t->tx_buf;
t                 266 drivers/spi/spi-omap-uwire.c 	} else if (t->rx_buf) {
t                 267 drivers/spi/spi-omap-uwire.c 		u8		*buf = t->rx_buf;
t                 309 drivers/spi/spi-omap-uwire.c static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
t                 347 drivers/spi/spi-omap-uwire.c 	if (t != NULL)
t                 348 drivers/spi/spi-omap-uwire.c 		hz = t->speed_hz;
t                 294 drivers/spi/spi-omap2-mcspi.c 				struct spi_transfer *t, int enable)
t                 308 drivers/spi/spi-omap2-mcspi.c 		if (t->len % bytes_per_word != 0)
t                 311 drivers/spi/spi-omap2-mcspi.c 		if (t->rx_buf != NULL && t->tx_buf != NULL)
t                 316 drivers/spi/spi-omap2-mcspi.c 		wcnt = t->len / bytes_per_word;
t                 321 drivers/spi/spi-omap2-mcspi.c 		if (t->rx_buf != NULL) {
t                 326 drivers/spi/spi-omap2-mcspi.c 		if (t->tx_buf != NULL) {
t                 339 drivers/spi/spi-omap2-mcspi.c 	if (t->rx_buf != NULL)
t                 342 drivers/spi/spi-omap2-mcspi.c 	if (t->tx_buf != NULL)
t                 893 drivers/spi/spi-omap2-mcspi.c 		struct spi_transfer *t)
t                 903 drivers/spi/spi-omap2-mcspi.c 	if (t != NULL && t->bits_per_word)
t                 904 drivers/spi/spi-omap2-mcspi.c 		word_len = t->bits_per_word;
t                 908 drivers/spi/spi-omap2-mcspi.c 	if (t && t->speed_hz)
t                 909 drivers/spi/spi-omap2-mcspi.c 		speed_hz = t->speed_hz;
t                1130 drivers/spi/spi-omap2-mcspi.c 				    struct spi_transfer *t)
t                1169 drivers/spi/spi-omap2-mcspi.c 	    (t->speed_hz != spi->max_speed_hz) ||
t                1170 drivers/spi/spi-omap2-mcspi.c 	    (t->bits_per_word != spi->bits_per_word)) {
t                1172 drivers/spi/spi-omap2-mcspi.c 		status = omap2_mcspi_setup_transfer(spi, t);
t                1175 drivers/spi/spi-omap2-mcspi.c 		if (t->speed_hz == spi->max_speed_hz &&
t                1176 drivers/spi/spi-omap2-mcspi.c 		    t->bits_per_word == spi->bits_per_word)
t                1191 drivers/spi/spi-omap2-mcspi.c 	if (t->tx_buf == NULL)
t                1193 drivers/spi/spi-omap2-mcspi.c 	else if (t->rx_buf == NULL)
t                1196 drivers/spi/spi-omap2-mcspi.c 	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
t                1198 drivers/spi/spi-omap2-mcspi.c 		if (t->len > ((cs->word_len + 7) >> 3))
t                1204 drivers/spi/spi-omap2-mcspi.c 	if (t->len) {
t                1209 drivers/spi/spi-omap2-mcspi.c 		    master->can_dma(master, spi, t))
t                1210 drivers/spi/spi-omap2-mcspi.c 			omap2_mcspi_set_fifo(spi, t, 1);
t                1215 drivers/spi/spi-omap2-mcspi.c 		if (t->tx_buf == NULL)
t                1221 drivers/spi/spi-omap2-mcspi.c 		    master->can_dma(master, spi, t))
t                1222 drivers/spi/spi-omap2-mcspi.c 			count = omap2_mcspi_txrx_dma(spi, t);
t                1224 drivers/spi/spi-omap2-mcspi.c 			count = omap2_mcspi_txrx_pio(spi, t);
t                1226 drivers/spi/spi-omap2-mcspi.c 		if (count != t->len) {
t                1235 drivers/spi/spi-omap2-mcspi.c 		omap2_mcspi_set_fifo(spi, t, 0);
t                1257 drivers/spi/spi-omap2-mcspi.c 	if (mcspi->fifo_depth > 0 && t)
t                1258 drivers/spi/spi-omap2-mcspi.c 		omap2_mcspi_set_fifo(spi, t, 0);
t                 291 drivers/spi/spi-orion.c orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
t                 300 drivers/spi/spi-orion.c 	if ((t != NULL) && t->speed_hz)
t                 301 drivers/spi/spi-orion.c 		speed = t->speed_hz;
t                 303 drivers/spi/spi-orion.c 	if ((t != NULL) && t->bits_per_word)
t                 304 drivers/spi/spi-orion.c 		bits_per_word = t->bits_per_word;
t                 492 drivers/spi/spi-orion.c 					struct spi_transfer *t)
t                 496 drivers/spi/spi-orion.c 	status = orion_spi_setup_transfer(spi, t);
t                 500 drivers/spi/spi-orion.c 	if (t->len)
t                 501 drivers/spi/spi-orion.c 		orion_spi_write_read(spi, t);
t                 142 drivers/spi/spi-ppc4xx.c static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
t                 148 drivers/spi/spi-ppc4xx.c 		t->tx_buf, t->rx_buf, t->len);
t                 152 drivers/spi/spi-ppc4xx.c 	hw->tx = t->tx_buf;
t                 153 drivers/spi/spi-ppc4xx.c 	hw->rx = t->rx_buf;
t                 154 drivers/spi/spi-ppc4xx.c 	hw->len = t->len;
t                 166 drivers/spi/spi-ppc4xx.c static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
t                 183 drivers/spi/spi-ppc4xx.c 	if (t) {
t                 184 drivers/spi/spi-ppc4xx.c 		if (t->bits_per_word)
t                 185 drivers/spi/spi-ppc4xx.c 			bits_per_word = t->bits_per_word;
t                 187 drivers/spi/spi-ppc4xx.c 		if (t->speed_hz)
t                 188 drivers/spi/spi-ppc4xx.c 			speed = min(t->speed_hz, spi->max_speed_hz);
t                  95 drivers/spi/spi-rb4xx.c 			      struct spi_device *spi, struct spi_transfer *t)
t                 116 drivers/spi/spi-rb4xx.c 	tx_buf = t->tx_buf;
t                 117 drivers/spi/spi-rb4xx.c 	rx_buf = t->rx_buf;
t                 118 drivers/spi/spi-rb4xx.c 	for (i = 0; i < t->len; ++i) {
t                 119 drivers/spi/spi-rb4xx.c 		if (t->tx_nbits == SPI_NBITS_DUAL)
t                 116 drivers/spi/spi-s3c24xx.c 				    struct spi_transfer *t)
t                 124 drivers/spi/spi-s3c24xx.c 	hz  = t ? t->speed_hz : spi->max_speed_hz;
t                 160 drivers/spi/spi-s3c24xx.c 				 struct spi_transfer *t)
t                 166 drivers/spi/spi-s3c24xx.c 	ret = s3c24xx_spi_update_state(spi, t);
t                 398 drivers/spi/spi-s3c24xx.c static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
t                 402 drivers/spi/spi-s3c24xx.c 	hw->tx = t->tx_buf;
t                 403 drivers/spi/spi-s3c24xx.c 	hw->rx = t->rx_buf;
t                 404 drivers/spi/spi-s3c24xx.c 	hw->len = t->len;
t                 410 drivers/spi/spi-s3c24xx.c 	if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
t                 117 drivers/spi/spi-s3c64xx.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                  66 drivers/spi/spi-sc18is602.c 			  struct spi_transfer *t, bool do_transfer)
t                  68 drivers/spi/spi-sc18is602.c 	unsigned int len = t->len;
t                  82 drivers/spi/spi-sc18is602.c 	if (t->tx_buf) {
t                  83 drivers/spi/spi-sc18is602.c 		memcpy(&hw->buffer[hw->tlen], t->tx_buf, len);
t                  85 drivers/spi/spi-sc18is602.c 		if (t->rx_buf)
t                  89 drivers/spi/spi-sc18is602.c 	} else if (t->rx_buf) {
t                 112 drivers/spi/spi-sc18is602.c 		if (t->rx_buf) {
t                 123 drivers/spi/spi-sc18is602.c 			memcpy(t->rx_buf, &hw->buffer[hw->rindex], len);
t                 175 drivers/spi/spi-sc18is602.c 				    struct spi_transfer *t, int tlen)
t                 177 drivers/spi/spi-sc18is602.c 	if (t && t->len + tlen > SC18IS602_BUFSIZ)
t                 188 drivers/spi/spi-sc18is602.c 	struct spi_transfer *t;
t                 192 drivers/spi/spi-sc18is602.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 195 drivers/spi/spi-sc18is602.c 		status = sc18is602_check_transfer(spi, t, hw->tlen);
t                 199 drivers/spi/spi-sc18is602.c 		status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
t                 203 drivers/spi/spi-sc18is602.c 		do_transfer = t->cs_change || list_is_last(&t->transfer_list,
t                 206 drivers/spi/spi-sc18is602.c 		if (t->len) {
t                 207 drivers/spi/spi-sc18is602.c 			status = sc18is602_txrx(hw, m, t, do_transfer);
t                 214 drivers/spi/spi-sc18is602.c 		if (t->delay_usecs)
t                 215 drivers/spi/spi-sc18is602.c 			udelay(t->delay_usecs);
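
The sc18is602 entries above buffer outgoing data and only perform the bus transaction when a transfer requests cs_change or is the last one in the message; list_is_last() against the message's transfer list is the test used for the latter. A small sketch of just that decision, with an illustrative name:

#include <linux/spi/spi.h>
#include <linux/list.h>

/* Flush the buffered data now if this transfer toggles chip-select or
 * is the final transfer of the message (mirrors the sc18is602 test). */
static bool must_flush_now(struct spi_message *m, struct spi_transfer *t)
{
	return t->cs_change ||
	       list_is_last(&t->transfer_list, &m->transfers);
}
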
t                  71 drivers/spi/spi-sh-hspi.c 	int t = 256;
t                  73 drivers/spi/spi-sh-hspi.c 	while (t--) {
t                  97 drivers/spi/spi-sh-hspi.c 			  struct spi_transfer *t)
t                 123 drivers/spi/spi-sh-hspi.c 		tmp = abs(t->speed_hz - rate);
t                 136 drivers/spi/spi-sh-hspi.c 	dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
t                 147 drivers/spi/spi-sh-hspi.c 	struct spi_transfer *t;
t                 158 drivers/spi/spi-sh-hspi.c 	list_for_each_entry(t, &msg->transfers, transfer_list) {
t                 161 drivers/spi/spi-sh-hspi.c 			hspi_hw_setup(hspi, msg, t);
t                 165 drivers/spi/spi-sh-hspi.c 		cs_change = t->cs_change;
t                 167 drivers/spi/spi-sh-hspi.c 		for (i = 0; i < t->len; i++) {
t                 175 drivers/spi/spi-sh-hspi.c 			if (t->tx_buf)
t                 176 drivers/spi/spi-sh-hspi.c 				tx = (u32)((u8 *)t->tx_buf)[i];
t                 186 drivers/spi/spi-sh-hspi.c 			if (t->rx_buf)
t                 187 drivers/spi/spi-sh-hspi.c 				((u8 *)t->rx_buf)[i] = (u8)rx;
t                 191 drivers/spi/spi-sh-hspi.c 		msg->actual_length += t->len;
t                 193 drivers/spi/spi-sh-hspi.c 		if (t->delay_usecs)
t                 194 drivers/spi/spi-sh-hspi.c 			udelay(t->delay_usecs);
t                 908 drivers/spi/spi-sh-msiof.c 				 struct spi_transfer *t)
t                 914 drivers/spi/spi-sh-msiof.c 	const void *tx_buf = t->tx_buf;
t                 915 drivers/spi/spi-sh-msiof.c 	void *rx_buf = t->rx_buf;
t                 916 drivers/spi/spi-sh-msiof.c 	unsigned int len = t->len;
t                 917 drivers/spi/spi-sh-msiof.c 	unsigned int bits = t->bits_per_word;
t                 929 drivers/spi/spi-sh-msiof.c 		sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
t                1039 drivers/spi/spi-sh-msiof.c 			bits = t->bits_per_word;
t                 154 drivers/spi/spi-sh.c 			struct spi_transfer *t)
t                 157 drivers/spi/spi-sh.c 	int remain = t->len;
t                 162 drivers/spi/spi-sh.c 	if (t->len)
t                 165 drivers/spi/spi-sh.c 	data = (unsigned char *)t->tx_buf;
t                 200 drivers/spi/spi-sh.c 	if (list_is_last(&t->transfer_list, &mesg->transfers)) {
t                 219 drivers/spi/spi-sh.c 			  struct spi_transfer *t)
t                 222 drivers/spi/spi-sh.c 	int remain = t->len;
t                 227 drivers/spi/spi-sh.c 	if (t->len > SPI_SH_MAX_BYTE)
t                 230 drivers/spi/spi-sh.c 		spi_sh_write(ss, t->len, SPI_SH_CR3);
t                 237 drivers/spi/spi-sh.c 	data = (unsigned char *)t->rx_buf;
t                 264 drivers/spi/spi-sh.c 	if (t->len > SPI_SH_MAX_BYTE) {
t                 278 drivers/spi/spi-sh.c 	struct spi_transfer *t;
t                 290 drivers/spi/spi-sh.c 		list_for_each_entry(t, &mesg->transfers, transfer_list) {
t                 292 drivers/spi/spi-sh.c 					t->tx_buf, t->rx_buf);
t                 294 drivers/spi/spi-sh.c 					t->len, t->delay_usecs);
t                 296 drivers/spi/spi-sh.c 			if (t->tx_buf) {
t                 297 drivers/spi/spi-sh.c 				ret = spi_sh_send(ss, mesg, t);
t                 301 drivers/spi/spi-sh.c 			if (t->rx_buf) {
t                 302 drivers/spi/spi-sh.c 				ret = spi_sh_receive(ss, mesg, t);
t                 306 drivers/spi/spi-sh.c 			mesg->actual_length += t->len;
t                 168 drivers/spi/spi-sifive.c 			 struct spi_transfer *t)
t                 174 drivers/spi/spi-sifive.c 	cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
t                 178 drivers/spi/spi-sifive.c 	mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
t                 181 drivers/spi/spi-sifive.c 	cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
t                 195 drivers/spi/spi-sifive.c 	if (!t->rx_buf)
t                 205 drivers/spi/spi-sifive.c 	return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
t                 256 drivers/spi/spi-sifive.c 			struct spi_transfer *t)
t                 259 drivers/spi/spi-sifive.c 	int poll = sifive_spi_prep_transfer(spi, device, t);
t                 260 drivers/spi/spi-sifive.c 	const u8 *tx_ptr = t->tx_buf;
t                 261 drivers/spi/spi-sifive.c 	u8 *rx_ptr = t->rx_buf;
t                 262 drivers/spi/spi-sifive.c 	unsigned int remaining_words = t->len;
t                 460 drivers/spi/spi-sirf.c 	struct spi_transfer *t)
t                 463 drivers/spi/spi-sirf.c 	int timeout = t->len * 10;
t                 469 drivers/spi/spi-sirf.c 	memcpy(&cmd, sspi->tx, t->len);
t                 472 drivers/spi/spi-sirf.c 			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
t                 473 drivers/spi/spi-sirf.c 	if (sspi->word_width == 2 && t->len == 4 &&
t                 485 drivers/spi/spi-sirf.c 	sspi->left_rx_word -= t->len;
t                 489 drivers/spi/spi-sirf.c 	struct spi_transfer *t)
t                 493 drivers/spi/spi-sirf.c 	int timeout = t->len * 10;
t                 547 drivers/spi/spi-sirf.c 	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
t                 548 drivers/spi/spi-sirf.c 					(t->tx_buf != t->rx_buf) ?
t                 551 drivers/spi/spi-sirf.c 		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
t                 556 drivers/spi/spi-sirf.c 	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
t                 557 drivers/spi/spi-sirf.c 					(t->tx_buf != t->rx_buf) ?
t                 560 drivers/spi/spi-sirf.c 		sspi->src_start, t->len, DMA_MEM_TO_DEV,
t                 595 drivers/spi/spi-sirf.c 	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
t                 596 drivers/spi/spi-sirf.c 	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
t                 608 drivers/spi/spi-sirf.c 		struct spi_transfer *t)
t                 611 drivers/spi/spi-sirf.c 	int timeout = t->len * 10;
t                 704 drivers/spi/spi-sirf.c static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
t                 709 drivers/spi/spi-sirf.c 	sspi->tx = t->tx_buf;
t                 710 drivers/spi/spi-sirf.c 	sspi->rx = t->rx_buf;
t                 711 drivers/spi/spi-sirf.c 	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
t                 720 drivers/spi/spi-sirf.c 		spi_sirfsoc_cmd_transfer(spi, t);
t                 721 drivers/spi/spi-sirf.c 	else if (IS_DMA_VALID(t))
t                 722 drivers/spi/spi-sirf.c 		spi_sirfsoc_dma_transfer(spi, t);
t                 724 drivers/spi/spi-sirf.c 		spi_sirfsoc_pio_transfer(spi, t);
t                 726 drivers/spi/spi-sirf.c 	return t->len - sspi->left_rx_word * sspi->word_width;
t                 870 drivers/spi/spi-sirf.c spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
t                 879 drivers/spi/spi-sirf.c 	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
t                 880 drivers/spi/spi-sirf.c 	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
t                 965 drivers/spi/spi-sirf.c 		if (t && t->tx_buf && !t->rx_buf &&
t                 966 drivers/spi/spi-sirf.c 			(t->len <= SIRFSOC_MAX_CMD_BYTES)) {
t                 969 drivers/spi/spi-sirf.c 				(SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
t                 979 drivers/spi/spi-sirf.c 	if (IS_DMA_VALID(t)) {
t                 276 drivers/spi/spi-sprd-adi.c 				 struct spi_transfer *t)
t                 282 drivers/spi/spi-sprd-adi.c 	if (t->rx_buf) {
t                 283 drivers/spi/spi-sprd-adi.c 		phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
t                 293 drivers/spi/spi-sprd-adi.c 		*(u32 *)t->rx_buf = val;
t                 294 drivers/spi/spi-sprd-adi.c 	} else if (t->tx_buf) {
t                 295 drivers/spi/spi-sprd-adi.c 		u32 *p = (u32 *)t->tx_buf;
t                 174 drivers/spi/spi-sprd.c 					 struct spi_transfer *t)
t                 180 drivers/spi/spi-sprd.c 	u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
t                 194 drivers/spi/spi-sprd.c static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
t                 199 drivers/spi/spi-sprd.c 	us = sprd_spi_transfer_max_timeout(ss, t);
t                 219 drivers/spi/spi-sprd.c static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
t                 224 drivers/spi/spi-sprd.c 	us = sprd_spi_transfer_max_timeout(ss, t);
t                 402 drivers/spi/spi-sprd.c static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
t                 422 drivers/spi/spi-sprd.c 			ret = sprd_spi_wait_for_tx_end(ss, t);
t                 435 drivers/spi/spi-sprd.c 			ret = sprd_spi_wait_for_rx_end(ss, t);
t                 517 drivers/spi/spi-sprd.c static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
t                 528 drivers/spi/spi-sprd.c 	ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
t                 535 drivers/spi/spi-sprd.c static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
t                 546 drivers/spi/spi-sprd.c 	ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
t                 550 drivers/spi/spi-sprd.c 	return t->len;
t                 587 drivers/spi/spi-sprd.c 				  struct spi_transfer *t)
t                 596 drivers/spi/spi-sprd.c 		write_size = sprd_spi_dma_tx_config(ss, t);
t                 632 drivers/spi/spi-sprd.c 		ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
t                 633 drivers/spi/spi-sprd.c 			(t->len - t->len % ss->dma.fragmens_len) :
t                 634 drivers/spi/spi-sprd.c 			 t->len;
t                 635 drivers/spi/spi-sprd.c 		ret = sprd_spi_dma_rx_config(ss, t);
t                 672 drivers/spi/spi-sprd.c static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
t                 689 drivers/spi/spi-sprd.c 	word_delay = clamp_t(u16, t->word_delay, SPRD_SPI_MIN_DELAY_CYCLE,
t                 717 drivers/spi/spi-sprd.c 				   struct spi_transfer *t)
t                 720 drivers/spi/spi-sprd.c 	u8 bits_per_word = t->bits_per_word;
t                 723 drivers/spi/spi-sprd.c 	ss->len = t->len;
t                 724 drivers/spi/spi-sprd.c 	ss->tx_buf = t->tx_buf;
t                 725 drivers/spi/spi-sprd.c 	ss->rx_buf = t->rx_buf;
t                 728 drivers/spi/spi-sprd.c 	sprd_spi_init_hw(ss, t);
t                 731 drivers/spi/spi-sprd.c 	sprd_spi_set_speed(ss, t->speed_hz);
t                 741 drivers/spi/spi-sprd.c 		ss->trans_len = t->len;
t                 748 drivers/spi/spi-sprd.c 		ss->trans_len = t->len >> 1;
t                 755 drivers/spi/spi-sprd.c 		ss->trans_len = t->len >> 2;
t                 768 drivers/spi/spi-sprd.c 	if (t->tx_buf)
t                 770 drivers/spi/spi-sprd.c 	if (t->rx_buf)
t                 789 drivers/spi/spi-sprd.c 				 struct spi_transfer *t)
t                 793 drivers/spi/spi-sprd.c 	ret = sprd_spi_setup_transfer(sdev, t);
t                 797 drivers/spi/spi-sprd.c 	if (sctlr->can_dma(sctlr, sdev, t))
t                 798 drivers/spi/spi-sprd.c 		ret = sprd_spi_dma_txrx_bufs(sdev, t);
t                 800 drivers/spi/spi-sprd.c 		ret = sprd_spi_txrx_bufs(sdev, t);
t                 802 drivers/spi/spi-sprd.c 	if (ret == t->len)
t                 889 drivers/spi/spi-sprd.c 			     struct spi_device *spi, struct spi_transfer *t)
t                 893 drivers/spi/spi-sprd.c 	return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
t                 120 drivers/spi/spi-st-ssc4.c 			       struct spi_device *spi, struct spi_transfer *t)
t                 126 drivers/spi/spi-st-ssc4.c 	spi_st->tx_ptr = t->tx_buf;
t                 127 drivers/spi/spi-st-ssc4.c 	spi_st->rx_ptr = t->rx_buf;
t                 135 drivers/spi/spi-st-ssc4.c 		spi_st->words_remaining = t->len / 2;
t                 137 drivers/spi/spi-st-ssc4.c 	} else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
t                 143 drivers/spi/spi-st-ssc4.c 		spi_st->words_remaining = t->len / 2;
t                 153 drivers/spi/spi-st-ssc4.c 		spi_st->words_remaining = t->len;
t                 171 drivers/spi/spi-st-ssc4.c 	return t->len;
t                 260 drivers/spi/spi-tegra114.c 	struct spi_transfer *t)
t                 262 drivers/spi/spi-tegra114.c 	unsigned remain_len = t->len - tspi->cur_pos;
t                 264 drivers/spi/spi-tegra114.c 	unsigned bits_per_word = t->bits_per_word;
t                 271 drivers/spi/spi-tegra114.c 	     bits_per_word == 32) && t->len > 3) {
t                 293 drivers/spi/spi-tegra114.c 	struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 302 drivers/spi/spi-tegra114.c 	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
t                 326 drivers/spi/spi-tegra114.c 		if (nbytes > t->len - tspi->cur_pos)
t                 327 drivers/spi/spi-tegra114.c 			nbytes = t->len - tspi->cur_pos;
t                 345 drivers/spi/spi-tegra114.c 		struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 352 drivers/spi/spi-tegra114.c 	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
t                 367 drivers/spi/spi-tegra114.c 		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
t                 372 drivers/spi/spi-tegra114.c 		if (len > t->len - tspi->cur_pos)
t                 373 drivers/spi/spi-tegra114.c 			len = t->len - tspi->cur_pos;
t                 389 drivers/spi/spi-tegra114.c 		struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 398 drivers/spi/spi-tegra114.c 		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
t                 403 drivers/spi/spi-tegra114.c 		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
t                 407 drivers/spi/spi-tegra114.c 		if (consume > t->len - tspi->cur_pos)
t                 408 drivers/spi/spi-tegra114.c 			consume = t->len - tspi->cur_pos;
t                 428 drivers/spi/spi-tegra114.c 		struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 437 drivers/spi/spi-tegra114.c 		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
t                 442 drivers/spi/spi-tegra114.c 		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
t                 443 drivers/spi/spi-tegra114.c 		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
t                 447 drivers/spi/spi-tegra114.c 		if (consume > t->len - tspi->cur_pos)
t                 448 drivers/spi/spi-tegra114.c 			consume = t->len - tspi->cur_pos;
t                 536 drivers/spi/spi-tegra114.c 		struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 588 drivers/spi/spi-tegra114.c 		tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
t                 630 drivers/spi/spi-tegra114.c 		struct tegra_spi_data *tspi, struct spi_transfer *t)
t                 636 drivers/spi/spi-tegra114.c 		cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
t                 764 drivers/spi/spi-tegra114.c 					struct spi_transfer *t,
t                 770 drivers/spi/spi-tegra114.c 	u32 speed = t->speed_hz;
t                 771 drivers/spi/spi-tegra114.c 	u8 bits_per_word = t->bits_per_word;
t                 785 drivers/spi/spi-tegra114.c 	tspi->curr_xfer = t;
t                 825 drivers/spi/spi-tegra114.c 		if (is_single_xfer && !(t->cs_change)) {
t                 859 drivers/spi/spi-tegra114.c 		struct spi_transfer *t, u32 command1)
t                 865 drivers/spi/spi-tegra114.c 	total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
t                 867 drivers/spi/spi-tegra114.c 	if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
t                 879 drivers/spi/spi-tegra114.c 	if (t->rx_buf) {
t                 883 drivers/spi/spi-tegra114.c 	if (t->tx_buf) {
t                 898 drivers/spi/spi-tegra114.c 		ret = tegra_spi_start_dma_based_transfer(tspi, t);
t                 900 drivers/spi/spi-tegra114.c 		ret = tegra_spi_start_cpu_based_transfer(tspi, t);
t                1126 drivers/spi/spi-tegra114.c 	struct spi_transfer *t = tspi->curr_xfer;
t                1146 drivers/spi/spi-tegra114.c 		tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
t                1153 drivers/spi/spi-tegra114.c 	if (tspi->cur_pos == t->len) {
t                1158 drivers/spi/spi-tegra114.c 	tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
t                1159 drivers/spi/spi-tegra114.c 	tegra_spi_start_cpu_based_transfer(tspi, t);
t                1167 drivers/spi/spi-tegra114.c 	struct spi_transfer *t = tspi->curr_xfer;
t                1221 drivers/spi/spi-tegra114.c 		tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
t                1228 drivers/spi/spi-tegra114.c 	if (tspi->cur_pos == t->len) {
t                1235 drivers/spi/spi-tegra114.c 							tspi, t);
t                1237 drivers/spi/spi-tegra114.c 		err = tegra_spi_start_dma_based_transfer(tspi, t);
t                1239 drivers/spi/spi-tegra114.c 		err = tegra_spi_start_cpu_based_transfer(tspi, t);
t                 159 drivers/spi/spi-tegra20-sflash.c 	struct spi_transfer *t)
t                 161 drivers/spi/spi-tegra20-sflash.c 	unsigned remain_len = t->len - tsd->cur_pos;
t                 164 drivers/spi/spi-tegra20-sflash.c 	tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
t                 173 drivers/spi/spi-tegra20-sflash.c 	struct tegra_sflash_data *tsd, struct spi_transfer *t)
t                 178 drivers/spi/spi-tegra20-sflash.c 	u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
t                 203 drivers/spi/spi-tegra20-sflash.c 		struct tegra_sflash_data *tsd, struct spi_transfer *t)
t                 207 drivers/spi/spi-tegra20-sflash.c 	u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
t                 224 drivers/spi/spi-tegra20-sflash.c 		struct tegra_sflash_data *tsd, struct spi_transfer *t)
t                 239 drivers/spi/spi-tegra20-sflash.c 		cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
t                 251 drivers/spi/spi-tegra20-sflash.c 		struct spi_transfer *t, bool is_first_of_msg,
t                 258 drivers/spi/spi-tegra20-sflash.c 	speed = t->speed_hz;
t                 268 drivers/spi/spi-tegra20-sflash.c 	tsd->curr_xfer = t;
t                 269 drivers/spi/spi-tegra20-sflash.c 	tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
t                 272 drivers/spi/spi-tegra20-sflash.c 		command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
t                 287 drivers/spi/spi-tegra20-sflash.c 		command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
t                 292 drivers/spi/spi-tegra20-sflash.c 	if (t->rx_buf) {
t                 296 drivers/spi/spi-tegra20-sflash.c 	if (t->tx_buf) {
t                 303 drivers/spi/spi-tegra20-sflash.c 	return tegra_sflash_start_cpu_based_transfer(tsd, t);
t                 360 drivers/spi/spi-tegra20-sflash.c 	struct spi_transfer *t = tsd->curr_xfer;
t                 378 drivers/spi/spi-tegra20-sflash.c 		tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
t                 385 drivers/spi/spi-tegra20-sflash.c 	if (tsd->cur_pos == t->len) {
t                 390 drivers/spi/spi-tegra20-sflash.c 	tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
t                 391 drivers/spi/spi-tegra20-sflash.c 	tegra_sflash_start_cpu_based_transfer(tsd, t);
t                 238 drivers/spi/spi-tegra20-slink.c 				  struct spi_transfer *t)
t                 256 drivers/spi/spi-tegra20-slink.c 	struct spi_transfer *t)
t                 258 drivers/spi/spi-tegra20-slink.c 	unsigned remain_len = t->len - tspi->cur_pos;
t                 264 drivers/spi/spi-tegra20-slink.c 	bits_per_word = t->bits_per_word;
t                 274 drivers/spi/spi-tegra20-slink.c 	tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
t                 290 drivers/spi/spi-tegra20-slink.c 	struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 299 drivers/spi/spi-tegra20-slink.c 	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
t                 332 drivers/spi/spi-tegra20-slink.c 		struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 339 drivers/spi/spi-tegra20-slink.c 	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
t                 365 drivers/spi/spi-tegra20-slink.c 		struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 373 drivers/spi/spi-tegra20-slink.c 		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
t                 377 drivers/spi/spi-tegra20-slink.c 		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
t                 396 drivers/spi/spi-tegra20-slink.c 		struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 406 drivers/spi/spi-tegra20-slink.c 		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
t                 410 drivers/spi/spi-tegra20-slink.c 		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
t                 411 drivers/spi/spi-tegra20-slink.c 		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
t                 472 drivers/spi/spi-tegra20-slink.c 		struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 513 drivers/spi/spi-tegra20-slink.c 		tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
t                 557 drivers/spi/spi-tegra20-slink.c 		struct tegra_slink_data *tspi, struct spi_transfer *t)
t                 573 drivers/spi/spi-tegra20-slink.c 		cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
t                 678 drivers/spi/spi-tegra20-slink.c 		struct spi_transfer *t)
t                 688 drivers/spi/spi-tegra20-slink.c 	bits_per_word = t->bits_per_word;
t                 689 drivers/spi/spi-tegra20-slink.c 	speed = t->speed_hz;
t                 699 drivers/spi/spi-tegra20-slink.c 	tspi->curr_xfer = t;
t                 700 drivers/spi/spi-tegra20-slink.c 	total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
t                 710 drivers/spi/spi-tegra20-slink.c 	if (t->rx_buf) {
t                 714 drivers/spi/spi-tegra20-slink.c 	if (t->tx_buf) {
t                 731 drivers/spi/spi-tegra20-slink.c 		ret = tegra_slink_start_dma_based_transfer(tspi, t);
t                 733 drivers/spi/spi-tegra20-slink.c 		ret = tegra_slink_start_cpu_based_transfer(tspi, t);
t                 847 drivers/spi/spi-tegra20-slink.c 	struct spi_transfer *t = tspi->curr_xfer;
t                 866 drivers/spi/spi-tegra20-slink.c 		tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
t                 873 drivers/spi/spi-tegra20-slink.c 	if (tspi->cur_pos == t->len) {
t                 878 drivers/spi/spi-tegra20-slink.c 	tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
t                 879 drivers/spi/spi-tegra20-slink.c 	tegra_slink_start_cpu_based_transfer(tspi, t);
t                 887 drivers/spi/spi-tegra20-slink.c 	struct spi_transfer *t = tspi->curr_xfer;
t                 940 drivers/spi/spi-tegra20-slink.c 		tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
t                 947 drivers/spi/spi-tegra20-slink.c 	if (tspi->cur_pos == t->len) {
t                 954 drivers/spi/spi-tegra20-slink.c 							tspi, t);
t                 956 drivers/spi/spi-tegra20-slink.c 		err = tegra_slink_start_dma_based_transfer(tspi, t);
t                 958 drivers/spi/spi-tegra20-slink.c 		err = tegra_slink_start_cpu_based_transfer(tspi, t);
t                 245 drivers/spi/spi-ti-qspi.c static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
t                 253 drivers/spi/spi-ti-qspi.c 	txbuf = t->tx_buf;
t                 255 drivers/spi/spi-ti-qspi.c 	wlen = t->bits_per_word >> 3;	/* in bytes */
t                 314 drivers/spi/spi-ti-qspi.c static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
t                 321 drivers/spi/spi-ti-qspi.c 	rxbuf = t->rx_buf;
t                 323 drivers/spi/spi-ti-qspi.c 	switch (t->rx_nbits) {
t                 334 drivers/spi/spi-ti-qspi.c 	wlen = t->bits_per_word >> 3;	/* in bytes */
t                 364 drivers/spi/spi-ti-qspi.c static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
t                 369 drivers/spi/spi-ti-qspi.c 	if (t->tx_buf) {
t                 370 drivers/spi/spi-ti-qspi.c 		ret = qspi_write_msg(qspi, t, count);
t                 377 drivers/spi/spi-ti-qspi.c 	if (t->rx_buf) {
t                 378 drivers/spi/spi-ti-qspi.c 		ret = qspi_read_msg(qspi, t, count);
t                 587 drivers/spi/spi-ti-qspi.c 	struct spi_transfer *t;
t                 603 drivers/spi/spi-ti-qspi.c 	list_for_each_entry(t, &m->transfers, transfer_list)
t                 604 drivers/spi/spi-ti-qspi.c 		frame_len_words += t->len / (t->bits_per_word >> 3);
t                 619 drivers/spi/spi-ti-qspi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 621 drivers/spi/spi-ti-qspi.c 			     QSPI_WLEN(t->bits_per_word));
t                 623 drivers/spi/spi-ti-qspi.c 		wlen = t->bits_per_word >> 3;
t                 624 drivers/spi/spi-ti-qspi.c 		transfer_len_words = min(t->len / wlen, frame_len_words);
t                 626 drivers/spi/spi-ti-qspi.c 		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
t                 150 drivers/spi/spi-txx9.c 	struct spi_transfer *t;
t                 177 drivers/spi/spi-txx9.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 178 drivers/spi/spi-txx9.c 		const void *txbuf = t->tx_buf;
t                 179 drivers/spi/spi-txx9.c 		void *rxbuf = t->rx_buf;
t                 181 drivers/spi/spi-txx9.c 		unsigned int len = t->len;
t                 183 drivers/spi/spi-txx9.c 		u32 speed_hz = t->speed_hz;
t                 184 drivers/spi/spi-txx9.c 		u8 bits_per_word = t->bits_per_word;
t                 206 drivers/spi/spi-txx9.c 		cs_change = t->cs_change;
t                 250 drivers/spi/spi-txx9.c 		m->actual_length += t->len;
t                 251 drivers/spi/spi-txx9.c 		if (t->delay_usecs)
t                 252 drivers/spi/spi-txx9.c 			udelay(t->delay_usecs);
t                 256 drivers/spi/spi-txx9.c 		if (t->transfer_list.next == &m->transfers)
t                 304 drivers/spi/spi-txx9.c 	struct spi_transfer *t;
t                 310 drivers/spi/spi-txx9.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 311 drivers/spi/spi-txx9.c 		if (!t->tx_buf && !t->rx_buf && t->len)
t                 206 drivers/spi/spi-uniphier.c 				       struct spi_transfer *t)
t                 212 drivers/spi/spi-uniphier.c 	priv->tx_buf = t->tx_buf;
t                 213 drivers/spi/spi-uniphier.c 	priv->rx_buf = t->rx_buf;
t                 214 drivers/spi/spi-uniphier.c 	priv->tx_bytes = priv->rx_bytes = t->len;
t                 222 drivers/spi/spi-uniphier.c 	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
t                 223 drivers/spi/spi-uniphier.c 		uniphier_spi_set_transfer_size(spi, t->bits_per_word);
t                 224 drivers/spi/spi-uniphier.c 		priv->bits_per_word = t->bits_per_word;
t                 227 drivers/spi/spi-uniphier.c 	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
t                 228 drivers/spi/spi-uniphier.c 		uniphier_spi_set_baudrate(spi, t->speed_hz);
t                 229 drivers/spi/spi-uniphier.c 		priv->speed_hz = t->speed_hz;
t                 339 drivers/spi/spi-uniphier.c 					 struct spi_transfer *t)
t                 366 drivers/spi/spi-uniphier.c 					  struct spi_transfer *t)
t                 389 drivers/spi/spi-uniphier.c 	return uniphier_spi_transfer_one_irq(master, spi, t);
t                 394 drivers/spi/spi-uniphier.c 				     struct spi_transfer *t)
t                 400 drivers/spi/spi-uniphier.c 	if (!t->len)
t                 403 drivers/spi/spi-uniphier.c 	uniphier_spi_setup_transfer(spi, t);
t                 411 drivers/spi/spi-uniphier.c 	if (t->len > threshold)
t                 412 drivers/spi/spi-uniphier.c 		return uniphier_spi_transfer_one_irq(master, spi, t);
t                 414 drivers/spi/spi-uniphier.c 		return uniphier_spi_transfer_one_poll(master, spi, t);
t                  73 drivers/spi/spi-xcomm.c 	struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
t                  75 drivers/spi/spi-xcomm.c 	if (t->len > 62)
t                  78 drivers/spi/spi-xcomm.c 	if (t->speed_hz != spi_xcomm->current_speed) {
t                  81 drivers/spi/spi-xcomm.c 		divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
t                  89 drivers/spi/spi-xcomm.c 		spi_xcomm->current_speed = t->speed_hz;
t                 111 drivers/spi/spi-xcomm.c 	struct spi_device *spi, struct spi_transfer *t)
t                 115 drivers/spi/spi-xcomm.c 	if (t->tx_buf) {
t                 117 drivers/spi/spi-xcomm.c 		memcpy(spi_xcomm->buf + 1, t->tx_buf, t->len);
t                 119 drivers/spi/spi-xcomm.c 		ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1);
t                 122 drivers/spi/spi-xcomm.c 		else if (ret != t->len + 1)
t                 124 drivers/spi/spi-xcomm.c 	} else if (t->rx_buf) {
t                 125 drivers/spi/spi-xcomm.c 		ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len);
t                 128 drivers/spi/spi-xcomm.c 		else if (ret != t->len)
t                 132 drivers/spi/spi-xcomm.c 	return t->len;
t                 142 drivers/spi/spi-xcomm.c 	struct spi_transfer *t;
t                 149 drivers/spi/spi-xcomm.c 	list_for_each_entry(t, &msg->transfers, transfer_list) {
t                 151 drivers/spi/spi-xcomm.c 		if (!t->tx_buf && !t->rx_buf && t->len) {
t                 156 drivers/spi/spi-xcomm.c 		status = spi_xcomm_setup_transfer(spi_xcomm, spi, t, &settings);
t                 160 drivers/spi/spi-xcomm.c 		is_last = list_is_last(&t->transfer_list, &msg->transfers);
t                 161 drivers/spi/spi-xcomm.c 		cs_change = t->cs_change;
t                 168 drivers/spi/spi-xcomm.c 		if (t->rx_buf) {
t                 170 drivers/spi/spi-xcomm.c 			status = spi_xcomm_sync_config(spi_xcomm, t->len);
t                 180 drivers/spi/spi-xcomm.c 		if (t->len) {
t                 181 drivers/spi/spi-xcomm.c 			status = spi_xcomm_txrx_bufs(spi_xcomm, spi, t);
t                 191 drivers/spi/spi-xcomm.c 		if (t->delay_usecs)
t                 192 drivers/spi/spi-xcomm.c 			udelay(t->delay_usecs);
t                 226 drivers/spi/spi-xilinx.c 		struct spi_transfer *t)
t                 238 drivers/spi/spi-xilinx.c static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
t                 247 drivers/spi/spi-xilinx.c 	xspi->tx_ptr = t->tx_buf;
t                 248 drivers/spi/spi-xilinx.c 	xspi->rx_ptr = t->rx_buf;
t                 249 drivers/spi/spi-xilinx.c 	remaining_words = t->len / xspi->bytes_per_word;
t                 333 drivers/spi/spi-xilinx.c 	return t->len;
t                 320 drivers/spi/spi-xlp.c static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
t                 326 drivers/spi/spi-xlp.c 	tx_buf = t->tx_buf;
t                 327 drivers/spi/spi-xlp.c 	rx_buf = t->rx_buf;
t                 328 drivers/spi/spi-xlp.c 	bytesleft = t->len;
t                 349 drivers/spi/spi-xlp.c 					struct spi_transfer *t)
t                 357 drivers/spi/spi-xlp.c 	if (spi_transfer_is_last(master, t))
t                 362 drivers/spi/spi-xlp.c 	if (xlp_spi_txrx_bufs(xspi, t))
t                 114 drivers/spi/spidev.c 	struct spi_transfer	t = {
t                 122 drivers/spi/spidev.c 	spi_message_add_tail(&t, &m);
t                 129 drivers/spi/spidev.c 	struct spi_transfer	t = {
t                 137 drivers/spi/spidev.c 	spi_message_add_tail(&t, &m);
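
The spidev entries show the standard way a single-transfer message is built: a stack-allocated spi_transfer is appended to a spi_message with spi_message_add_tail() and the message is then submitted to the core. A sketch of a synchronous helper along those lines (spi_message_init() and spi_sync() are the usual core calls):

#include <linux/spi/spi.h>

/* Minimal sketch: clock out @len bytes from @tx while reading into
 * @rx, using one spi_transfer per message as spidev does. */
static int one_shot_xfer(struct spi_device *spi,
			 const void *tx, void *rx, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len	= len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* blocks until the transfer is done */
}
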
t                 195 drivers/ssb/pci.c 	static const u8 t[] = {
t                 229 drivers/ssb/pci.c 	return t[crc ^ data];
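
The ssb/pci.c entries are a table-driven CRC-8 step: the 256-entry table t[] maps the XOR of the running CRC and the next data byte straight to the next CRC value. A hedged sketch of how such a table is applied over a buffer (the table contents, polynomial and seed are specific to the SPROM format and are assumed here, not taken from the driver):

#include <linux/types.h>

static u8 crc8_step(u8 crc, u8 data, const u8 table[256])
{
	return table[crc ^ data];	/* same shape as the ssb helper */
}

static u8 crc8_buf(const u8 *buf, size_t len, const u8 table[256])
{
	u8 crc = 0xff;			/* assumed seed, driver-specific */
	size_t i;

	for (i = 0; i < len; i++)
		crc = crc8_step(crc, buf[i], table);
	return crc;
}
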
t                 196 drivers/staging/comedi/drivers/comedi_test.c static void waveform_ai_timer(struct timer_list *t)
t                 198 drivers/staging/comedi/drivers/comedi_test.c 	struct waveform_private *devpriv = from_timer(devpriv, t, ai_timer);
t                 433 drivers/staging/comedi/drivers/comedi_test.c static void waveform_ao_timer(struct timer_list *t)
t                 435 drivers/staging/comedi/drivers/comedi_test.c 	struct waveform_private *devpriv = from_timer(devpriv, t, ao_timer);
t                 520 drivers/staging/comedi/drivers/das16.c static void das16_timer_interrupt(struct timer_list *t)
t                 522 drivers/staging/comedi/drivers/das16.c 	struct das16_private_struct *devpriv = from_timer(devpriv, t, timer);
t                 579 drivers/staging/comedi/drivers/jr3_pci.c static void jr3_pci_poll_dev(struct timer_list *t)
t                 581 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_pci_dev_private *devpriv = from_timer(devpriv, t, timer);
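
The comedi_test, das16 and jr3_pci entries all use the timer_setup()/from_timer() pairing: the callback receives the struct timer_list pointer and recovers the enclosing private structure with from_timer(), a container_of() wrapper keyed on the timer member. A sketch with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_private {
	struct timer_list timer;	/* embedded timer */
	unsigned long ticks;
};

static void my_timer_fn(struct timer_list *t)
{
	struct my_private *priv = from_timer(priv, t, timer);

	priv->ticks++;
	mod_timer(&priv->timer, jiffies + HZ);	/* re-arm in one second */
}

static void my_timer_start(struct my_private *priv)
{
	timer_setup(&priv->timer, my_timer_fn, 0);
	mod_timer(&priv->timer, jiffies + HZ);
}
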
t                1142 drivers/staging/exfat/exfat_core.c 	u16 t = 0x00, d = 0x21;
t                1147 drivers/staging/exfat/exfat_core.c 		t = GET16_A(ep->create_time);
t                1151 drivers/staging/exfat/exfat_core.c 		t = GET16_A(ep->modify_time);
t                1156 drivers/staging/exfat/exfat_core.c 	tp->sec  = (t & 0x001F) << 1;
t                1157 drivers/staging/exfat/exfat_core.c 	tp->min  = (t >> 5) & 0x003F;
t                1158 drivers/staging/exfat/exfat_core.c 	tp->hour = (t >> 11);
t                1167 drivers/staging/exfat/exfat_core.c 	u16 t = 0x00, d = 0x21;
t                1172 drivers/staging/exfat/exfat_core.c 		t = GET16_A(ep->create_time);
t                1176 drivers/staging/exfat/exfat_core.c 		t = GET16_A(ep->modify_time);
t                1180 drivers/staging/exfat/exfat_core.c 		t = GET16_A(ep->access_time);
t                1185 drivers/staging/exfat/exfat_core.c 	tp->sec  = (t & 0x001F) << 1;
t                1186 drivers/staging/exfat/exfat_core.c 	tp->min  = (t >> 5) & 0x003F;
t                1187 drivers/staging/exfat/exfat_core.c 	tp->hour = (t >> 11);
t                1196 drivers/staging/exfat/exfat_core.c 	u16 t, d;
t                1199 drivers/staging/exfat/exfat_core.c 	t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
t                1204 drivers/staging/exfat/exfat_core.c 		SET16_A(ep->create_time, t);
t                1208 drivers/staging/exfat/exfat_core.c 		SET16_A(ep->modify_time, t);
t                1217 drivers/staging/exfat/exfat_core.c 	u16 t, d;
t                1220 drivers/staging/exfat/exfat_core.c 	t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
t                1225 drivers/staging/exfat/exfat_core.c 		SET16_A(ep->create_time, t);
t                1229 drivers/staging/exfat/exfat_core.c 		SET16_A(ep->modify_time, t);
t                1233 drivers/staging/exfat/exfat_core.c 		SET16_A(ep->access_time, t);
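
The exfat_core.c entries pack and unpack the FAT-style 16-bit time word: bits 15..11 hold the hour, bits 10..5 the minute, bits 4..0 the seconds divided by two. A round-trip sketch of just that field (the companion date word d follows the same idea but is not fully shown above):

#include <linux/types.h>

/* Pack hour/min/sec the way the exfat_set_entry_time() lines do. */
static u16 fat_pack_time(u8 hour, u8 min, u8 sec)
{
	return (hour << 11) | (min << 5) | (sec >> 1);
}

/* Inverse, matching exfat_get_entry_time(): seconds come back with
 * two-second granularity. */
static void fat_unpack_time(u16 t, u8 *hour, u8 *min, u8 *sec)
{
	*sec  = (t & 0x001F) << 1;
	*min  = (t >> 5) & 0x003F;
	*hour = t >> 11;
}
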
t                 244 drivers/staging/exfat/exfat_super.c 	struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io;
t                 250 drivers/staging/exfat/exfat_super.c 		if (!t) {
t                 254 drivers/staging/exfat/exfat_super.c 			if (nls_strnicmp(t, name->name, str, alen) == 0)
t                  19 drivers/staging/fbtft/fb_ra8875.c 	struct spi_transfer t = {
t                  36 drivers/staging/fbtft/fb_ra8875.c 	spi_message_add_tail(&t, &m);
t                  10 drivers/staging/fbtft/fbtft-io.c 	struct spi_transfer t = {
t                  26 drivers/staging/fbtft/fbtft-io.c 	spi_message_add_tail(&t, &m);
t                  89 drivers/staging/fbtft/fbtft-io.c 	struct spi_transfer	t = {
t                 110 drivers/staging/fbtft/fbtft-io.c 		t.tx_buf = txbuf;
t                 117 drivers/staging/fbtft/fbtft-io.c 	spi_message_add_tail(&t, &m);
t                 248 drivers/staging/fieldbus/anybuss/host.c 					struct ab_task *t);
t                 283 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 285 drivers/staging/fieldbus/anybuss/host.c 	t = kmem_cache_alloc(cache, GFP_KERNEL);
t                 286 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 288 drivers/staging/fieldbus/anybuss/host.c 	t->cache = cache;
t                 289 drivers/staging/fieldbus/anybuss/host.c 	kref_init(&t->refcount);
t                 290 drivers/staging/fieldbus/anybuss/host.c 	t->task_fn = task_fn;
t                 291 drivers/staging/fieldbus/anybuss/host.c 	t->done_fn = NULL;
t                 292 drivers/staging/fieldbus/anybuss/host.c 	t->result = 0;
t                 293 drivers/staging/fieldbus/anybuss/host.c 	init_completion(&t->done);
t                 294 drivers/staging/fieldbus/anybuss/host.c 	return t;
t                 299 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t = container_of(refcount, struct ab_task, refcount);
t                 300 drivers/staging/fieldbus/anybuss/host.c 	struct kmem_cache *cache = t->cache;
t                 302 drivers/staging/fieldbus/anybuss/host.c 	kmem_cache_free(cache, t);
t                 305 drivers/staging/fieldbus/anybuss/host.c static void ab_task_put(struct ab_task *t)
t                 307 drivers/staging/fieldbus/anybuss/host.c 	kref_put(&t->refcount, __ab_task_destroy);
t                 310 drivers/staging/fieldbus/anybuss/host.c static struct ab_task *__ab_task_get(struct ab_task *t)
t                 312 drivers/staging/fieldbus/anybuss/host.c 	kref_get(&t->refcount);
t                 313 drivers/staging/fieldbus/anybuss/host.c 	return t;
t                 316 drivers/staging/fieldbus/anybuss/host.c static void __ab_task_finish(struct ab_task *t, struct anybuss_host *cd)
t                 318 drivers/staging/fieldbus/anybuss/host.c 	if (t->done_fn)
t                 319 drivers/staging/fieldbus/anybuss/host.c 		t->done_fn(cd);
t                 320 drivers/staging/fieldbus/anybuss/host.c 	complete(&t->done);
t                 327 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 329 drivers/staging/fieldbus/anybuss/host.c 	ret = kfifo_out(q, &t, sizeof(t));
t                 331 drivers/staging/fieldbus/anybuss/host.c 	__ab_task_finish(t, cd);
t                 332 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                 336 drivers/staging/fieldbus/anybuss/host.c ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
t                 341 drivers/staging/fieldbus/anybuss/host.c 	t->start_jiffies = jiffies;
t                 342 drivers/staging/fieldbus/anybuss/host.c 	__ab_task_get(t);
t                 343 drivers/staging/fieldbus/anybuss/host.c 	ret = kfifo_in_spinlocked(q, &t, sizeof(t), slock);
t                 345 drivers/staging/fieldbus/anybuss/host.c 		ab_task_put(t);
t                 353 drivers/staging/fieldbus/anybuss/host.c ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
t                 358 drivers/staging/fieldbus/anybuss/host.c 	ret = ab_task_enqueue(t, q, slock, wq);
t                 361 drivers/staging/fieldbus/anybuss/host.c 	ret = wait_for_completion_interruptible(&t->done);
t                 364 drivers/staging/fieldbus/anybuss/host.c 	return t->result;
t                 482 drivers/staging/fieldbus/anybuss/host.c 			     struct ab_task *t)
t                 498 drivers/staging/fieldbus/anybuss/host.c 			      struct ab_task *t)
t                 504 drivers/staging/fieldbus/anybuss/host.c 	if (time_after(jiffies, t->start_jiffies + TIMEOUT)) {
t                 514 drivers/staging/fieldbus/anybuss/host.c 			    struct ab_task *t)
t                 528 drivers/staging/fieldbus/anybuss/host.c 	t->task_fn = task_fn_power_on_2;
t                 535 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 538 drivers/staging/fieldbus/anybuss/host.c 	t = ab_task_create_get(cd->qcache, power_on ?
t                 540 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 542 drivers/staging/fieldbus/anybuss/host.c 	err = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
t                 543 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                 550 drivers/staging/fieldbus/anybuss/host.c static int task_fn_area_3(struct anybuss_host *cd, struct ab_task *t)
t                 552 drivers/staging/fieldbus/anybuss/host.c 	struct area_priv *pd = &t->area_pd;
t                 558 drivers/staging/fieldbus/anybuss/host.c 		if (time_after(jiffies, t->start_jiffies + TIMEOUT))
t                 565 drivers/staging/fieldbus/anybuss/host.c static int task_fn_area_2(struct anybuss_host *cd, struct ab_task *t)
t                 567 drivers/staging/fieldbus/anybuss/host.c 	struct area_priv *pd = &t->area_pd;
t                 576 drivers/staging/fieldbus/anybuss/host.c 		if (time_after(jiffies, t->start_jiffies + TIMEOUT)) {
t                 596 drivers/staging/fieldbus/anybuss/host.c 	t->task_fn = task_fn_area_3;
t                 600 drivers/staging/fieldbus/anybuss/host.c static int task_fn_area(struct anybuss_host *cd, struct ab_task *t)
t                 602 drivers/staging/fieldbus/anybuss/host.c 	struct area_priv *pd = &t->area_pd;
t                 615 drivers/staging/fieldbus/anybuss/host.c 	t->task_fn = task_fn_area_2;
t                 623 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 626 drivers/staging/fieldbus/anybuss/host.c 	t = ab_task_create_get(qcache, task_fn_area);
t                 627 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 629 drivers/staging/fieldbus/anybuss/host.c 	ap = &t->area_pd;
t                 634 drivers/staging/fieldbus/anybuss/host.c 	return t;
t                 641 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 644 drivers/staging/fieldbus/anybuss/host.c 	t = ab_task_create_get(qcache, task_fn_area);
t                 645 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 647 drivers/staging/fieldbus/anybuss/host.c 	ap = &t->area_pd;
t                 653 drivers/staging/fieldbus/anybuss/host.c 	return t;
t                 660 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 663 drivers/staging/fieldbus/anybuss/host.c 	t = ab_task_create_get(qcache, task_fn_area);
t                 664 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 666 drivers/staging/fieldbus/anybuss/host.c 	ap = &t->area_pd;
t                 672 drivers/staging/fieldbus/anybuss/host.c 		ab_task_put(t);
t                 675 drivers/staging/fieldbus/anybuss/host.c 	return t;
t                 696 drivers/staging/fieldbus/anybuss/host.c static int task_fn_mbox_2(struct anybuss_host *cd, struct ab_task *t)
t                 698 drivers/staging/fieldbus/anybuss/host.c 	struct mbox_priv *pd = &t->mbox_pd;
t                 706 drivers/staging/fieldbus/anybuss/host.c 		if (time_after(jiffies, t->start_jiffies + TIMEOUT))
t                 720 drivers/staging/fieldbus/anybuss/host.c static int task_fn_mbox(struct anybuss_host *cd, struct ab_task *t)
t                 722 drivers/staging/fieldbus/anybuss/host.c 	struct mbox_priv *pd = &t->mbox_pd;
t                 731 drivers/staging/fieldbus/anybuss/host.c 		if (time_after(jiffies, t->start_jiffies + TIMEOUT))
t                 745 drivers/staging/fieldbus/anybuss/host.c 	t->start_jiffies = jiffies;
t                 746 drivers/staging/fieldbus/anybuss/host.c 	t->task_fn = task_fn_mbox_2;
t                 810 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 821 drivers/staging/fieldbus/anybuss/host.c 	t = ab_task_create_get(cd->qcache, task_fn_mbox);
t                 822 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                 824 drivers/staging/fieldbus/anybuss/host.c 	pd = &t->mbox_pd;
t                 844 drivers/staging/fieldbus/anybuss/host.c 	err = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
t                 856 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                 864 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 867 drivers/staging/fieldbus/anybuss/host.c 	ret = kfifo_out_peek(q, &t, sizeof(t));
t                 870 drivers/staging/fieldbus/anybuss/host.c 	t->result = t->task_fn(cd, t);
t                 871 drivers/staging/fieldbus/anybuss/host.c 	if (t->result != -EINPROGRESS)
t                 878 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 882 drivers/staging/fieldbus/anybuss/host.c 		ret = kfifo_out_peek(qs, &t, sizeof(t));
t                 883 drivers/staging/fieldbus/anybuss/host.c 		if (ret && (t->result != -EINPROGRESS))
t                 918 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                 949 drivers/staging/fieldbus/anybuss/host.c 	t = create_area_writer(cd->qcache, IND_AX_FBCTRL,
t                 951 drivers/staging/fieldbus/anybuss/host.c 	if (!t) {
t                 955 drivers/staging/fieldbus/anybuss/host.c 	t->done_fn = softint_ack;
t                 956 drivers/staging/fieldbus/anybuss/host.c 	ret = ab_task_enqueue(t, cd->powerq, &cd->qlock, &cd->wq);
t                 957 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                1060 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                1068 drivers/staging/fieldbus/anybuss/host.c 	t = create_area_reader(cd->qcache, IND_AX_FBCTRL, addr, count);
t                1069 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                1071 drivers/staging/fieldbus/anybuss/host.c 	ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
t                1074 drivers/staging/fieldbus/anybuss/host.c 	memcpy(buf, t->area_pd.buf, count);
t                1076 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                1087 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                1092 drivers/staging/fieldbus/anybuss/host.c 	t = create_area_user_writer(cd->qcache, IND_AX_IN,
t                1094 drivers/staging/fieldbus/anybuss/host.c 	if (IS_ERR(t))
t                1095 drivers/staging/fieldbus/anybuss/host.c 		return PTR_ERR(t);
t                1096 drivers/staging/fieldbus/anybuss/host.c 	ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
t                1097 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
t                1112 drivers/staging/fieldbus/anybuss/host.c 	struct ab_task *t;
t                1117 drivers/staging/fieldbus/anybuss/host.c 	t = create_area_reader(cd->qcache, IND_AX_OUT,
t                1119 drivers/staging/fieldbus/anybuss/host.c 	if (!t)
t                1121 drivers/staging/fieldbus/anybuss/host.c 	ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq);
t                1124 drivers/staging/fieldbus/anybuss/host.c 	if (copy_to_user(buf, t->area_pd.buf, len))
t                1127 drivers/staging/fieldbus/anybuss/host.c 	ab_task_put(t);
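The host.c entries above all revolve around one idiom: a kref-counted task object is pushed as a pointer into a kfifo, the submitter sleeps on a completion, and the queue runner checks t->start_jiffies against a timeout. A minimal sketch of that shape follows; the demo_* names are illustrative, task creation (kref_init/init_completion, as in ab_task_create_get above) is omitted, and the error codes are choices of the sketch, not the driver's.

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_task {
	struct kref refcount;
	struct completion done;
	unsigned long start_jiffies;
	int result;
};

static void demo_task_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_task, refcount));
}

static int demo_task_enqueue_wait(struct kfifo *q, spinlock_t *lock,
				  struct demo_task *t)
{
	t->start_jiffies = jiffies;
	kref_get(&t->refcount);		/* reference now owned by the queue */
	if (kfifo_in_spinlocked(q, &t, sizeof(t), lock) != sizeof(t)) {
		kref_put(&t->refcount, demo_task_release);
		return -ENOMEM;		/* error code is a sketch choice */
	}
	if (wait_for_completion_interruptible(&t->done))
		return -ERESTARTSYS;
	return t->result;
}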
t                 111 drivers/staging/fieldbus/dev_core.c 	const char *t;
t                 115 drivers/staging/fieldbus/dev_core.c 		t = "profinet";
t                 118 drivers/staging/fieldbus/dev_core.c 		t = "unknown";
t                 122 drivers/staging/fieldbus/dev_core.c 	return sprintf(buf, "%s\n", t);
t                 142 drivers/staging/fwserial/fwserial.c 	char t[10];
t                 144 drivers/staging/fwserial/fwserial.c 	snprintf(t, 10, "< %d", 1 << k);
t                 145 drivers/staging/fwserial/fwserial.c 	seq_printf(m, "\n%14s  %6s", " ", t);
t                 504 drivers/staging/fwserial/fwserial.c 	int n, t, c, brk = 0;
t                 513 drivers/staging/fwserial/fwserial.c 		t = min(n, 16);
t                 515 drivers/staging/fwserial/fwserial.c 						      TTY_BREAK, t);
t                 518 drivers/staging/fwserial/fwserial.c 		if (c < t)
t                1750 drivers/staging/fwserial/fwserial.c static void fwserial_plug_timeout(struct timer_list *t)
t                1752 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = from_timer(peer, t, timer);
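fwserial_plug_timeout() above, like most of the timer callbacks later in this listing, follows the struct timer_list pattern: the callback receives only the timer pointer and recovers the enclosing object with from_timer(), which is container_of() keyed on the timer field. A small sketch with hypothetical demo_* names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_peer {
	struct timer_list timer;	/* field name handed to from_timer() */
	int state;
};

static void demo_timeout(struct timer_list *t)
{
	struct demo_peer *peer = from_timer(peer, t, timer);

	peer->state = -1;		/* placeholder timeout action */
}

static void demo_arm(struct demo_peer *peer)
{
	timer_setup(&peer->timer, demo_timeout, 0);
	mod_timer(&peer->timer, jiffies + HZ);
}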
t                  55 drivers/staging/gdm724x/gdm_mux.c 	struct mux_tx *t;
t                  57 drivers/staging/gdm724x/gdm_mux.c 	t = kzalloc(sizeof(*t), GFP_ATOMIC);
t                  58 drivers/staging/gdm724x/gdm_mux.c 	if (!t)
t                  61 drivers/staging/gdm724x/gdm_mux.c 	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
t                  62 drivers/staging/gdm724x/gdm_mux.c 	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
t                  63 drivers/staging/gdm724x/gdm_mux.c 	if (!t->urb || !t->buf) {
t                  64 drivers/staging/gdm724x/gdm_mux.c 		usb_free_urb(t->urb);
t                  65 drivers/staging/gdm724x/gdm_mux.c 		kfree(t->buf);
t                  66 drivers/staging/gdm724x/gdm_mux.c 		kfree(t);
t                  70 drivers/staging/gdm724x/gdm_mux.c 	return t;
t                  73 drivers/staging/gdm724x/gdm_mux.c static void free_mux_tx(struct mux_tx *t)
t                  75 drivers/staging/gdm724x/gdm_mux.c 	if (t) {
t                  76 drivers/staging/gdm724x/gdm_mux.c 		usb_free_urb(t->urb);
t                  77 drivers/staging/gdm724x/gdm_mux.c 		kfree(t->buf);
t                  78 drivers/staging/gdm724x/gdm_mux.c 		kfree(t);
t                 328 drivers/staging/gdm724x/gdm_mux.c 	struct mux_tx *t = urb->context;
t                 332 drivers/staging/gdm724x/gdm_mux.c 		free_mux_tx(t);
t                 336 drivers/staging/gdm724x/gdm_mux.c 	if (t->callback)
t                 337 drivers/staging/gdm724x/gdm_mux.c 		t->callback(t->cb_data);
t                 339 drivers/staging/gdm724x/gdm_mux.c 	free_mux_tx(t);
t                 348 drivers/staging/gdm724x/gdm_mux.c 	struct mux_tx *t = NULL;
t                 364 drivers/staging/gdm724x/gdm_mux.c 	t = alloc_mux_tx(total_len);
t                 365 drivers/staging/gdm724x/gdm_mux.c 	if (!t) {
t                 371 drivers/staging/gdm724x/gdm_mux.c 	mux_header = (struct mux_pkt_header *)t->buf;
t                 377 drivers/staging/gdm724x/gdm_mux.c 	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
t                 378 drivers/staging/gdm724x/gdm_mux.c 	memset(t->buf + MUX_HEADER_SIZE + len, 0,
t                 381 drivers/staging/gdm724x/gdm_mux.c 	t->len = total_len;
t                 382 drivers/staging/gdm724x/gdm_mux.c 	t->callback = cb;
t                 383 drivers/staging/gdm724x/gdm_mux.c 	t->cb_data = cb_data;
t                 385 drivers/staging/gdm724x/gdm_mux.c 	usb_fill_bulk_urb(t->urb,
t                 388 drivers/staging/gdm724x/gdm_mux.c 			  t->buf,
t                 391 drivers/staging/gdm724x/gdm_mux.c 			  t);
t                 393 drivers/staging/gdm724x/gdm_mux.c 	ret = usb_submit_urb(t->urb, GFP_ATOMIC);
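The gdm_mux.c entries above allocate a tx object holding its own urb and bounce buffer, fill a bulk urb whose context is that object, and free everything either on the error path or in the completion handler. A hedged sketch of that flow; the demo_* names and the endpoint number are assumptions, not the driver's:

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_tx {
	struct urb *urb;
	void *buf;
};

static void demo_tx_complete(struct urb *urb)
{
	struct demo_tx *tx = urb->context;

	usb_free_urb(tx->urb);
	kfree(tx->buf);
	kfree(tx);
}

static int demo_tx_submit(struct usb_device *udev, const void *data, size_t len)
{
	struct demo_tx *tx = kzalloc(sizeof(*tx), GFP_ATOMIC);

	if (!tx)
		return -ENOMEM;
	tx->urb = usb_alloc_urb(0, GFP_ATOMIC);
	tx->buf = kmemdup(data, len, GFP_ATOMIC);
	if (!tx->urb || !tx->buf)
		goto err;

	usb_fill_bulk_urb(tx->urb, udev,
			  usb_sndbulkpipe(udev, 1),	/* endpoint assumed */
			  tx->buf, len, demo_tx_complete, tx);
	if (usb_submit_urb(tx->urb, GFP_ATOMIC))
		goto err;
	return 0;
err:
	usb_free_urb(tx->urb);		/* both helpers accept NULL */
	kfree(tx->buf);
	kfree(tx);
	return -ENOMEM;
}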
t                  79 drivers/staging/gdm724x/gdm_usb.c 	struct usb_tx *t = NULL;
t                  82 drivers/staging/gdm724x/gdm_usb.c 	t = kzalloc(sizeof(*t), GFP_ATOMIC);
t                  83 drivers/staging/gdm724x/gdm_usb.c 	if (!t) {
t                  88 drivers/staging/gdm724x/gdm_usb.c 	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
t                  92 drivers/staging/gdm724x/gdm_usb.c 	t->buf = kmalloc(len, GFP_ATOMIC);
t                  93 drivers/staging/gdm724x/gdm_usb.c 	if (!t->urb || !t->buf) {
t                 100 drivers/staging/gdm724x/gdm_usb.c 		if (t) {
t                 101 drivers/staging/gdm724x/gdm_usb.c 			usb_free_urb(t->urb);
t                 102 drivers/staging/gdm724x/gdm_usb.c 			kfree(t->buf);
t                 103 drivers/staging/gdm724x/gdm_usb.c 			kfree(t);
t                 108 drivers/staging/gdm724x/gdm_usb.c 	return t;
t                 128 drivers/staging/gdm724x/gdm_usb.c static void free_tx_struct(struct usb_tx *t)
t                 130 drivers/staging/gdm724x/gdm_usb.c 	if (t) {
t                 131 drivers/staging/gdm724x/gdm_usb.c 		usb_free_urb(t->urb);
t                 132 drivers/staging/gdm724x/gdm_usb.c 		kfree(t->buf);
t                 133 drivers/staging/gdm724x/gdm_usb.c 		kfree(t);
t                 248 drivers/staging/gdm724x/gdm_usb.c 	struct usb_tx	*t, *t_next;
t                 259 drivers/staging/gdm724x/gdm_usb.c 	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
t                 260 drivers/staging/gdm724x/gdm_usb.c 		list_del(&t->list);
t                 261 drivers/staging/gdm724x/gdm_usb.c 		free_tx_struct(t);
t                 541 drivers/staging/gdm724x/gdm_usb.c 	struct usb_tx *t = urb->context;
t                 542 drivers/staging/gdm724x/gdm_usb.c 	struct tx_cxt *tx = t->tx;
t                 551 drivers/staging/gdm724x/gdm_usb.c 	if (t->callback)
t                 552 drivers/staging/gdm724x/gdm_usb.c 		t->callback(t->cb_data);
t                 554 drivers/staging/gdm724x/gdm_usb.c 	free_tx_struct(t);
t                 562 drivers/staging/gdm724x/gdm_usb.c static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
t                 569 drivers/staging/gdm724x/gdm_usb.c 	usb_fill_bulk_urb(t->urb,
t                 572 drivers/staging/gdm724x/gdm_usb.c 			  t->buf,
t                 575 drivers/staging/gdm724x/gdm_usb.c 			  t);
t                 577 drivers/staging/gdm724x/gdm_usb.c 	ret = usb_submit_urb(t->urb, GFP_ATOMIC);
t                 640 drivers/staging/gdm724x/gdm_usb.c 	struct usb_tx *t = NULL;
t                 659 drivers/staging/gdm724x/gdm_usb.c 		t = list_entry(tx->hci_list.next, struct usb_tx, list);
t                 660 drivers/staging/gdm724x/gdm_usb.c 		list_del(&t->list);
t                 661 drivers/staging/gdm724x/gdm_usb.c 		len = t->len;
t                 662 drivers/staging/gdm724x/gdm_usb.c 		t->is_sdu = 0;
t                 671 drivers/staging/gdm724x/gdm_usb.c 		t = alloc_tx_struct(TX_BUF_SIZE);
t                 672 drivers/staging/gdm724x/gdm_usb.c 		if (!t) {
t                 676 drivers/staging/gdm724x/gdm_usb.c 		t->callback = NULL;
t                 677 drivers/staging/gdm724x/gdm_usb.c 		t->tx = tx;
t                 678 drivers/staging/gdm724x/gdm_usb.c 		t->is_sdu = 1;
t                 689 drivers/staging/gdm724x/gdm_usb.c 	if (t->is_sdu)
t                 690 drivers/staging/gdm724x/gdm_usb.c 		len = packet_aggregation(udev, t->buf);
t                 692 drivers/staging/gdm724x/gdm_usb.c 	if (send_tx_packet(usbdev, t, len)) {
t                 694 drivers/staging/gdm724x/gdm_usb.c 		t->callback = NULL;
t                 695 drivers/staging/gdm724x/gdm_usb.c 		gdm_usb_send_complete(t->urb);
t                 763 drivers/staging/gdm724x/gdm_usb.c 	struct usb_tx *t;
t                 771 drivers/staging/gdm724x/gdm_usb.c 	t = alloc_tx_struct(len);
t                 772 drivers/staging/gdm724x/gdm_usb.c 	if (!t) {
t                 777 drivers/staging/gdm724x/gdm_usb.c 	memcpy(t->buf, data, len);
t                 778 drivers/staging/gdm724x/gdm_usb.c 	t->callback = cb;
t                 779 drivers/staging/gdm724x/gdm_usb.c 	t->cb_data = cb_data;
t                 780 drivers/staging/gdm724x/gdm_usb.c 	t->len = len;
t                 781 drivers/staging/gdm724x/gdm_usb.c 	t->tx = tx;
t                 782 drivers/staging/gdm724x/gdm_usb.c 	t->is_sdu = 0;
t                 785 drivers/staging/gdm724x/gdm_usb.c 	list_add_tail(&t->list, &tx->hci_list);
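The gdm_usb.c lines that walk tx->hci_list free each node while iterating, which is why the _safe variant of the list iterator is used. A minimal sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
};

static void demo_drain(struct list_head *head)
{
	struct demo_node *n, *tmp;

	list_for_each_entry_safe(n, tmp, head, list) {
		list_del(&n->list);
		kfree(n);	/* freeing inside the loop requires _safe */
	}
}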
t                 105 drivers/staging/greybus/tools/loopback_test.c struct loopback_test t;
t                 108 drivers/staging/greybus/tools/loopback_test.c static inline int device_enabled(struct loopback_test *t, int dev_idx);
t                 111 drivers/staging/greybus/tools/loopback_test.c static int get_##field##_aggregate(struct loopback_test *t)		\
t                 115 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {				\
t                 116 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))				\
t                 118 drivers/staging/greybus/tools/loopback_test.c 		if (t->devices[i].results.field > max)			\
t                 119 drivers/staging/greybus/tools/loopback_test.c 			max = t->devices[i].results.field;		\
t                 125 drivers/staging/greybus/tools/loopback_test.c static int get_##field##_aggregate(struct loopback_test *t)		\
t                 129 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {				\
t                 130 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))				\
t                 132 drivers/staging/greybus/tools/loopback_test.c 		if (t->devices[i].results.field < min)			\
t                 133 drivers/staging/greybus/tools/loopback_test.c 			min = t->devices[i].results.field;		\
t                 139 drivers/staging/greybus/tools/loopback_test.c static int get_##field##_aggregate(struct loopback_test *t)		\
t                 144 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {				\
t                 145 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))				\
t                 148 drivers/staging/greybus/tools/loopback_test.c 		val += t->devices[i].results.field;			\
t                 223 drivers/staging/greybus/tools/loopback_test.c static inline int device_enabled(struct loopback_test *t, int dev_idx)
t                 225 drivers/staging/greybus/tools/loopback_test.c 	if (!t->mask || (t->mask & (1 << dev_idx)))
t                 231 drivers/staging/greybus/tools/loopback_test.c static void show_loopback_devices(struct loopback_test *t)
t                 235 drivers/staging/greybus/tools/loopback_test.c 	if (t->device_count == 0) {
t                 240 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++)
t                 241 drivers/staging/greybus/tools/loopback_test.c 		printf("device[%d] = %s\n", i, t->devices[i].name);
t                 323 drivers/staging/greybus/tools/loopback_test.c static int get_results(struct loopback_test *t)
t                 329 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 330 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 333 drivers/staging/greybus/tools/loopback_test.c 		d = &t->devices[i];
t                 374 drivers/staging/greybus/tools/loopback_test.c 	if (t->aggregate_output) {
t                 375 drivers/staging/greybus/tools/loopback_test.c 		r = &t->aggregate_results;
t                 377 drivers/staging/greybus/tools/loopback_test.c 		r->request_min = get_request_min_aggregate(t);
t                 378 drivers/staging/greybus/tools/loopback_test.c 		r->request_max = get_request_max_aggregate(t);
t                 379 drivers/staging/greybus/tools/loopback_test.c 		r->request_avg = get_request_avg_aggregate(t);
t                 381 drivers/staging/greybus/tools/loopback_test.c 		r->latency_min = get_latency_min_aggregate(t);
t                 382 drivers/staging/greybus/tools/loopback_test.c 		r->latency_max = get_latency_max_aggregate(t);
t                 383 drivers/staging/greybus/tools/loopback_test.c 		r->latency_avg = get_latency_avg_aggregate(t);
t                 385 drivers/staging/greybus/tools/loopback_test.c 		r->throughput_min = get_throughput_min_aggregate(t);
t                 386 drivers/staging/greybus/tools/loopback_test.c 		r->throughput_max = get_throughput_max_aggregate(t);
t                 387 drivers/staging/greybus/tools/loopback_test.c 		r->throughput_avg = get_throughput_avg_aggregate(t);
t                 390 drivers/staging/greybus/tools/loopback_test.c 			get_apbridge_unipro_latency_min_aggregate(t);
t                 392 drivers/staging/greybus/tools/loopback_test.c 			get_apbridge_unipro_latency_max_aggregate(t);
t                 394 drivers/staging/greybus/tools/loopback_test.c 			get_apbridge_unipro_latency_avg_aggregate(t);
t                 397 drivers/staging/greybus/tools/loopback_test.c 			get_gbphy_firmware_latency_min_aggregate(t);
t                 399 drivers/staging/greybus/tools/loopback_test.c 			get_gbphy_firmware_latency_max_aggregate(t);
t                 401 drivers/staging/greybus/tools/loopback_test.c 			get_gbphy_firmware_latency_avg_aggregate(t);
t                 416 drivers/staging/greybus/tools/loopback_test.c int format_output(struct loopback_test *t,
t                 429 drivers/staging/greybus/tools/loopback_test.c 	if (t->porcelain) {
t                 432 drivers/staging/greybus/tools/loopback_test.c 			t->test_name,
t                 434 drivers/staging/greybus/tools/loopback_test.c 			t->size,
t                 435 drivers/staging/greybus/tools/loopback_test.c 			t->iteration_max,
t                 437 drivers/staging/greybus/tools/loopback_test.c 			t->use_async ? "Enabled" : "Disabled");
t                 474 drivers/staging/greybus/tools/loopback_test.c 			t->test_name, dev_name, t->size, t->iteration_max,
t                 513 drivers/staging/greybus/tools/loopback_test.c static int log_results(struct loopback_test *t)
t                 530 drivers/staging/greybus/tools/loopback_test.c 	if (t->file_output && !t->porcelain) {
t                 532 drivers/staging/greybus/tools/loopback_test.c 			 t->test_name, t->size, t->iteration_max);
t                 541 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 542 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 545 drivers/staging/greybus/tools/loopback_test.c 		len = format_output(t, &t->devices[i].results,
t                 546 drivers/staging/greybus/tools/loopback_test.c 				    t->devices[i].name,
t                 548 drivers/staging/greybus/tools/loopback_test.c 		if (t->file_output && !t->porcelain) {
t                 557 drivers/staging/greybus/tools/loopback_test.c 	if (t->aggregate_output) {
t                 558 drivers/staging/greybus/tools/loopback_test.c 		len = format_output(t, &t->aggregate_results, "aggregate",
t                 560 drivers/staging/greybus/tools/loopback_test.c 		if (t->file_output && !t->porcelain) {
t                 567 drivers/staging/greybus/tools/loopback_test.c 	if (t->file_output && !t->porcelain)
t                 583 drivers/staging/greybus/tools/loopback_test.c int find_loopback_devices(struct loopback_test *t)
t                 590 drivers/staging/greybus/tools/loopback_test.c 	n = scandir(t->sysfs_prefix, &namelist, NULL, alphasort);
t                 608 drivers/staging/greybus/tools/loopback_test.c 		if (!is_loopback_device(t->sysfs_prefix, namelist[i]->d_name))
t                 611 drivers/staging/greybus/tools/loopback_test.c 		if (t->device_count == MAX_NUM_DEVICES) {
t                 616 drivers/staging/greybus/tools/loopback_test.c 		d = &t->devices[t->device_count++];
t                 620 drivers/staging/greybus/tools/loopback_test.c 			 t->sysfs_prefix, d->name);
t                 623 drivers/staging/greybus/tools/loopback_test.c 			 t->debugfs_prefix, d->name);
t                 625 drivers/staging/greybus/tools/loopback_test.c 		if (t->debug)
t                 638 drivers/staging/greybus/tools/loopback_test.c static int open_poll_files(struct loopback_test *t)
t                 646 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 647 drivers/staging/greybus/tools/loopback_test.c 		dev = &t->devices[i];
t                 649 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 653 drivers/staging/greybus/tools/loopback_test.c 		t->fds[fds_idx].fd = open(buf, O_RDONLY);
t                 654 drivers/staging/greybus/tools/loopback_test.c 		if (t->fds[fds_idx].fd < 0) {
t                 658 drivers/staging/greybus/tools/loopback_test.c 		read(t->fds[fds_idx].fd, &dummy, 1);
t                 659 drivers/staging/greybus/tools/loopback_test.c 		t->fds[fds_idx].events = POLLERR | POLLPRI;
t                 660 drivers/staging/greybus/tools/loopback_test.c 		t->fds[fds_idx].revents = 0;
t                 664 drivers/staging/greybus/tools/loopback_test.c 	t->poll_count = fds_idx;
t                 670 drivers/staging/greybus/tools/loopback_test.c 		close(t->fds[i].fd);
t                 675 drivers/staging/greybus/tools/loopback_test.c static int close_poll_files(struct loopback_test *t)
t                 678 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->poll_count; i++)
t                 679 drivers/staging/greybus/tools/loopback_test.c 		close(t->fds[i].fd);
t                 683 drivers/staging/greybus/tools/loopback_test.c static int is_complete(struct loopback_test *t)
t                 688 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 689 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 692 drivers/staging/greybus/tools/loopback_test.c 		iteration_count = read_sysfs_int(t->devices[i].sysfs_entry,
t                 696 drivers/staging/greybus/tools/loopback_test.c 		if (iteration_count != t->iteration_max)
t                 703 drivers/staging/greybus/tools/loopback_test.c static void stop_tests(struct loopback_test *t)
t                 707 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 708 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 710 drivers/staging/greybus/tools/loopback_test.c 		write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
t                 716 drivers/staging/greybus/tools/loopback_test.c static int wait_for_complete(struct loopback_test *t)
t                 739 drivers/staging/greybus/tools/loopback_test.c 	if (t->poll_timeout.tv_sec != 0)
t                 740 drivers/staging/greybus/tools/loopback_test.c 		ts = &t->poll_timeout;
t                 744 drivers/staging/greybus/tools/loopback_test.c 		ret = ppoll(t->fds, t->poll_count, ts, &mask_old);
t                 746 drivers/staging/greybus/tools/loopback_test.c 			stop_tests(t);
t                 751 drivers/staging/greybus/tools/loopback_test.c 		for (i = 0; i < t->poll_count; i++) {
t                 752 drivers/staging/greybus/tools/loopback_test.c 			if (t->fds[i].revents & POLLPRI) {
t                 754 drivers/staging/greybus/tools/loopback_test.c 				read(t->fds[i].fd, &dummy, 1);
t                 759 drivers/staging/greybus/tools/loopback_test.c 		if (number_of_events == t->poll_count)
t                 763 drivers/staging/greybus/tools/loopback_test.c 	if (!is_complete(t)) {
t                 771 drivers/staging/greybus/tools/loopback_test.c static void prepare_devices(struct loopback_test *t)
t                 779 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++)
t                 780 drivers/staging/greybus/tools/loopback_test.c 		if (t->stop_all || device_enabled(t, i))
t                 781 drivers/staging/greybus/tools/loopback_test.c 			write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
t                 784 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 785 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 788 drivers/staging/greybus/tools/loopback_test.c 		write_sysfs_val(t->devices[i].sysfs_entry, "us_wait",
t                 789 drivers/staging/greybus/tools/loopback_test.c 				t->us_wait);
t                 792 drivers/staging/greybus/tools/loopback_test.c 		write_sysfs_val(t->devices[i].sysfs_entry, "size", t->size);
t                 795 drivers/staging/greybus/tools/loopback_test.c 		write_sysfs_val(t->devices[i].sysfs_entry, "iteration_max",
t                 796 drivers/staging/greybus/tools/loopback_test.c 				t->iteration_max);
t                 798 drivers/staging/greybus/tools/loopback_test.c 		if (t->use_async) {
t                 799 drivers/staging/greybus/tools/loopback_test.c 			write_sysfs_val(t->devices[i].sysfs_entry, "async", 1);
t                 800 drivers/staging/greybus/tools/loopback_test.c 			write_sysfs_val(t->devices[i].sysfs_entry,
t                 801 drivers/staging/greybus/tools/loopback_test.c 					"timeout", t->async_timeout);
t                 802 drivers/staging/greybus/tools/loopback_test.c 			write_sysfs_val(t->devices[i].sysfs_entry,
t                 804 drivers/staging/greybus/tools/loopback_test.c 					t->async_outstanding_operations);
t                 806 drivers/staging/greybus/tools/loopback_test.c 			write_sysfs_val(t->devices[i].sysfs_entry, "async", 0);
t                 810 drivers/staging/greybus/tools/loopback_test.c static int start(struct loopback_test *t)
t                 815 drivers/staging/greybus/tools/loopback_test.c 	for (i = 0; i < t->device_count; i++) {
t                 816 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 819 drivers/staging/greybus/tools/loopback_test.c 		write_sysfs_val(t->devices[i].sysfs_entry, "type", t->test_id);
t                 826 drivers/staging/greybus/tools/loopback_test.c void loopback_run(struct loopback_test *t)
t                 832 drivers/staging/greybus/tools/loopback_test.c 		if (strstr(dict[i].name, t->test_name))
t                 833 drivers/staging/greybus/tools/loopback_test.c 			t->test_id = dict[i].type;
t                 835 drivers/staging/greybus/tools/loopback_test.c 	if (!t->test_id) {
t                 836 drivers/staging/greybus/tools/loopback_test.c 		fprintf(stderr, "invalid test %s\n", t->test_name);
t                 841 drivers/staging/greybus/tools/loopback_test.c 	prepare_devices(t);
t                 843 drivers/staging/greybus/tools/loopback_test.c 	ret = open_poll_files(t);
t                 847 drivers/staging/greybus/tools/loopback_test.c 	start(t);
t                 849 drivers/staging/greybus/tools/loopback_test.c 	ret = wait_for_complete(t);
t                 850 drivers/staging/greybus/tools/loopback_test.c 	close_poll_files(t);
t                 855 drivers/staging/greybus/tools/loopback_test.c 	get_results(t);
t                 857 drivers/staging/greybus/tools/loopback_test.c 	log_results(t);
t                 866 drivers/staging/greybus/tools/loopback_test.c static int sanity_check(struct loopback_test *t)
t                 870 drivers/staging/greybus/tools/loopback_test.c 	if (t->device_count == 0) {
t                 876 drivers/staging/greybus/tools/loopback_test.c 		if (!device_enabled(t, i))
t                 879 drivers/staging/greybus/tools/loopback_test.c 		if (t->mask && !strcmp(t->devices[i].name, "")) {
t                 896 drivers/staging/greybus/tools/loopback_test.c 	memset(&t, 0, sizeof(t));
t                 902 drivers/staging/greybus/tools/loopback_test.c 			snprintf(t.test_name, MAX_STR_LEN, "%s", optarg);
t                 905 drivers/staging/greybus/tools/loopback_test.c 			t.size = atoi(optarg);
t                 908 drivers/staging/greybus/tools/loopback_test.c 			t.iteration_max = atoi(optarg);
t                 911 drivers/staging/greybus/tools/loopback_test.c 			snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
t                 914 drivers/staging/greybus/tools/loopback_test.c 			snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
t                 917 drivers/staging/greybus/tools/loopback_test.c 			t.mask = atol(optarg);
t                 920 drivers/staging/greybus/tools/loopback_test.c 			t.verbose = 1;
t                 923 drivers/staging/greybus/tools/loopback_test.c 			t.debug = 1;
t                 926 drivers/staging/greybus/tools/loopback_test.c 			t.raw_data_dump = 1;
t                 929 drivers/staging/greybus/tools/loopback_test.c 			t.porcelain = 1;
t                 932 drivers/staging/greybus/tools/loopback_test.c 			t.aggregate_output = 1;
t                 935 drivers/staging/greybus/tools/loopback_test.c 			t.list_devices = 1;
t                 938 drivers/staging/greybus/tools/loopback_test.c 			t.use_async = 1;
t                 941 drivers/staging/greybus/tools/loopback_test.c 			t.async_timeout = atoi(optarg);
t                 944 drivers/staging/greybus/tools/loopback_test.c 			t.poll_timeout.tv_sec = atoi(optarg);
t                 947 drivers/staging/greybus/tools/loopback_test.c 			t.async_outstanding_operations = atoi(optarg);
t                 950 drivers/staging/greybus/tools/loopback_test.c 			t.us_wait = atoi(optarg);
t                 953 drivers/staging/greybus/tools/loopback_test.c 			t.file_output = 1;
t                 956 drivers/staging/greybus/tools/loopback_test.c 			t.stop_all = 1;
t                 964 drivers/staging/greybus/tools/loopback_test.c 	if (!strcmp(t.sysfs_prefix, ""))
t                 965 drivers/staging/greybus/tools/loopback_test.c 		snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
t                 967 drivers/staging/greybus/tools/loopback_test.c 	if (!strcmp(t.debugfs_prefix, ""))
t                 968 drivers/staging/greybus/tools/loopback_test.c 		snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
t                 970 drivers/staging/greybus/tools/loopback_test.c 	ret = find_loopback_devices(&t);
t                 973 drivers/staging/greybus/tools/loopback_test.c 	ret = sanity_check(&t);
t                 977 drivers/staging/greybus/tools/loopback_test.c 	if (t.list_devices) {
t                 978 drivers/staging/greybus/tools/loopback_test.c 		show_loopback_devices(&t);
t                 982 drivers/staging/greybus/tools/loopback_test.c 	if (t.test_name[0] == '\0' || t.iteration_max == 0)
t                 985 drivers/staging/greybus/tools/loopback_test.c 	if (t.async_timeout == 0)
t                 986 drivers/staging/greybus/tools/loopback_test.c 		t.async_timeout = DEFAULT_ASYNC_TIMEOUT;
t                 988 drivers/staging/greybus/tools/loopback_test.c 	loopback_run(&t);
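loopback_test.c (a userspace tool) stamps out its per-field min/max/avg aggregate helpers with token-pasting macros and skips devices excluded by the mask. A reduced sketch of that generation idiom, with hypothetical demo_* names and only a max variant:

#include <stdint.h>

#define DEMO_MAX_DEV 8

struct demo_results {
	uint32_t latency;
	uint32_t throughput;
};

struct demo_test {
	int device_count;
	uint32_t mask;
	struct demo_results results[DEMO_MAX_DEV];
};

static int demo_enabled(struct demo_test *t, int i)
{
	return !t->mask || (t->mask & (1u << i));
}

/* One macro expansion per results field yields get_<field>_max(). */
#define DEMO_MAX_AGGREGATE(field)				\
static uint32_t get_##field##_max(struct demo_test *t)		\
{								\
	uint32_t max = 0;					\
	int i;							\
	for (i = 0; i < t->device_count; i++) {			\
		if (!demo_enabled(t, i))			\
			continue;				\
		if (t->results[i].field > max)			\
			max = t->results[i].field;		\
	}							\
	return max;						\
}

DEMO_MAX_AGGREGATE(latency)
DEMO_MAX_AGGREGATE(throughput)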
t                 189 drivers/staging/iio/adc/ad7280a.c 	struct spi_transfer t = {
t                 197 drivers/staging/iio/adc/ad7280a.c 	ret = spi_sync_transfer(st->spi, &t, 1);
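The ad7280a.c entries show the single-transfer SPI idiom: one struct spi_transfer handed to spi_sync_transfer(). A short sketch; the 4-byte length and the caller-provided (DMA-safe) buffers are assumptions of the sketch:

#include <linux/spi/spi.h>

static int demo_spi_xfer32(struct spi_device *spi, __be32 *tx, __be32 *rx)
{
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = 4,
	};

	/* Synchronous, single-message transfer of exactly one segment. */
	return spi_sync_transfer(spi, &t, 1);
}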
t                 146 drivers/staging/isdn/avm/c4.c static inline int wait_for_doorbell(avmcard *card, unsigned long t)
t                 150 drivers/staging/isdn/avm/c4.c 	stop = jiffies + t;
t                 434 drivers/staging/isdn/gigaset/bas-gigaset.c static void cmd_in_timeout(struct timer_list *t)
t                 436 drivers/staging/isdn/gigaset/bas-gigaset.c 	struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
t                 640 drivers/staging/isdn/gigaset/bas-gigaset.c static void int_in_resubmit(struct timer_list *t)
t                 642 drivers/staging/isdn/gigaset/bas-gigaset.c 	struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
t                1444 drivers/staging/isdn/gigaset/bas-gigaset.c static void req_timeout(struct timer_list *t)
t                1446 drivers/staging/isdn/gigaset/bas-gigaset.c 	struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
t                1840 drivers/staging/isdn/gigaset/bas-gigaset.c static void atrdy_timeout(struct timer_list *t)
t                1842 drivers/staging/isdn/gigaset/bas-gigaset.c 	struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
t                 153 drivers/staging/isdn/gigaset/common.c static void timer_tick(struct timer_list *t)
t                 155 drivers/staging/isdn/gigaset/common.c 	struct cardstate *cs = from_timer(cs, t, timer);
t                 296 drivers/staging/media/imx/imx-ic-prpencvf.c static void prp_eof_timeout(struct timer_list *t)
t                 298 drivers/staging/media/imx/imx-ic-prpencvf.c 	struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
t                 356 drivers/staging/media/imx/imx-media-csi.c static void csi_idmac_eof_timeout(struct timer_list *t)
t                 358 drivers/staging/media/imx/imx-media-csi.c 	struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
t                  75 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t)	SHIFT_AND_MASK_BITS(t, 30, 28)
t                 765 drivers/staging/most/usb/usb.c static void link_stat_timer_handler(struct timer_list *t)
t                 767 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
t                1848 drivers/staging/octeon-usb/octeon-hcd.c 		struct cvmx_usb_transaction *t =
t                1849 drivers/staging/octeon-usb/octeon-hcd.c 			list_first_entry(&pipe->transactions, typeof(*t),
t                1851 drivers/staging/octeon-usb/octeon-hcd.c 		if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
t                1856 drivers/staging/octeon-usb/octeon-hcd.c 		    (!usb->active_split || (usb->active_split == t))) {
t                1857 drivers/staging/octeon-usb/octeon-hcd.c 			prefetch(t);
t                  96 drivers/staging/octeon/ethernet-tx.c 				struct sk_buff *t;
t                  98 drivers/staging/octeon/ethernet-tx.c 				t = __skb_dequeue(&priv->tx_free_list[qos]);
t                  99 drivers/staging/octeon/ethernet-tx.c 				t->next = to_free_list;
t                 100 drivers/staging/octeon/ethernet-tx.c 				to_free_list = t;
t                 107 drivers/staging/octeon/ethernet-tx.c 				struct sk_buff *t = to_free_list;
t                 110 drivers/staging/octeon/ethernet-tx.c 				dev_kfree_skb_any(t);
t                 460 drivers/staging/octeon/ethernet-tx.c 		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
t                 462 drivers/staging/octeon/ethernet-tx.c 		t->next = to_free_list;
t                 463 drivers/staging/octeon/ethernet-tx.c 		to_free_list = t;
t                 471 drivers/staging/octeon/ethernet-tx.c 		struct sk_buff *t = to_free_list;
t                 474 drivers/staging/octeon/ethernet-tx.c 		dev_kfree_skb_any(t);
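The octeon ethernet-tx.c lines unlink skbs from the locked queue onto a private chain through skb->next and only call dev_kfree_skb_any() after the lock is dropped. A sketch of that deferred-free shape, simplified to use the queue's own lock:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void demo_drain_tx(struct sk_buff_head *q)
{
	struct sk_buff *to_free = NULL;
	struct sk_buff *skb;

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(q)) != NULL) {
		skb->next = to_free;	/* chain locally, free later */
		to_free = skb;
	}
	spin_unlock_bh(&q->lock);

	while (to_free) {
		skb = to_free;
		to_free = skb->next;
		dev_kfree_skb_any(skb);	/* safe in any context */
	}
}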
t                4725 drivers/staging/qlge/qlge_main.c static void ql_timer(struct timer_list *t)
t                4727 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = from_timer(qdev, t, timer);
t                  16 drivers/staging/rtl8188eu/core/rtw_led.c static void BlinkTimerCallback(struct timer_list *t)
t                  18 drivers/staging/rtl8188eu/core/rtw_led.c 	struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
t                1299 drivers/staging/rtl8188eu/core/rtw_mlme.c void _rtw_join_timeout_handler (struct timer_list *t)
t                1301 drivers/staging/rtl8188eu/core/rtw_mlme.c 	struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
t                1339 drivers/staging/rtl8188eu/core/rtw_mlme.c void rtw_scan_timeout_handler (struct timer_list *t)
t                1341 drivers/staging/rtl8188eu/core/rtw_mlme.c 	struct adapter *adapter = from_timer(adapter, t,
t                1367 drivers/staging/rtl8188eu/core/rtw_mlme.c void rtw_dynamic_check_timer_handlder(struct timer_list *t)
t                1369 drivers/staging/rtl8188eu/core/rtw_mlme.c 	struct adapter *adapter = from_timer(adapter, t,
t                4743 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c void survey_timer_hdl(struct timer_list *t)
t                4745 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	struct adapter *padapter = from_timer(padapter, t,
t                4783 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c void link_timer_hdl(struct timer_list *t)
t                4785 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	struct adapter *padapter = from_timer(padapter, t,
t                4820 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c void addba_timer_hdl(struct timer_list *t)
t                4822 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
t                 267 drivers/staging/rtl8188eu/core/rtw_pwrctrl.c static void pwr_state_check_handler(struct timer_list *t)
t                 270 drivers/staging/rtl8188eu/core/rtw_pwrctrl.c 		from_timer(padapter, t,
t                  34 drivers/staging/rtl8188eu/core/rtw_recv.c static void rtw_signal_stat_timer_hdl(struct timer_list *t);
t                1821 drivers/staging/rtl8188eu/core/rtw_recv.c void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
t                1823 drivers/staging/rtl8188eu/core/rtw_recv.c 	struct recv_reorder_ctrl *preorder_ctrl = from_timer(preorder_ctrl, t,
t                2005 drivers/staging/rtl8188eu/core/rtw_recv.c static void rtw_signal_stat_timer_hdl(struct timer_list *t)
t                2008 drivers/staging/rtl8188eu/core/rtw_recv.c 		from_timer(adapter, t, recvpriv.signal_stat_timer);
t                  27 drivers/staging/rtl8188eu/core/rtw_security.c 	u32	t, u;
t                  41 drivers/staging/rtl8188eu/core/rtw_security.c 		t = state[counter];
t                  42 drivers/staging/rtl8188eu/core/rtw_security.c 		stateindex = (stateindex + key[keyindex] + t) & 0xff;
t                  44 drivers/staging/rtl8188eu/core/rtw_security.c 		state[stateindex] = (u8)t;
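The rtw_security.c lines above are the RC4 ("arcfour") key schedule: the key is mixed into a 256-byte permutation by repeatedly swapping state[i] with state[j]. A self-contained sketch of the same swap, with a demo_ name:

#include <linux/types.h>

static void demo_arcfour_init(u8 state[256], const u8 *key, u32 keylen)
{
	u32 i, j = 0, k = 0, t;

	for (i = 0; i < 256; i++)
		state[i] = (u8)i;

	for (i = 0; i < 256; i++) {
		t = state[i];
		j = (j + key[k] + t) & 0xff;	/* mix in next key byte */
		state[i] = state[j];		/* swap state[i], state[j] */
		state[j] = (u8)t;
		if (++k >= keylen)
			k = 0;
	}
}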
t                 950 drivers/staging/rtl8188eu/hal/phy.c 			     u8 t, bool is2t)
t                 983 drivers/staging/rtl8188eu/hal/phy.c 	if (t == 0) {
t                 994 drivers/staging/rtl8188eu/hal/phy.c 	if (t == 0)
t                1040 drivers/staging/rtl8188eu/hal/phy.c 				result[t][0] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_A,
t                1042 drivers/staging/rtl8188eu/hal/phy.c 				result[t][1] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_A,
t                1051 drivers/staging/rtl8188eu/hal/phy.c 				result[t][2] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_A_2,
t                1053 drivers/staging/rtl8188eu/hal/phy.c 				result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
t                1076 drivers/staging/rtl8188eu/hal/phy.c 				result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B,
t                1078 drivers/staging/rtl8188eu/hal/phy.c 				result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B,
t                1080 drivers/staging/rtl8188eu/hal/phy.c 				result[t][6] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_B_2,
t                1082 drivers/staging/rtl8188eu/hal/phy.c 				result[t][7] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_B_2,
t                1086 drivers/staging/rtl8188eu/hal/phy.c 				result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B,
t                1088 drivers/staging/rtl8188eu/hal/phy.c 				result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B,
t                1102 drivers/staging/rtl8188eu/hal/phy.c 	if (t != 0) {
t                 336 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 	int t, sz, w_sz, pull = 0;
t                 353 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 	for (t = 0; t < pattrib->nr_frags; t++) {
t                 357 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 		if (t != (pattrib->nr_frags - 1)) {
t                 325 drivers/staging/rtl8188eu/include/rtw_mlme.h void _rtw_join_timeout_handler(struct timer_list *t);
t                 326 drivers/staging/rtl8188eu/include/rtw_mlme.h void rtw_scan_timeout_handler(struct timer_list *t);
t                 328 drivers/staging/rtl8188eu/include/rtw_mlme.h void rtw_dynamic_check_timer_handlder(struct timer_list *t);
t                 561 drivers/staging/rtl8188eu/include/rtw_mlme_ext.h void survey_timer_hdl(struct timer_list *t);
t                 562 drivers/staging/rtl8188eu/include/rtw_mlme_ext.h void link_timer_hdl(struct timer_list *t);
t                 563 drivers/staging/rtl8188eu/include/rtw_mlme_ext.h void addba_timer_hdl(struct timer_list *t);
t                 244 drivers/staging/rtl8188eu/include/rtw_recv.h void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
t                  77 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_watchdog_timer_cb(struct timer_list *t);
t                1520 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
t                1522 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
t                2629 drivers/staging/rtl8192e/rtl8192e/rtl_core.c void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t)
t                2631 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct r8192_priv *priv = from_timer(priv, t, gpio_polling_timer);
t                 578 drivers/staging/rtl8192e/rtl8192e/rtl_core.h void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t);
t                 191 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
t                2125 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
t                2127 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 	struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
t                 523 drivers/staging/rtl8192e/rtl819x_BAProc.c void BaSetupTimeOut(struct timer_list *t)
t                 525 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
t                 533 drivers/staging/rtl8192e/rtl819x_BAProc.c void TxBaInactTimeout(struct timer_list *t)
t                 535 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
t                 545 drivers/staging/rtl8192e/rtl819x_BAProc.c void RxBaInactTimeout(struct timer_list *t)
t                 547 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
t                  19 drivers/staging/rtl8192e/rtl819x_TSProc.c static void RxPktPendingTimeout(struct timer_list *t)
t                  21 drivers/staging/rtl8192e/rtl819x_TSProc.c 	struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
t                  92 drivers/staging/rtl8192e/rtl819x_TSProc.c static void TsAddBaProcess(struct timer_list *t)
t                  94 drivers/staging/rtl8192e/rtl819x_TSProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
t                2111 drivers/staging/rtl8192e/rtllib.h void BaSetupTimeOut(struct timer_list *t);
t                2112 drivers/staging/rtl8192e/rtllib.h void TxBaInactTimeout(struct timer_list *t);
t                2113 drivers/staging/rtl8192e/rtllib.h void RxBaInactTimeout(struct timer_list *t);
t                 201 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	u16 t = Sbox[Hi8(v)];
t                 202 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
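The rtllib_crypt_tkip.c helper above builds TKIP's 16-bit S value from a single 256-entry u16 table: the low-byte lookup is XORed with the byte-swapped high-byte lookup, since ((t << 8) | (t >> 8)) is a u16 byte swap. A sketch with an assumed table argument:

#include <linux/types.h>
#include <linux/swab.h>

static inline u16 demo_tkip_s(const u16 sbox[256], u16 v)
{
	u16 t = sbox[v >> 8];			/* Hi8(v) lookup */

	return sbox[v & 0xff] ^ swab16(t);	/* Lo8(v) ^ byte-swapped hi */
}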
t                 393 drivers/staging/rtl8192e/rtllib_softmac.c static void rtllib_send_beacon_cb(struct timer_list *t)
t                 396 drivers/staging/rtl8192e/rtllib_softmac.c 		from_timer(ieee, t, beacon_timer);
t                1422 drivers/staging/rtl8192e/rtllib_softmac.c static void rtllib_associate_abort_cb(struct timer_list *t)
t                1424 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_device *dev = from_timer(dev, t, associate_timer);
t                1773 drivers/staging/rtl8192e/rtllib_softmac.c 	u8 *t;
t                1783 drivers/staging/rtl8192e/rtllib_softmac.c 		t = skb->data + sizeof(struct rtllib_authentication);
t                1785 drivers/staging/rtl8192e/rtllib_softmac.c 		if (*(t++) == MFIE_TYPE_CHALLENGE) {
t                1786 drivers/staging/rtl8192e/rtllib_softmac.c 			*chlen = *(t++);
t                1787 drivers/staging/rtl8192e/rtllib_softmac.c 			*challenge = kmemdup(t, *chlen, GFP_ATOMIC);
t                2387 drivers/staging/rtl8192u/ieee80211/ieee80211.h void BaSetupTimeOut(struct timer_list *t);
t                2388 drivers/staging/rtl8192u/ieee80211/ieee80211.h void TxBaInactTimeout(struct timer_list *t);
t                2389 drivers/staging/rtl8192u/ieee80211/ieee80211.h void RxBaInactTimeout(struct timer_list *t);
t                  55 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c void ieee80211_crypt_deinit_handler(struct timer_list *t)
t                  57 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c 	struct ieee80211_device *ieee = from_timer(ieee, t, crypt_deinit_timer);
t                  82 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h void ieee80211_crypt_deinit_handler(struct timer_list *t);
t                 204 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	u16 t = Sbox[Hi8(v)];
t                 205 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
t                 377 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_send_beacon_cb(struct timer_list *t)
t                 380 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		from_timer(ieee, t, beacon_timer);
t                1187 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void ieee80211_associate_abort_cb(struct timer_list *t)
t                1189 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
t                1463 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	u8 *t;
t                1471 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		t = skb->data + sizeof(struct ieee80211_authentication);
t                1473 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		if (*(t++) == MFIE_TYPE_CHALLENGE) {
t                1474 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			*chlen = *(t++);
t                1475 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			*challenge = kmemdup(t, *chlen, GFP_ATOMIC);
t                 666 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c void BaSetupTimeOut(struct timer_list *t)
t                 668 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_pending_ba_record.timer);
t                 675 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c void TxBaInactTimeout(struct timer_list *t)
t                 677 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_admitted_ba_record.timer);
t                 688 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c void RxBaInactTimeout(struct timer_list *t)
t                 690 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_admitted_ba_record.timer);
t                  26 drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c static void RxPktPendingTimeout(struct timer_list *t)
t                  28 drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c 	struct rx_ts_record     *pRxTs = from_timer(pRxTs, t, rx_pkt_pending_timer);
t                  93 drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c static void TsAddBaProcess(struct timer_list *t)
t                  95 drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c 	struct tx_ts_record *pTxTs = from_timer(pTxTs, t, ts_add_ba_timer);
t                 470 drivers/staging/rtl8192u/r8192U_core.c static void watch_dog_timer_callback(struct timer_list *t);
t                3341 drivers/staging/rtl8192u/r8192U_core.c static void watch_dog_timer_callback(struct timer_list *t)
t                3343 drivers/staging/rtl8192u/r8192U_core.c 	struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
t                2598 drivers/staging/rtl8192u/r8192U_dm.c void dm_fsync_timer_callback(struct timer_list *t)
t                2600 drivers/staging/rtl8192u/r8192U_dm.c 	struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
t                 169 drivers/staging/rtl8192u/r8192U_dm.h void dm_fsync_timer_callback(struct timer_list *t);
t                  23 drivers/staging/rtl8712/mlme_linux.c static void sitesurvey_ctrl_handler(struct timer_list *t)
t                  26 drivers/staging/rtl8712/mlme_linux.c 		from_timer(adapter, t,
t                  34 drivers/staging/rtl8712/mlme_linux.c static void join_timeout_handler (struct timer_list *t)
t                  37 drivers/staging/rtl8712/mlme_linux.c 		from_timer(adapter, t, mlmepriv.assoc_timer);
t                  42 drivers/staging/rtl8712/mlme_linux.c static void _scan_timeout_handler (struct timer_list *t)
t                  45 drivers/staging/rtl8712/mlme_linux.c 		from_timer(adapter, t, mlmepriv.scan_to_timer);
t                  50 drivers/staging/rtl8712/mlme_linux.c static void dhcp_timeout_handler (struct timer_list *t)
t                  53 drivers/staging/rtl8712/mlme_linux.c 		from_timer(adapter, t, mlmepriv.dhcp_timer);
t                  58 drivers/staging/rtl8712/mlme_linux.c static void wdg_timeout_handler (struct timer_list *t)
t                  61 drivers/staging/rtl8712/mlme_linux.c 		from_timer(adapter, t, mlmepriv.wdg_timer);
t                 127 drivers/staging/rtl8712/recv_linux.c static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
t                 130 drivers/staging/rtl8712/recv_linux.c 			 from_timer(reorder_ctrl, t, reordering_ctrl_timer);
t                  65 drivers/staging/rtl8712/rtl8712_led.c static void BlinkTimerCallback(struct timer_list *t);
t                 812 drivers/staging/rtl8712/rtl8712_led.c static void BlinkTimerCallback(struct timer_list *t)
t                 814 drivers/staging/rtl8712/rtl8712_led.c 	struct LED_871x  *pLed = from_timer(pLed, t, BlinkTimer);
t                 690 drivers/staging/rtl8712/rtl8712_xmit.c 	int t, sz, w_sz;
t                 702 drivers/staging/rtl8712/rtl8712_xmit.c 	for (t = 0; t < pattrib->nr_frags; t++) {
t                 703 drivers/staging/rtl8712/rtl8712_xmit.c 		if (t != (pattrib->nr_frags - 1)) {
t                 707 drivers/staging/rtl8712/rtl8712_xmit.c 			pxmitframe->last[t] = 0;
t                 710 drivers/staging/rtl8712/rtl8712_xmit.c 			pxmitframe->last[t] = 1;
t                 715 drivers/staging/rtl8712/rtl8712_xmit.c 		pxmitframe->bpending[t] = false;
t                 155 drivers/staging/rtl8712/rtl871x_pwrctrl.c static void rpwm_check_handler (struct timer_list *t)
t                 158 drivers/staging/rtl8712/rtl871x_pwrctrl.c 		from_timer(adapter, t, pwrctrlpriv.rpwm_check_timer);
t                  49 drivers/staging/rtl8712/rtl871x_security.c 	u32	t, u;
t                  63 drivers/staging/rtl8712/rtl871x_security.c 		t = state[counter];
t                  64 drivers/staging/rtl8712/rtl871x_security.c 		stateindex = (stateindex + key[keyindex] + t) & 0xff;
t                  66 drivers/staging/rtl8712/rtl871x_security.c 		state[stateindex] = (u8)t;
t                1381 drivers/staging/rtl8712/rtl871x_security.c void r8712_use_tkipkey_handler(struct timer_list *t)
t                1384 drivers/staging/rtl8712/rtl871x_security.c 		from_timer(padapter, t, securitypriv.tkip_timer);
t                 215 drivers/staging/rtl8712/rtl871x_security.h void r8712_use_tkipkey_handler(struct timer_list *t);
t                1763 drivers/staging/rtl8723bs/core/rtw_mlme.c void _rtw_join_timeout_handler(struct timer_list *t)
t                1765 drivers/staging/rtl8723bs/core/rtw_mlme.c 	struct adapter *adapter = from_timer(adapter, t,
t                1811 drivers/staging/rtl8723bs/core/rtw_mlme.c void rtw_scan_timeout_handler(struct timer_list *t)
t                1813 drivers/staging/rtl8723bs/core/rtw_mlme.c 	struct adapter *adapter = from_timer(adapter, t,
t                5775 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c void survey_timer_hdl(struct timer_list *t)
t                5778 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		from_timer(padapter, t, mlmeextpriv.survey_timer);
t                5824 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c void link_timer_hdl(struct timer_list *t)
t                5827 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		from_timer(padapter, t, mlmeextpriv.link_timer);
t                5876 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c void addba_timer_hdl(struct timer_list *t)
t                5878 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
t                5893 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c void sa_query_timer_hdl(struct timer_list *t)
t                5896 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		from_timer(padapter, t, mlmeextpriv.sa_query_timer);
t                 196 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c static void pwr_state_check_handler(struct timer_list *t)
t                 199 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c 		from_timer(pwrctrlpriv, t, pwr_state_check_timer);
t                 810 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c static void pwr_rpwm_timeout_handler(struct timer_list *t)
t                 812 drivers/staging/rtl8723bs/core/rtw_pwrctrl.c 	struct pwrctrl_priv *pwrpriv = from_timer(pwrpriv, t, pwr_rpwm_timer);
t                  21 drivers/staging/rtl8723bs/core/rtw_recv.c static void rtw_signal_stat_timer_hdl(struct timer_list *t);
t                2348 drivers/staging/rtl8723bs/core/rtw_recv.c void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
t                2351 drivers/staging/rtl8723bs/core/rtw_recv.c 		from_timer(preorder_ctrl, t, reordering_ctrl_timer);
t                2592 drivers/staging/rtl8723bs/core/rtw_recv.c static void rtw_signal_stat_timer_hdl(struct timer_list *t)
t                2595 drivers/staging/rtl8723bs/core/rtw_recv.c 		from_timer(adapter, t, recvpriv.signal_stat_timer);
t                 100 drivers/staging/rtl8723bs/core/rtw_security.c 	u32 t, u;
t                 114 drivers/staging/rtl8723bs/core/rtw_security.c 		t = state[counter];
t                 115 drivers/staging/rtl8723bs/core/rtw_security.c 		stateindex = (stateindex + key[keyindex] + t) & 0xff;
t                 117 drivers/staging/rtl8723bs/core/rtw_security.c 		state[stateindex] = (u8)t;
t                2237 drivers/staging/rtl8723bs/core/rtw_security.c 		ROUND(1, t, s);
t                2241 drivers/staging/rtl8723bs/core/rtw_security.c 		ROUND(0, s, t);
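The state[]/key[] shuffle indexed above in rtl871x_security.c and rtw_security.c looks like the classic RC4 key-schedule swap. A minimal standalone sketch of that routine, assuming that reading is right; it is not the drivers' own arcfour code, and the function name is hypothetical:

	/* RC4-style key schedule; keylen must be non-zero. */
	static void demo_arc4_setup(unsigned char state[256],
				    const unsigned char *key, unsigned int keylen)
	{
		unsigned int counter, stateindex = 0, keyindex = 0, t;

		for (counter = 0; counter < 256; counter++)
			state[counter] = (unsigned char)counter;

		for (counter = 0; counter < 256; counter++) {
			t = state[counter];
			stateindex = (stateindex + key[keyindex] + t) & 0xff;
			state[counter] = state[stateindex];
			state[stateindex] = (unsigned char)t;
			if (++keyindex >= keylen)
				keyindex = 0;
		}
	}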
t                1489 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 	u8 t,
t                1545 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 	if (t == 0) {
t                1549 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 			ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
t                1556 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
t                1603 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
t                1604 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
t                1616 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][2] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
t                1617 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][3] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
t                1640 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][4] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
t                1641 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][5] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
t                1653 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][6] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
t                1654 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 				result[t][7] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
t                1671 drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c 	if (t != 0) {
t                 484 drivers/staging/rtl8723bs/include/rtw_mlme.h extern void rtw_join_timeout_handler(struct timer_list *t);
t                 485 drivers/staging/rtl8723bs/include/rtw_mlme.h extern void _rtw_scan_timeout_handler(struct timer_list *t);
t                 584 drivers/staging/rtl8723bs/include/rtw_mlme.h extern void _rtw_join_timeout_handler(struct timer_list *t);
t                 585 drivers/staging/rtl8723bs/include/rtw_mlme.h extern void rtw_scan_timeout_handler(struct timer_list *t);
t                 712 drivers/staging/rtl8723bs/include/rtw_mlme_ext.h void survey_timer_hdl (struct timer_list *t);
t                 713 drivers/staging/rtl8723bs/include/rtw_mlme_ext.h void link_timer_hdl (struct timer_list *t);
t                 714 drivers/staging/rtl8723bs/include/rtw_mlme_ext.h void addba_timer_hdl(struct timer_list *t);
t                 715 drivers/staging/rtl8723bs/include/rtw_mlme_ext.h void sa_query_timer_hdl(struct timer_list *t);
t                 406 drivers/staging/rtl8723bs/include/rtw_recv.h void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
t                  14 drivers/staging/rtl8723bs/os_dep/mlme_linux.c static void _dynamic_check_timer_handler(struct timer_list *t)
t                  17 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
t                  24 drivers/staging/rtl8723bs/os_dep/mlme_linux.c static void _rtw_set_scan_deny_timer_hdl(struct timer_list *t)
t                  27 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		from_timer(adapter, t, mlmepriv.set_scan_deny_timer);
t                 275 drivers/staging/speakup/speakup_dtlk.c 	u_char *t;
t                 288 drivers/staging/speakup/speakup_dtlk.c 	t = buf;
t                 290 drivers/staging/speakup/speakup_dtlk.c 	status.serial_number = t[0] + t[1] * 256;
t                 291 drivers/staging/speakup/speakup_dtlk.c 	t += 2;
t                 292 drivers/staging/speakup/speakup_dtlk.c 	for (i = 0; *t != '\r'; t++) {
t                 293 drivers/staging/speakup/speakup_dtlk.c 		status.rom_version[i] = *t;
t                 298 drivers/staging/speakup/speakup_dtlk.c 	t++;
t                 299 drivers/staging/speakup/speakup_dtlk.c 	status.mode = *t++;
t                 300 drivers/staging/speakup/speakup_dtlk.c 	status.punc_level = *t++;
t                 301 drivers/staging/speakup/speakup_dtlk.c 	status.formant_freq = *t++;
t                 302 drivers/staging/speakup/speakup_dtlk.c 	status.pitch = *t++;
t                 303 drivers/staging/speakup/speakup_dtlk.c 	status.speed = *t++;
t                 304 drivers/staging/speakup/speakup_dtlk.c 	status.volume = *t++;
t                 305 drivers/staging/speakup/speakup_dtlk.c 	status.tone = *t++;
t                 306 drivers/staging/speakup/speakup_dtlk.c 	status.expression = *t++;
t                 307 drivers/staging/speakup/speakup_dtlk.c 	status.ext_dict_loaded = *t++;
t                 308 drivers/staging/speakup/speakup_dtlk.c 	status.ext_dict_status = *t++;
t                 309 drivers/staging/speakup/speakup_dtlk.c 	status.free_ram = *t++;
t                 310 drivers/staging/speakup/speakup_dtlk.c 	status.articulation = *t++;
t                 311 drivers/staging/speakup/speakup_dtlk.c 	status.reverb = *t++;
t                 312 drivers/staging/speakup/speakup_dtlk.c 	status.eob = *t++;
t                 130 drivers/staging/speakup/speakup_ltlk.c 	unsigned char *t, i;
t                 139 drivers/staging/speakup/speakup_ltlk.c 	t = buf + 2;
t                 140 drivers/staging/speakup/speakup_ltlk.c 	for (i = 0; *t != '\r'; t++) {
t                 141 drivers/staging/speakup/speakup_ltlk.c 		rom_v[i] = *t;
t                1759 drivers/staging/unisys/visornic/visornic_main.c static void poll_for_irq(struct timer_list *t)
t                1761 drivers/staging/unisys/visornic/visornic_main.c 	struct visornic_devdata *devdata = from_timer(devdata, t,
t                 595 drivers/staging/uwb/drp.c static void uwb_cnflt_timer(struct timer_list *t)
t                 597 drivers/staging/uwb/drp.c 	struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
t                 104 drivers/staging/uwb/neh.c static void uwb_rc_neh_timer(struct timer_list *t);
t                 554 drivers/staging/uwb/neh.c static void uwb_rc_neh_timer(struct timer_list *t)
t                 556 drivers/staging/uwb/neh.c 	struct uwb_rc_neh *neh = from_timer(neh, t, timer);
t                  15 drivers/staging/uwb/rsv.c static void uwb_rsv_timer(struct timer_list *t);
t                 190 drivers/staging/uwb/rsv.c void uwb_rsv_backoff_win_timer(struct timer_list *t)
t                 192 drivers/staging/uwb/rsv.c 	struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
t                 833 drivers/staging/uwb/rsv.c 	struct uwb_rsv *rsv, *t;
t                 836 drivers/staging/uwb/rsv.c 	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
t                 931 drivers/staging/uwb/rsv.c static void uwb_rsv_timer(struct timer_list *t)
t                 933 drivers/staging/uwb/rsv.c 	struct uwb_rsv *rsv = from_timer(rsv, t, timer);
t                 946 drivers/staging/uwb/rsv.c 	struct uwb_rsv *rsv, *t;
t                 949 drivers/staging/uwb/rsv.c 	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
t                 962 drivers/staging/uwb/rsv.c 	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
t                 307 drivers/staging/uwb/uwb-debug.c 	struct uwb_rsv *rsv, *t;
t                 312 drivers/staging/uwb/uwb-debug.c 	list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
t                 319 drivers/staging/uwb/uwb-internal.h void uwb_rsv_backoff_win_timer(struct timer_list *t);
t                  65 drivers/staging/uwb/whci.c 	unsigned t = 0;
t                  71 drivers/staging/uwb/whci.c 		if (t >= max_ms) {
t                  76 drivers/staging/uwb/whci.c 		t += 10;
t                  82 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c static void suspend_timer_callback(struct timer_list *t);
t                2961 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c static void suspend_timer_callback(struct timer_list *t)
t                2964 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c 					from_timer(arm_state, t, suspend_timer);
t                 987 drivers/staging/wilc1000/wilc_hif.c static void listen_timer_cb(struct timer_list *t)
t                 989 drivers/staging/wilc1000/wilc_hif.c 	struct host_if_drv *hif_drv = from_timer(hif_drv, t,
t                1065 drivers/staging/wilc1000/wilc_hif.c static void timer_scan_cb(struct timer_list *t)
t                1067 drivers/staging/wilc1000/wilc_hif.c 	struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer);
t                1081 drivers/staging/wilc1000/wilc_hif.c static void timer_connect_cb(struct timer_list *t)
t                1083 drivers/staging/wilc1000/wilc_hif.c 	struct host_if_drv *hif_drv = from_timer(hif_drv, t,
t                1542 drivers/staging/wilc1000/wilc_hif.c static void get_periodic_rssi(struct timer_list *t)
t                1544 drivers/staging/wilc1000/wilc_hif.c 	struct wilc_vif *vif = from_timer(vif, t, periodic_rssi);
t                 188 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t);
t                 190 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
t                 192 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usb_throttlefn(struct timer_list *t);
t                3681 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t)
t                3683 drivers/staging/wlan-ng/hfa384x_usb.c 	struct hfa384x *hw = from_timer(hw, t, reqtimer);
t                3740 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbctlx_resptimerfn(struct timer_list *t)
t                3742 drivers/staging/wlan-ng/hfa384x_usb.c 	struct hfa384x *hw = from_timer(hw, t, resptimer);
t                3780 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usb_throttlefn(struct timer_list *t)
t                3782 drivers/staging/wlan-ng/hfa384x_usb.c 	struct hfa384x *hw = from_timer(hw, t, throttle);
t                 168 drivers/staging/wlan-ng/p80211types.h #define P80211DID_MKID(s, g, i, n, t, a)	(P80211DID_MKSECTION(s) | \
t                 172 drivers/staging/wlan-ng/p80211types.h 					P80211DID_MKISTABLE(t) | \
t                 113 drivers/staging/wlan-ng/prism2mgmt.h void prism2sta_commsqual_timer(struct timer_list *t);
t                1998 drivers/staging/wlan-ng/prism2sta.c void prism2sta_commsqual_timer(struct timer_list *t)
t                2000 drivers/staging/wlan-ng/prism2sta.c 	struct hfa384x *hw = from_timer(hw, t, commsqual_timer);
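Almost every timer handler indexed above, from the rtl8712/rtl8723bs and wilc1000 callbacks through the wlan-ng ones just listed, follows the same struct timer_list idiom: the callback receives the timer pointer and recovers its private structure with from_timer(). A minimal self-contained sketch of that idiom; demo_dev, poll_timer and the 100 ms period are hypothetical and taken from none of the files above:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct demo_dev {
		struct timer_list poll_timer;	/* timer embedded in the owner */
		unsigned long polls;
	};

	static void demo_poll_timer(struct timer_list *t)
	{
		/* from_timer() is container_of() specialised for timer_list */
		struct demo_dev *dev = from_timer(dev, t, poll_timer);

		dev->polls++;
		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
	}

	static void demo_start(struct demo_dev *dev)
	{
		timer_setup(&dev->poll_timer, demo_poll_timer, 0);
		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
	}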
t                 166 drivers/staging/wusbcore/host/whci/asl.c 	long t;
t                 171 drivers/staging/wusbcore/host/whci/asl.c 		t = wait_event_timeout(
t                 175 drivers/staging/wusbcore/host/whci/asl.c 		if (t == 0)
t                 192 drivers/staging/wusbcore/host/whci/asl.c 	struct whc_qset *qset, *t;
t                 201 drivers/staging/wusbcore/host/whci/asl.c 	list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
t                 228 drivers/staging/wusbcore/host/whci/asl.c 	list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
t                 299 drivers/staging/wusbcore/host/whci/asl.c 	struct whc_std *std, *t;
t                 310 drivers/staging/wusbcore/host/whci/asl.c 	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
t                  41 drivers/staging/wusbcore/host/whci/hw.c 	int t;
t                  47 drivers/staging/wusbcore/host/whci/hw.c 	t = wait_event_timeout(whc->cmd_wq,
t                  50 drivers/staging/wusbcore/host/whci/hw.c 	if (t == 0) {
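In the whci asl.c and hw.c entries above (and the pzl.c ones that follow), t holds the return value of wait_event_timeout(), which is 0 on timeout and otherwise the number of jiffies remaining. A small sketch of that check, assuming a hypothetical wait queue and completion flag:

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	static int demo_wait(wait_queue_head_t *wq, bool *done)
	{
		long t;

		t = wait_event_timeout(*wq, *done, msecs_to_jiffies(1000));
		if (t == 0)
			return -ETIMEDOUT;	/* condition never became true */

		return 0;			/* woke up with time to spare */
	}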
t                 179 drivers/staging/wusbcore/host/whci/pzl.c 	long t;
t                 184 drivers/staging/wusbcore/host/whci/pzl.c 		t = wait_event_timeout(
t                 188 drivers/staging/wusbcore/host/whci/pzl.c 		if (t == 0)
t                 196 drivers/staging/wusbcore/host/whci/pzl.c 	struct whc_qset *qset, *t;
t                 201 drivers/staging/wusbcore/host/whci/pzl.c 		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
t                 221 drivers/staging/wusbcore/host/whci/pzl.c 	struct whc_qset *qset, *t;
t                 228 drivers/staging/wusbcore/host/whci/pzl.c 		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
t                 256 drivers/staging/wusbcore/host/whci/pzl.c 	list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
t                 327 drivers/staging/wusbcore/host/whci/pzl.c 	struct whc_std *std, *t;
t                 338 drivers/staging/wusbcore/host/whci/pzl.c 	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
t                 322 drivers/staging/wusbcore/host/whci/qset.c 	struct whc_std *std, *t;
t                 324 drivers/staging/wusbcore/host/whci/qset.c 	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
t                 338 drivers/staging/wusbcore/host/whci/qset.c 	struct whc_std *std, *t;
t                 340 drivers/staging/wusbcore/host/whci/qset.c 	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
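In the uwb rsv.c, uwb-debug.c and whci entries above, t is the spare cursor of list_for_each_entry_safe(), which caches the next node so the current entry may be unlinked and freed inside the loop. A short sketch of that use with a hypothetical item type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_item {
		struct list_head node;	/* linkage into the owner's list */
		bool expired;
	};

	static void demo_reap(struct list_head *items)
	{
		struct demo_item *it, *t;

		list_for_each_entry_safe(it, t, items, node) {
			if (it->expired) {
				list_del(&it->node);	/* safe: t already points past it */
				kfree(it);
			}
		}
	}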
t                1222 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = cdev->lldi.tids;
t                1236 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cnp = lookup_stid(t, stid);
t                1249 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk = lookup_tid(t, tid);
t                1378 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
t                1566 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = cdev->lldi.tids;
t                1568 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cxgbit_np *cnp = lookup_stid(t, stid);
t                1588 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = cdev->lldi.tids;
t                1590 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cxgbit_np *cnp = lookup_stid(t, stid);
t                1610 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = cdev->lldi.tids;
t                1618 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk = lookup_tid(t, tid);
t                1891 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = lldi->tids;
t                1893 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk = lookup_tid(t, tid);
t                1912 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = lldi->tids;
t                1914 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk = lookup_tid(t, tid);
t                1952 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct tid_info *t = lldi->tids;
t                1978 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk = lookup_tid(t, tid);
t                3645 drivers/target/iscsi/iscsi_target.c 	struct iscsit_transport *t = conn->conn_transport;
t                3657 drivers/target/iscsi/iscsi_target.c 		ret = t->iscsit_immediate_queue(conn, cmd, state);
t                3774 drivers/target/iscsi/iscsi_target.c 	struct iscsit_transport *t = conn->conn_transport;
t                3785 drivers/target/iscsi/iscsi_target.c 		ret = t->iscsit_response_queue(conn, cmd, state);
t                 744 drivers/target/iscsi/iscsi_target_erl0.c void iscsit_handle_time2retain_timeout(struct timer_list *t)
t                 746 drivers/target/iscsi/iscsi_target_erl0.c 	struct iscsi_session *sess = from_timer(sess, t, time2retain_timer);
t                  15 drivers/target/iscsi/iscsi_target_erl0.h extern void iscsit_handle_time2retain_timeout(struct timer_list *t);
t                1096 drivers/target/iscsi/iscsi_target_erl1.c void iscsit_handle_dataout_timeout(struct timer_list *t)
t                1100 drivers/target/iscsi/iscsi_target_erl1.c 	struct iscsi_cmd *cmd = from_timer(cmd, t, dataout_timer);
t                  33 drivers/target/iscsi/iscsi_target_erl1.h extern void iscsit_handle_dataout_timeout(struct timer_list *t);
t                 800 drivers/target/iscsi/iscsi_target_login.c void iscsi_handle_login_thread_timeout(struct timer_list *t)
t                 802 drivers/target/iscsi/iscsi_target_login.c 	struct iscsi_np *np = from_timer(np, t, np_login_timer);
t                 952 drivers/target/iscsi/iscsi_target_login.c 	struct iscsit_transport *t;
t                 955 drivers/target/iscsi/iscsi_target_login.c 	t = iscsit_get_transport(np->np_network_transport);
t                 956 drivers/target/iscsi/iscsi_target_login.c 	if (!t)
t                 959 drivers/target/iscsi/iscsi_target_login.c 	rc = t->iscsit_setup_np(np, sockaddr);
t                 961 drivers/target/iscsi/iscsi_target_login.c 		iscsit_put_transport(t);
t                 965 drivers/target/iscsi/iscsi_target_login.c 	np->np_transport = t;
t                1086 drivers/target/iscsi/iscsi_target_login.c iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
t                1090 drivers/target/iscsi/iscsi_target_login.c 	if (!t->owner) {
t                1091 drivers/target/iscsi/iscsi_target_login.c 		conn->conn_transport = t;
t                1095 drivers/target/iscsi/iscsi_target_login.c 	rc = try_module_get(t->owner);
t                1097 drivers/target/iscsi/iscsi_target_login.c 		pr_err("try_module_get() failed for %s\n", t->name);
t                1101 drivers/target/iscsi/iscsi_target_login.c 	conn->conn_transport = t;
t                  28 drivers/target/iscsi/iscsi_target_login.h extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
t                 551 drivers/target/iscsi/iscsi_target_nego.c static void iscsi_target_login_timeout(struct timer_list *t)
t                 553 drivers/target/iscsi/iscsi_target_nego.c 	struct conn_timeout *timeout = from_timer(timeout, t, timer);
t                  12 drivers/target/iscsi/iscsi_target_transport.c 	struct iscsit_transport *t;
t                  15 drivers/target/iscsi/iscsi_target_transport.c 	list_for_each_entry(t, &g_transport_list, t_node) {
t                  16 drivers/target/iscsi/iscsi_target_transport.c 		if (t->transport_type == type) {
t                  17 drivers/target/iscsi/iscsi_target_transport.c 			if (t->owner && !try_module_get(t->owner)) {
t                  18 drivers/target/iscsi/iscsi_target_transport.c 				t = NULL;
t                  21 drivers/target/iscsi/iscsi_target_transport.c 			return t;
t                  29 drivers/target/iscsi/iscsi_target_transport.c void iscsit_put_transport(struct iscsit_transport *t)
t                  31 drivers/target/iscsi/iscsi_target_transport.c 	module_put(t->owner);
t                  34 drivers/target/iscsi/iscsi_target_transport.c int iscsit_register_transport(struct iscsit_transport *t)
t                  36 drivers/target/iscsi/iscsi_target_transport.c 	INIT_LIST_HEAD(&t->t_node);
t                  39 drivers/target/iscsi/iscsi_target_transport.c 	list_add_tail(&t->t_node, &g_transport_list);
t                  42 drivers/target/iscsi/iscsi_target_transport.c 	pr_debug("Registered iSCSI transport: %s\n", t->name);
t                  48 drivers/target/iscsi/iscsi_target_transport.c void iscsit_unregister_transport(struct iscsit_transport *t)
t                  51 drivers/target/iscsi/iscsi_target_transport.c 	list_del(&t->t_node);
t                  54 drivers/target/iscsi/iscsi_target_transport.c 	pr_debug("Unregistered iSCSI transport: %s\n", t->name);
t                 912 drivers/target/iscsi/iscsi_target_util.c void iscsit_handle_nopin_response_timeout(struct timer_list *t)
t                 914 drivers/target/iscsi/iscsi_target_util.c 	struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
t                 992 drivers/target/iscsi/iscsi_target_util.c void iscsit_handle_nopin_timeout(struct timer_list *t)
t                 994 drivers/target/iscsi/iscsi_target_util.c 	struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);
t                  51 drivers/target/iscsi/iscsi_target_util.h extern void iscsit_handle_nopin_response_timeout(struct timer_list *t);
t                  55 drivers/target/iscsi/iscsi_target_util.h extern void iscsit_handle_nopin_timeout(struct timer_list *t);
t                 479 drivers/target/target_core_configfs.c 	struct target_fabric_configfs *t;
t                 482 drivers/target/target_core_configfs.c 	list_for_each_entry(t, &g_tf_list, tf_list) {
t                 483 drivers/target/target_core_configfs.c 		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
t                 484 drivers/target/target_core_configfs.c 			BUG_ON(atomic_read(&t->tf_access_cnt));
t                 485 drivers/target/target_core_configfs.c 			list_del(&t->tf_list);
t                 493 drivers/target/target_core_configfs.c 			kfree(t);
t                2784 drivers/target/target_core_configfs.c 	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
t                2786 drivers/target/target_core_configfs.c 		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
t                2792 drivers/target/target_core_configfs.c 	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
t                2796 drivers/target/target_core_configfs.c 	if (!t->tg_pt_gp_valid_id) {					\
t                2799 drivers/target/target_core_configfs.c 		       t->tg_pt_gp_valid_id);				\
t                2813 drivers/target/target_core_configfs.c 		t->tg_pt_gp_alua_supported_states |= _bit;		\
t                2815 drivers/target/target_core_configfs.c 		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
t                2840 drivers/target/target_core_transport.c static const char *cmd_state_name(enum transport_state_table t)
t                2842 drivers/target/target_core_transport.c 	switch (t) {
t                1338 drivers/target/target_core_user.c static void tcmu_cmd_timedout(struct timer_list *t)
t                1340 drivers/target/target_core_user.c 	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
t                1346 drivers/target/target_core_user.c static void tcmu_qfull_timedout(struct timer_list *t)
t                1348 drivers/target/target_core_user.c 	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
t                 146 drivers/thermal/broadcom/brcmstb_thermal.c 	long t;
t                 157 drivers/thermal/broadcom/brcmstb_thermal.c 	t = avs_tmon_code_to_temp(priv->thermal, val);
t                 158 drivers/thermal/broadcom/brcmstb_thermal.c 	if (t < 0)
t                 161 drivers/thermal/broadcom/brcmstb_thermal.c 		*temp = t;
t                  42 drivers/thermal/da9062-thermal.c #define DA9062_MILLI_CELSIUS(t)			((t) * 1000)
t                 781 drivers/thermal/of-thermal.c 	const char *t;
t                 784 drivers/thermal/of-thermal.c 	err = of_property_read_string(np, "type", &t);
t                 789 drivers/thermal/of-thermal.c 		if (!strcasecmp(t, trip_types[i])) {
t                 413 drivers/thermal/tegra/soctherm.c 	int t;
t                 415 drivers/thermal/tegra/soctherm.c 	t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
t                 417 drivers/thermal/tegra/soctherm.c 		t += 500;
t                 419 drivers/thermal/tegra/soctherm.c 		t *= -1;
t                 421 drivers/thermal/tegra/soctherm.c 	return t;
t                  25 drivers/thermal/thermal_mmio.c 	int t;
t                  29 drivers/thermal/thermal_mmio.c 	t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
t                  30 drivers/thermal/thermal_mmio.c 	t *= sensor->factor;
t                  32 drivers/thermal/thermal_mmio.c 	*temp = t;
t                  70 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	struct temp_sensor_registers *t;			\
t                  73 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	t = bgp->conf->sensors[(id)].registers;		\
t                  74 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	r = ti_bandgap_readl(bgp, t->reg);			\
t                  75 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	r &= ~t->mask;						\
t                  76 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	r |= (val) << __ffs(t->mask);				\
t                  77 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	ti_bandgap_writel(bgp, r, t->reg);			\
t                 282 drivers/thermal/ti-soc-thermal/ti-bandgap.c int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
t                 290 drivers/thermal/ti-soc-thermal/ti-bandgap.c 	*t = bgp->conf->conv_table[adc_val - conf->adc_start_val];
t                  55 drivers/thermal/ti-soc-thermal/ti-thermal-common.c static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
t                  57 drivers/thermal/ti-soc-thermal/ti-thermal-common.c 	int delta = t * s / 1000 + c;
t                  62 drivers/thermal/ti-soc-thermal/ti-thermal-common.c 	return t + delta;
t                1198 drivers/tty/cyclades.c static void cyz_rx_restart(struct timer_list *t)
t                1200 drivers/tty/cyclades.c 	struct cyclades_port *info = from_timer(info, t, rx_full_timer);
t                  36 drivers/tty/ipwireless/hardware.c static void ipwireless_setup_timer(struct timer_list *t);
t                1677 drivers/tty/ipwireless/hardware.c static void ipwireless_setup_timer(struct timer_list *t)
t                1679 drivers/tty/ipwireless/hardware.c 	struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
t                 683 drivers/tty/mips_ejtag_fdc.c static void mips_ejtag_fdc_tty_timer(struct timer_list *t)
t                 685 drivers/tty/mips_ejtag_fdc.c 	struct mips_ejtag_fdc_tty *priv = from_timer(priv, t, poll_timer);
t                1316 drivers/tty/n_gsm.c static void gsm_control_retransmit(struct timer_list *t)
t                1318 drivers/tty/n_gsm.c 	struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
t                1469 drivers/tty/n_gsm.c static void gsm_dlci_t1(struct timer_list *t)
t                1471 drivers/tty/n_gsm.c 	struct gsm_dlci *dlci = from_timer(dlci, t, t1);
t                 118 drivers/tty/n_r3964.c static void on_timeout(struct timer_list *t);
t                 698 drivers/tty/n_r3964.c static void on_timeout(struct timer_list *t)
t                 700 drivers/tty/n_r3964.c 	struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
t                 703 drivers/tty/rocket.c 	struct ktermios *t = &tty->termios;
t                 706 drivers/tty/rocket.c 	cflag = t->c_cflag;
t                 257 drivers/tty/serial/8250/8250_core.c static void serial8250_timeout(struct timer_list *t)
t                 259 drivers/tty/serial/8250/8250_core.c 	struct uart_8250_port *up = from_timer(up, t, timer);
t                 265 drivers/tty/serial/8250/8250_core.c static void serial8250_backup_timeout(struct timer_list *t)
t                 267 drivers/tty/serial/8250/8250_core.c 	struct uart_8250_port *up = from_timer(up, t, timer);
t                 575 drivers/tty/serial/8250/8250_port.c static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t);
t                 576 drivers/tty/serial/8250/8250_port.c static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t);
t                1414 drivers/tty/serial/8250/8250_port.c static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
t                1420 drivers/tty/serial/8250/8250_port.c 	em485 = container_of(t, struct uart_8250_em485, stop_tx_timer);
t                1438 drivers/tty/serial/8250/8250_port.c 	ktime_t t = ktime_set(sec, nsec);
t                1440 drivers/tty/serial/8250/8250_port.c 	hrtimer_start(hrt, t, HRTIMER_MODE_REL);
t                1563 drivers/tty/serial/8250/8250_port.c static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
t                1569 drivers/tty/serial/8250/8250_port.c 	em485 = container_of(t, struct uart_8250_em485, start_tx_timer);
t                 301 drivers/tty/serial/altera_uart.c static void altera_uart_timer(struct timer_list *t)
t                 303 drivers/tty/serial/altera_uart.c 	struct altera_uart *pp = from_timer(pp, t, tmr);
t                1063 drivers/tty/serial/amba-pl011.c static void pl011_dma_rx_poll(struct timer_list *t)
t                1065 drivers/tty/serial/amba-pl011.c 	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
t                  69 drivers/tty/serial/ar933x_uart.c 	unsigned int t;
t                  71 drivers/tty/serial/ar933x_uart.c 	t = ar933x_uart_read(up, offset);
t                  72 drivers/tty/serial/ar933x_uart.c 	t &= ~mask;
t                  73 drivers/tty/serial/ar933x_uart.c 	t |= val;
t                  74 drivers/tty/serial/ar933x_uart.c 	ar933x_uart_write(up, offset, t);
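The ar933x_uart lines above (like the ti-bandgap macro earlier in this listing) are the standard masked read-modify-write on a device register. A generic sketch under the assumption of a plain MMIO register; demo_rmw and its arguments are hypothetical, readl()/writel() are the stock accessors:

	#include <linux/io.h>

	/* Clear the bits in mask, then OR in val (val already shifted into place). */
	static void demo_rmw(void __iomem *reg, u32 mask, u32 val)
	{
		u32 t;

		t = readl(reg);
		t &= ~mask;
		t |= val;
		writel(t, reg);
	}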
t                 183 drivers/tty/serial/ar933x_uart.c 	u64 t;
t                 187 drivers/tty/serial/ar933x_uart.c 	t = clk;
t                 188 drivers/tty/serial/ar933x_uart.c 	t *= step;
t                 189 drivers/tty/serial/ar933x_uart.c 	t += (div / 2);
t                 190 drivers/tty/serial/ar933x_uart.c 	do_div(t, div);
t                 192 drivers/tty/serial/ar933x_uart.c 	return t;
t                 291 drivers/tty/serial/atmel_serial.c 				   struct tasklet_struct *t)
t                 294 drivers/tty/serial/atmel_serial.c 		tasklet_schedule(t);
t                1276 drivers/tty/serial/atmel_serial.c static void atmel_uart_timer_callback(struct timer_list *t)
t                1278 drivers/tty/serial/atmel_serial.c 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
t                1133 drivers/tty/serial/fsl_lpuart.c static void lpuart_timer_func(struct timer_list *t)
t                1135 drivers/tty/serial/fsl_lpuart.c 	struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
t                 266 drivers/tty/serial/ifx6x60.c static void ifx_spi_timeout(struct timer_list *t)
t                 268 drivers/tty/serial/ifx6x60.c 	struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
t                1048 drivers/tty/serial/imx.c static void imx_uart_timeout(struct timer_list *t)
t                1050 drivers/tty/serial/imx.c 	struct imx_port *sport = from_timer(sport, t, timer);
t                 191 drivers/tty/serial/kgdb_nmi.c static void kgdb_nmi_tty_receiver(struct timer_list *t)
t                 193 drivers/tty/serial/kgdb_nmi.c 	struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
t                 181 drivers/tty/serial/max3100.c static void max3100_timeout(struct timer_list *t)
t                 183 drivers/tty/serial/max3100.c 	struct max3100_port *s = from_timer(s, t, timer);
t                1106 drivers/tty/serial/pmac_zilog.c 	int t, version;
t                1147 drivers/tty/serial/pmac_zilog.c 	t = 10000;
t                1150 drivers/tty/serial/pmac_zilog.c 		if (--t <= 0) {
t                1158 drivers/tty/serial/pmac_zilog.c 	t = 100;
t                1166 drivers/tty/serial/pmac_zilog.c 		if (--t <= 0) {
t                1185 drivers/tty/serial/pmac_zilog.c 	t = 5000;
t                1187 drivers/tty/serial/pmac_zilog.c 		if (--t <= 0) {
t                1202 drivers/tty/serial/pmac_zilog.c 	t = 5000;
t                1204 drivers/tty/serial/pmac_zilog.c 		if (--t <= 0) {
t                1210 drivers/tty/serial/pmac_zilog.c 	t = read_zsdata(uap);
t                1211 drivers/tty/serial/pmac_zilog.c 	if (t != cmdbyte)
t                1212 drivers/tty/serial/pmac_zilog.c 		pmz_error("irda_setup speed mode byte = %x (%x)\n", t, cmdbyte);
t                 106 drivers/tty/serial/pnx8xxx_uart.c static void pnx8xxx_timeout(struct timer_list *t)
t                 108 drivers/tty/serial/pnx8xxx_uart.c 	struct pnx8xxx_port *sport = from_timer(sport, t, timer);
t                 116 drivers/tty/serial/sa1100.c static void sa1100_timeout(struct timer_list *t)
t                 118 drivers/tty/serial/sa1100.c 	struct sa1100_port *sport = from_timer(sport, t, timer);
t                 387 drivers/tty/serial/samsung.c 	struct tty_port *t = &port->state->port;
t                 409 drivers/tty/serial/samsung.c 			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
t                 450 drivers/tty/serial/samsung.c 	struct tty_port *t = &port->state->port;
t                 464 drivers/tty/serial/samsung.c 		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
t                 467 drivers/tty/serial/samsung.c 		tty_flip_buffer_push(t);
t                 554 drivers/tty/serial/samsung.c 	struct tty_port *t = &port->state->port;
t                 575 drivers/tty/serial/samsung.c 		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
t                 583 drivers/tty/serial/samsung.c 		tty_flip_buffer_push(t);
t                 503 drivers/tty/serial/sccnxp.c static void sccnxp_timer(struct timer_list *t)
t                 505 drivers/tty/serial/sccnxp.c 	struct sccnxp_port *s = from_timer(s, t, timer);
t                1093 drivers/tty/serial/sh-sci.c static void rx_fifo_timer_fn(struct timer_list *t)
t                1095 drivers/tty/serial/sh-sci.c 	struct sci_port *s = from_timer(s, t, rx_fifo_timer);
t                1267 drivers/tty/serial/sh-sci.c 	ktime_t t = ktime_set(sec, nsec);
t                1269 drivers/tty/serial/sh-sci.c 	hrtimer_start(hrt, t, HRTIMER_MODE_REL);
t                1464 drivers/tty/serial/sh-sci.c static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
t                1466 drivers/tty/serial/sh-sci.c 	struct sci_port *s = container_of(t, struct sci_port, rx_timer);
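The hrtimer handlers indexed above (8250_port, sh-sci, and the chipidea and EHCI/FOTG210 ones earlier) recover their state with plain container_of() and are armed with ktime_set() plus hrtimer_start(). A minimal sketch of that pattern; demo_port and its fields are hypothetical:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	struct demo_port {
		struct hrtimer rx_timer;	/* hrtimer embedded in the owner */
		unsigned long timeouts;
	};

	static enum hrtimer_restart demo_rx_timer_fn(struct hrtimer *t)
	{
		struct demo_port *p = container_of(t, struct demo_port, rx_timer);

		p->timeouts++;
		return HRTIMER_NORESTART;	/* one-shot; re-armed by the caller */
	}

	static void demo_port_init(struct demo_port *p)
	{
		hrtimer_init(&p->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		p->rx_timer.function = demo_rx_timer_fn;
	}

	static void demo_port_arm(struct demo_port *p, s64 sec, unsigned long nsec)
	{
		ktime_t t = ktime_set(sec, nsec);

		hrtimer_start(&p->rx_timer, t, HRTIMER_MODE_REL);
	}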
t                 703 drivers/tty/synclink.c static void mgsl_tx_timeout(struct timer_list *t);
t                7452 drivers/tty/synclink.c static void mgsl_tx_timeout(struct timer_list *t)
t                7454 drivers/tty/synclink.c 	struct mgsl_struct *info = from_timer(info, t, tx_timer);
t                 496 drivers/tty/synclink_gt.c static void tx_timeout(struct timer_list *t);
t                 497 drivers/tty/synclink_gt.c static void rx_timeout(struct timer_list *t);
t                5093 drivers/tty/synclink_gt.c static void tx_timeout(struct timer_list *t)
t                5095 drivers/tty/synclink_gt.c 	struct slgt_info *info = from_timer(info, t, tx_timer);
t                5117 drivers/tty/synclink_gt.c static void rx_timeout(struct timer_list *t)
t                5119 drivers/tty/synclink_gt.c 	struct slgt_info *info = from_timer(info, t, rx_timer);
t                 618 drivers/tty/synclinkmp.c static void tx_timeout(struct timer_list *t);
t                 619 drivers/tty/synclinkmp.c static void status_timeout(struct timer_list *t);
t                5456 drivers/tty/synclinkmp.c static void tx_timeout(struct timer_list *t)
t                5458 drivers/tty/synclinkmp.c 	SLMP_INFO *info = from_timer(info, t, tx_timer);
t                5483 drivers/tty/synclinkmp.c static void status_timeout(struct timer_list *t)
t                5486 drivers/tty/synclinkmp.c 	SLMP_INFO *info = from_timer(info, t, status_timer);
t                 656 drivers/tty/sysrq.c static void sysrq_do_reset(struct timer_list *t)
t                 658 drivers/tty/sysrq.c 	struct sysrq_state *state = from_timer(state, t, keyreset_timer);
t                2863 drivers/tty/tty_io.c static int this_tty(const void *t, struct file *file, unsigned fd)
t                2867 drivers/tty/tty_io.c 	return file_tty(file) != t ? 0 : fd + 1;
t                 365 drivers/tty/vcc.c static void vcc_rx_timer(struct timer_list *t)
t                 367 drivers/tty/vcc.c 	struct vcc_port *port = from_timer(port, t, rx_timer);
t                 391 drivers/tty/vcc.c static void vcc_tx_timer(struct timer_list *t)
t                 393 drivers/tty/vcc.c 	struct vcc_port *port = from_timer(port, t, tx_timer);
t                 202 drivers/tty/vt/consolemap.c 	unsigned short *t = translations[i];
t                 215 drivers/tty/vt/consolemap.c 		glyph = conv_uni_to_pc(conp, t[j]);
t                 438 drivers/tty/vt/vt.c static void vc_uniscr_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
t                 446 drivers/tty/vt/vt.c 		sz = b - t;
t                 450 drivers/tty/vt/vt.c 			clear = t;
t                 454 drivers/tty/vt/vt.c 			char32_t *tmp = uniscr->lines[t + i];
t                 462 drivers/tty/vt/vt.c 				uniscr->lines[t + j] = uniscr->lines[t + k];
t                 465 drivers/tty/vt/vt.c 			uniscr->lines[t + j] = tmp;
t                 627 drivers/tty/vt/vt.c static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
t                 632 drivers/tty/vt/vt.c 	if (t + nr >= b)
t                 633 drivers/tty/vt/vt.c 		nr = b - t - 1;
t                 634 drivers/tty/vt/vt.c 	if (b > vc->vc_rows || t >= b || nr < 1)
t                 636 drivers/tty/vt/vt.c 	vc_uniscr_scroll(vc, t, b, dir, nr);
t                 637 drivers/tty/vt/vt.c 	if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
t                 640 drivers/tty/vt/vt.c 	s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
t                 641 drivers/tty/vt/vt.c 	d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
t                 644 drivers/tty/vt/vt.c 		clear = s + (b - t - nr) * vc->vc_cols;
t                 647 drivers/tty/vt/vt.c 	scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
t                 584 drivers/usb/atm/cxacru.c static void cxacru_timeout_kill(struct timer_list *t)
t                 586 drivers/usb/atm/cxacru.c 	struct cxacru_timer *timer = from_timer(timer, t, timer);
t                 560 drivers/usb/atm/speedtch.c static void speedtch_status_poll(struct timer_list *t)
t                 562 drivers/usb/atm/speedtch.c 	struct speedtch_instance_data *instance = from_timer(instance, t,
t                 574 drivers/usb/atm/speedtch.c static void speedtch_resubmit_int(struct timer_list *t)
t                 576 drivers/usb/atm/speedtch.c 	struct speedtch_instance_data *instance = from_timer(instance, t,
t                 414 drivers/usb/atm/ueagle-atm.c #define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
t                 415 drivers/usb/atm/ueagle-atm.c #define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
t                 975 drivers/usb/atm/usbatm.c 	struct task_struct *t;
t                 977 drivers/usb/atm/usbatm.c 	t = kthread_create(usbatm_do_heavy_init, instance, "%s",
t                 979 drivers/usb/atm/usbatm.c 	if (IS_ERR(t)) {
t                 981 drivers/usb/atm/usbatm.c 				__func__, PTR_ERR(t));
t                 982 drivers/usb/atm/usbatm.c 		return PTR_ERR(t);
t                 985 drivers/usb/atm/usbatm.c 	instance->thread = t;
t                 986 drivers/usb/atm/usbatm.c 	wake_up_process(t);
t                 992 drivers/usb/atm/usbatm.c static void usbatm_tasklet_schedule(struct timer_list *t)
t                 994 drivers/usb/atm/usbatm.c 	struct usbatm_channel *channel = from_timer(channel, t, delay);
t                  35 drivers/usb/chipidea/otg_fsm.c 	unsigned	size, t;
t                  40 drivers/usb/chipidea/otg_fsm.c 	t = scnprintf(next, size, "%d\n", ci->fsm.a_bus_req);
t                  41 drivers/usb/chipidea/otg_fsm.c 	size -= t;
t                  42 drivers/usb/chipidea/otg_fsm.c 	next += t;
t                  84 drivers/usb/chipidea/otg_fsm.c 	unsigned	size, t;
t                  89 drivers/usb/chipidea/otg_fsm.c 	t = scnprintf(next, size, "%d\n", ci->fsm.a_bus_drop);
t                  90 drivers/usb/chipidea/otg_fsm.c 	size -= t;
t                  91 drivers/usb/chipidea/otg_fsm.c 	next += t;
t                 124 drivers/usb/chipidea/otg_fsm.c 	unsigned	size, t;
t                 129 drivers/usb/chipidea/otg_fsm.c 	t = scnprintf(next, size, "%d\n", ci->fsm.b_bus_req);
t                 130 drivers/usb/chipidea/otg_fsm.c 	size -= t;
t                 131 drivers/usb/chipidea/otg_fsm.c 	next += t;
t                 219 drivers/usb/chipidea/otg_fsm.c static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
t                 223 drivers/usb/chipidea/otg_fsm.c 	if (t >= NUM_OTG_FSM_TIMERS)
t                 227 drivers/usb/chipidea/otg_fsm.c 	timer_sec = otg_timer_ms[t] / MSEC_PER_SEC;
t                 228 drivers/usb/chipidea/otg_fsm.c 	timer_nsec = (otg_timer_ms[t] % MSEC_PER_SEC) * NSEC_PER_MSEC;
t                 229 drivers/usb/chipidea/otg_fsm.c 	ci->hr_timeouts[t] = ktime_add(ktime_get(),
t                 231 drivers/usb/chipidea/otg_fsm.c 	ci->enabled_otg_timer_bits |= (1 << t);
t                 234 drivers/usb/chipidea/otg_fsm.c 						ci->hr_timeouts[t])) {
t                 235 drivers/usb/chipidea/otg_fsm.c 			ci->next_otg_timer = t;
t                 237 drivers/usb/chipidea/otg_fsm.c 					ci->hr_timeouts[t], NSEC_PER_MSEC,
t                 246 drivers/usb/chipidea/otg_fsm.c static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
t                 251 drivers/usb/chipidea/otg_fsm.c 	if ((t >= NUM_OTG_FSM_TIMERS) ||
t                 252 drivers/usb/chipidea/otg_fsm.c 			!(ci->enabled_otg_timer_bits & (1 << t)))
t                 256 drivers/usb/chipidea/otg_fsm.c 	ci->enabled_otg_timer_bits &= ~(1 << t);
t                 257 drivers/usb/chipidea/otg_fsm.c 	if (ci->next_otg_timer == t) {
t                 381 drivers/usb/chipidea/otg_fsm.c static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
t                 383 drivers/usb/chipidea/otg_fsm.c 	struct ci_hdrc *ci = container_of(t, struct ci_hdrc, otg_fsm_hrtimer);
t                 434 drivers/usb/chipidea/otg_fsm.c static void ci_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
t                 438 drivers/usb/chipidea/otg_fsm.c 	if (t < NUM_OTG_FSM_TIMERS)
t                 439 drivers/usb/chipidea/otg_fsm.c 		ci_otg_add_timer(ci, t);
t                 443 drivers/usb/chipidea/otg_fsm.c static void ci_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
t                 447 drivers/usb/chipidea/otg_fsm.c 	if (t < NUM_OTG_FSM_TIMERS)
t                 448 drivers/usb/chipidea/otg_fsm.c 		ci_otg_del_timer(ci, t);
t                 474 drivers/usb/core/devio.c 	const char *t, *d;
t                 480 drivers/usb/core/devio.c 	t = types[usb_pipetype(pipe)];
t                 487 drivers/usb/core/devio.c 					userurb, ep, t, d, length);
t                 491 drivers/usb/core/devio.c 					userurb, ep, t, d, length,
t                 497 drivers/usb/core/devio.c 					ep, t, d, length, timeout_or_status);
t                 501 drivers/usb/core/devio.c 					ep, t, d, length, timeout_or_status);
t                 404 drivers/usb/core/hcd.c 	unsigned n, t = 2 + 2*strlen(s);
t                 406 drivers/usb/core/hcd.c 	if (t > 254)
t                 407 drivers/usb/core/hcd.c 		t = 254;	/* Longest possible UTF string descriptor */
t                 408 drivers/usb/core/hcd.c 	if (len > t)
t                 409 drivers/usb/core/hcd.c 		len = t;
t                 411 drivers/usb/core/hcd.c 	t += USB_DT_STRING << 8;	/* Now t is first 16 bits to store */
t                 415 drivers/usb/core/hcd.c 		*buf++ = t;
t                 418 drivers/usb/core/hcd.c 		*buf++ = t >> 8;
t                 419 drivers/usb/core/hcd.c 		t = (unsigned char)*s++;
t                 796 drivers/usb/core/hcd.c static void rh_timer_func (struct timer_list *t)
t                 798 drivers/usb/core/hcd.c 	struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
t                 637 drivers/usb/core/hub.c static void hub_retry_irq_urb(struct timer_list *t)
t                 639 drivers/usb/core/hub.c 	struct usb_hub *hub = from_timer(hub, t, irq_urb_retry);
t                3247 drivers/usb/dwc2/hcd.c static void dwc2_wakeup_detected(struct timer_list *t)
t                3249 drivers/usb/dwc2/hcd.c 	struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
t                1282 drivers/usb/dwc2/hcd_queue.c static void dwc2_unreserve_timer_fn(struct timer_list *t)
t                1284 drivers/usb/dwc2/hcd_queue.c 	struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
t                1470 drivers/usb/dwc2/hcd_queue.c static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
t                1472 drivers/usb/dwc2/hcd_queue.c 	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
t                 108 drivers/usb/gadget/composite.c next_ep_desc(struct usb_descriptor_header **t)
t                 110 drivers/usb/gadget/composite.c 	for (; *t; t++) {
t                 111 drivers/usb/gadget/composite.c 		if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
t                 112 drivers/usb/gadget/composite.c 			return t;
t                2589 drivers/usb/gadget/function/f_fs.c 	struct usb_gadget_strings **stringtabs, *t;
t                2639 drivers/usb/gadget/function/f_fs.c 		t = vla_ptr(vlabuf, d, stringtab);
t                2642 drivers/usb/gadget/function/f_fs.c 			*stringtabs++ = t++;
t                2648 drivers/usb/gadget/function/f_fs.c 		t = vla_ptr(vlabuf, d, stringtab);
t                2661 drivers/usb/gadget/function/f_fs.c 		t->language = get_unaligned_le16(data);
t                2662 drivers/usb/gadget/function/f_fs.c 		t->strings  = s;
t                2663 drivers/usb/gadget/function/f_fs.c 		++t;
t                2974 drivers/usb/gadget/function/f_fs.c 		struct usb_os_desc_table *t;
t                2976 drivers/usb/gadget/function/f_fs.c 		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
t                2977 drivers/usb/gadget/function/f_fs.c 		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
t                2978 drivers/usb/gadget/function/f_fs.c 		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
t                2986 drivers/usb/gadget/function/f_fs.c 		struct usb_os_desc_table *t;
t                2991 drivers/usb/gadget/function/f_fs.c 		t = &func->function.os_desc_table[h->interface];
t                2992 drivers/usb/gadget/function/f_fs.c 		t->if_id = func->interfaces_nums[h->interface];
t                3030 drivers/usb/gadget/function/f_fs.c 		t->os_desc->ext_prop_len +=
t                3032 drivers/usb/gadget/function/f_fs.c 		++t->os_desc->ext_prop_count;
t                3033 drivers/usb/gadget/function/f_fs.c 		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
t                  31 drivers/usb/gadget/function/u_uac1_legacy.c 	struct snd_interval t;
t                  32 drivers/usb/gadget/function/u_uac1_legacy.c 	t.empty = 0;
t                  33 drivers/usb/gadget/function/u_uac1_legacy.c 	t.min = t.max = val;
t                  34 drivers/usb/gadget/function/u_uac1_legacy.c 	t.openmin = t.openmax = 0;
t                  35 drivers/usb/gadget/function/u_uac1_legacy.c 	t.integer = 1;
t                  36 drivers/usb/gadget/function/u_uac1_legacy.c 	return snd_interval_refine(i, &t);
t                  65 drivers/usb/gadget/function/u_uac1_legacy.c 			struct snd_interval t;
t                  66 drivers/usb/gadget/function/u_uac1_legacy.c 			t.openmin = 1;
t                  67 drivers/usb/gadget/function/u_uac1_legacy.c 			t.openmax = 1;
t                  68 drivers/usb/gadget/function/u_uac1_legacy.c 			t.empty = 0;
t                  69 drivers/usb/gadget/function/u_uac1_legacy.c 			t.integer = 0;
t                  71 drivers/usb/gadget/function/u_uac1_legacy.c 				t.min = val - 1;
t                  72 drivers/usb/gadget/function/u_uac1_legacy.c 				t.max = val;
t                  74 drivers/usb/gadget/function/u_uac1_legacy.c 				t.min = val;
t                  75 drivers/usb/gadget/function/u_uac1_legacy.c 				t.max = val+1;
t                  77 drivers/usb/gadget/function/u_uac1_legacy.c 			changed = snd_interval_refine(i, &t);
t                1540 drivers/usb/gadget/udc/at91_udc.c static void at91_vbus_timer(struct timer_list *t)
t                1542 drivers/usb/gadget/udc/at91_udc.c 	struct at91_udc *udc = from_timer(udc, t, vbus_timer);
t                1759 drivers/usb/gadget/udc/dummy_hcd.c static void dummy_timer(struct timer_list *t)
t                1761 drivers/usb/gadget/udc/dummy_hcd.c 	struct dummy_hcd	*dum_hcd = from_timer(dum_hcd, t, timer);
t                1262 drivers/usb/gadget/udc/m66592-udc.c static void m66592_timer(struct timer_list *t)
t                1264 drivers/usb/gadget/udc/m66592-udc.c 	struct m66592 *m66592 = from_timer(m66592, t, timer);
t                1170 drivers/usb/gadget/udc/net2272.c 	unsigned size, t;
t                1182 drivers/usb/gadget/udc/net2272.c 	t = scnprintf(next, size, "%s version %s,"
t                1192 drivers/usb/gadget/udc/net2272.c 	size -= t;
t                1193 drivers/usb/gadget/udc/net2272.c 	next += t;
t                1197 drivers/usb/gadget/udc/net2272.c 	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
t                1203 drivers/usb/gadget/udc/net2272.c 	size -= t;
t                1204 drivers/usb/gadget/udc/net2272.c 	next += t;
t                1217 drivers/usb/gadget/udc/net2272.c 	t = scnprintf(next, size,
t                1221 drivers/usb/gadget/udc/net2272.c 	size -= t;
t                1222 drivers/usb/gadget/udc/net2272.c 	next += t;
t                1234 drivers/usb/gadget/udc/net2272.c 		t = scnprintf(next, size,
t                1247 drivers/usb/gadget/udc/net2272.c 		size -= t;
t                1248 drivers/usb/gadget/udc/net2272.c 		next += t;
t                1250 drivers/usb/gadget/udc/net2272.c 		t = scnprintf(next, size,
t                1260 drivers/usb/gadget/udc/net2272.c 		size -= t;
t                1261 drivers/usb/gadget/udc/net2272.c 		next += t;
t                1263 drivers/usb/gadget/udc/net2272.c 		t = scnprintf(next, size,
t                1268 drivers/usb/gadget/udc/net2272.c 		size -= t;
t                1269 drivers/usb/gadget/udc/net2272.c 		next += t;
t                1273 drivers/usb/gadget/udc/net2272.c 		t = scnprintf(next, size,
t                1277 drivers/usb/gadget/udc/net2272.c 		size -= t;
t                1278 drivers/usb/gadget/udc/net2272.c 		next += t;
t                1942 drivers/usb/gadget/udc/net2272.c 		u8 t;
t                1945 drivers/usb/gadget/udc/net2272.c 		t = 1 << num;
t                1946 drivers/usb/gadget/udc/net2272.c 		if ((scratch & t) == 0)
t                1948 drivers/usb/gadget/udc/net2272.c 		scratch ^= t;
t                1659 drivers/usb/gadget/udc/net2280.c 	unsigned		size, t;
t                1676 drivers/usb/gadget/udc/net2280.c 	t = scnprintf(next, size, "%s version " DRIVER_VERSION
t                1689 drivers/usb/gadget/udc/net2280.c 	size -= t;
t                1690 drivers/usb/gadget/udc/net2280.c 	next += t;
t                1705 drivers/usb/gadget/udc/net2280.c 	t = scnprintf(next, size,
t                1710 drivers/usb/gadget/udc/net2280.c 	size -= t;
t                1711 drivers/usb/gadget/udc/net2280.c 	next += t;
t                1727 drivers/usb/gadget/udc/net2280.c 		t = scnprintf(next, size,
t                1748 drivers/usb/gadget/udc/net2280.c 		size -= t;
t                1749 drivers/usb/gadget/udc/net2280.c 		next += t;
t                1751 drivers/usb/gadget/udc/net2280.c 		t = scnprintf(next, size,
t                1759 drivers/usb/gadget/udc/net2280.c 		size -= t;
t                1760 drivers/usb/gadget/udc/net2280.c 		next += t;
t                1765 drivers/usb/gadget/udc/net2280.c 		t = scnprintf(next, size,
t                1773 drivers/usb/gadget/udc/net2280.c 		size -= t;
t                1774 drivers/usb/gadget/udc/net2280.c 		next += t;
t                1781 drivers/usb/gadget/udc/net2280.c 	t = scnprintf(next, size, "\nirqs:  ");
t                1782 drivers/usb/gadget/udc/net2280.c 	size -= t;
t                1783 drivers/usb/gadget/udc/net2280.c 	next += t;
t                1790 drivers/usb/gadget/udc/net2280.c 		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
t                1791 drivers/usb/gadget/udc/net2280.c 		size -= t;
t                1792 drivers/usb/gadget/udc/net2280.c 		next += t;
t                1795 drivers/usb/gadget/udc/net2280.c 	t = scnprintf(next, size, "\n");
t                1796 drivers/usb/gadget/udc/net2280.c 	size -= t;
t                1797 drivers/usb/gadget/udc/net2280.c 	next += t;
t                1822 drivers/usb/gadget/udc/net2280.c 		int				t;
t                1830 drivers/usb/gadget/udc/net2280.c 			t = d->bEndpointAddress;
t                1831 drivers/usb/gadget/udc/net2280.c 			t = scnprintf(next, size,
t                1833 drivers/usb/gadget/udc/net2280.c 				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
t                1834 drivers/usb/gadget/udc/net2280.c 				(t & USB_DIR_IN) ? "in" : "out",
t                1840 drivers/usb/gadget/udc/net2280.c 			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
t                1842 drivers/usb/gadget/udc/net2280.c 		if (t <= 0 || t > size)
t                1844 drivers/usb/gadget/udc/net2280.c 		size -= t;
t                1845 drivers/usb/gadget/udc/net2280.c 		next += t;
t                1848 drivers/usb/gadget/udc/net2280.c 			t = scnprintf(next, size, "\t(nothing queued)\n");
t                1849 drivers/usb/gadget/udc/net2280.c 			if (t <= 0 || t > size)
t                1851 drivers/usb/gadget/udc/net2280.c 			size -= t;
t                1852 drivers/usb/gadget/udc/net2280.c 			next += t;
t                1857 drivers/usb/gadget/udc/net2280.c 				t = scnprintf(next, size,
t                1864 drivers/usb/gadget/udc/net2280.c 				t = scnprintf(next, size,
t                1868 drivers/usb/gadget/udc/net2280.c 			if (t <= 0 || t > size)
t                1870 drivers/usb/gadget/udc/net2280.c 			size -= t;
t                1871 drivers/usb/gadget/udc/net2280.c 			next += t;
t                1877 drivers/usb/gadget/udc/net2280.c 				t = scnprintf(next, size, "\t    td %08x "
t                1883 drivers/usb/gadget/udc/net2280.c 				if (t <= 0 || t > size)
t                1885 drivers/usb/gadget/udc/net2280.c 				size -= t;
t                1886 drivers/usb/gadget/udc/net2280.c 				next += t;
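The net2272/net2280 debug output above is built with the scnprintf() accumulation idiom: scnprintf() returns the number of characters actually written (never more than the remaining size, NUL excluded), so subtracting it from size and advancing next keeps the two consistent. A compact sketch with a hypothetical dump helper:

	#include <linux/kernel.h>

	/* Fill buf (len bytes) with a couple of lines; return the bytes used. */
	static int demo_dump(char *buf, unsigned int len,
			     unsigned int irqs, unsigned int errs)
	{
		char *next = buf;
		unsigned int size = len;
		int t;

		t = scnprintf(next, size, "irqs %u\n", irqs);
		size -= t;
		next += t;

		t = scnprintf(next, size, "errs %u\n", errs);
		size -= t;
		next += t;

		return len - size;
	}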
t                2515 drivers/usb/gadget/udc/net2280.c 	u32			t;
t                2526 drivers/usb/gadget/udc/net2280.c 	t = readl(&ep->regs->ep_stat);
t                2530 drivers/usb/gadget/udc/net2280.c 			ep->ep.name, t, req ? &req->req : NULL);
t                2533 drivers/usb/gadget/udc/net2280.c 		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
t                2536 drivers/usb/gadget/udc/net2280.c 		writel(t, &ep->regs->ep_stat);
t                2551 drivers/usb/gadget/udc/net2280.c 			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
t                2560 drivers/usb/gadget/udc/net2280.c 			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
t                2571 drivers/usb/gadget/udc/net2280.c 			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
t                2578 drivers/usb/gadget/udc/net2280.c 			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
t                2597 drivers/usb/gadget/udc/net2280.c 		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
t                2609 drivers/usb/gadget/udc/net2280.c 			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
t                2626 drivers/usb/gadget/udc/net2280.c 				if (likely(t & BIT(FIFO_EMPTY))) {
t                2668 drivers/usb/gadget/udc/net2280.c 				t = readl(&ep->regs->ep_avail);
t                2670 drivers/usb/gadget/udc/net2280.c 					(ep->out_overflow || t)
t                2689 drivers/usb/gadget/udc/net2280.c 					ep->ep.name, t);
t                2693 drivers/usb/gadget/udc/net2280.c 	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
t                2698 drivers/usb/gadget/udc/net2280.c 	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
t                2747 drivers/usb/gadget/udc/net2280.c 		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
t                3327 drivers/usb/gadget/udc/net2280.c 			u32		t;
t                3330 drivers/usb/gadget/udc/net2280.c 			t = BIT(num);
t                3331 drivers/usb/gadget/udc/net2280.c 			if ((scratch & t) == 0)
t                3333 drivers/usb/gadget/udc/net2280.c 			scratch ^= t;
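In the second net2280.c block above (handle_ep_small(), source line 2515 onward) t holds a snapshot of the endpoint status register: it is read once with readl(), written back in a way that reads like a write-1-to-clear acknowledge, and then tested bit by bit to dispatch the event. A generic, hypothetical sketch of that read/ack/dispatch shape, with register layout and bit names invented:

#include <linux/io.h>
#include <linux/bits.h>

/* Invented bit positions, for illustration only. */
#define STAT_DATA_IN	0	/* data-in token interrupt  */
#define STAT_DATA_OUT	1	/* data-out token interrupt */
#define STAT_FIFO_EMPTY	2	/* FIFO drained             */

static void handle_ep_irq(void __iomem *ep_stat)
{
	u32 t;

	t = readl(ep_stat);	/* snapshot the status bits          */
	writel(t, ep_stat);	/* acknowledge (assumed W1C register) */

	if (t & BIT(STAT_DATA_OUT)) {
		/* handle an OUT token ... */
	} else if (t & BIT(STAT_DATA_IN)) {
		/* handle an IN token ... */
	}

	if (t & BIT(STAT_FIFO_EMPTY)) {
		/* drain or refill the FIFO ... */
	}
}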
t                1857 drivers/usb/gadget/udc/omap_udc.c static void pio_out_timer(struct timer_list *t)
t                1859 drivers/usb/gadget/udc/omap_udc.c 	struct omap_ep	*ep = from_timer(ep, t, timer);
t                1613 drivers/usb/gadget/udc/pxa25x_udc.c static void udc_watchdog(struct timer_list *t)
t                1615 drivers/usb/gadget/udc/pxa25x_udc.c 	struct pxa25x_udc	*dev = from_timer(dev, t, timer);
t                1517 drivers/usb/gadget/udc/r8a66597-udc.c static void r8a66597_timer(struct timer_list *t)
t                1519 drivers/usb/gadget/udc/r8a66597-udc.c 	struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
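From here on, a large share of the t hits are timer callbacks converted to the timer_setup()/from_timer() API: the callback receives the struct timer_list pointer and recovers the enclosing driver structure with from_timer(), a container_of() wrapper (omap_udc, pxa25x_udc, r8a66597, ohci, mos7840, musb, vudc, radeonfb, fbcon and others below). A minimal sketch of the pattern with a hypothetical device structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical device structure embedding its own timer. */
struct my_dev {
	struct timer_list timer;
	int pending;
};

/* The callback gets the timer_list pointer; from_timer() maps it
 * back to the embedding my_dev via container_of(). */
static void my_dev_timeout(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	dev->pending = 0;
}

static void my_dev_start(struct my_dev *dev)
{
	timer_setup(&dev->timer, my_dev_timeout, 0);
	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(100));
}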
t                1834 drivers/usb/host/ehci-sched.c 	u32					t;
t                1847 drivers/usb/host/ehci-sched.c 		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
t                1851 drivers/usb/host/ehci-sched.c 		if (unlikely(t & ISO_ERRS)) {
t                1853 drivers/usb/host/ehci-sched.c 			if (t & EHCI_ISOC_BUF_ERR)
t                1857 drivers/usb/host/ehci-sched.c 			else if (t & EHCI_ISOC_BABBLE)
t                1863 drivers/usb/host/ehci-sched.c 			if (!(t & EHCI_ISOC_BABBLE)) {
t                1864 drivers/usb/host/ehci-sched.c 				desc->actual_length = EHCI_ITD_LENGTH(t);
t                1867 drivers/usb/host/ehci-sched.c 		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
t                1869 drivers/usb/host/ehci-sched.c 			desc->actual_length = EHCI_ITD_LENGTH(t);
t                2228 drivers/usb/host/ehci-sched.c 	u32					t;
t                2235 drivers/usb/host/ehci-sched.c 	t = hc32_to_cpup(ehci, &sitd->hw_results);
t                2238 drivers/usb/host/ehci-sched.c 	if (unlikely(t & SITD_ERRS)) {
t                2240 drivers/usb/host/ehci-sched.c 		if (t & SITD_STS_DBE)
t                2244 drivers/usb/host/ehci-sched.c 		else if (t & SITD_STS_BABBLE)
t                2248 drivers/usb/host/ehci-sched.c 	} else if (unlikely(t & SITD_STS_ACTIVE)) {
t                2253 drivers/usb/host/ehci-sched.c 		desc->actual_length = desc->length - SITD_LENGTH(t);
t                 398 drivers/usb/host/ehci-timer.c static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
t                 400 drivers/usb/host/ehci-timer.c 	struct ehci_hcd	*ehci = container_of(t, struct ehci_hcd, hrtimer);
t                1339 drivers/usb/host/fotg210-hcd.c static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
t                1342 drivers/usb/host/fotg210-hcd.c 			container_of(t, struct fotg210_hcd, hrtimer);
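ehci-timer.c and fotg210-hcd.c use the hrtimer flavour of the same recovery: the callback gets the struct hrtimer pointer and container_of() maps it back to the HCD. Sketch, with the structure name invented:

#include <linux/hrtimer.h>

struct my_hcd {
	struct hrtimer hrtimer;
	/* ... */
};

/* hrtimer callbacks receive the hrtimer itself; container_of()
 * recovers the embedding structure, as ehci_hrtimer_func() does. */
static enum hrtimer_restart my_hrtimer_func(struct hrtimer *t)
{
	struct my_hcd *hcd = container_of(t, struct my_hcd, hrtimer);

	(void)hcd;		/* handle the expiration here */
	return HRTIMER_NORESTART;
}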
t                4435 drivers/usb/host/fotg210-hcd.c 	u32 t;
t                4449 drivers/usb/host/fotg210-hcd.c 		t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
t                4453 drivers/usb/host/fotg210-hcd.c 		if (unlikely(t & ISO_ERRS)) {
t                4455 drivers/usb/host/fotg210-hcd.c 			if (t & FOTG210_ISOC_BUF_ERR)
t                4459 drivers/usb/host/fotg210-hcd.c 			else if (t & FOTG210_ISOC_BABBLE)
t                4465 drivers/usb/host/fotg210-hcd.c 			if (!(t & FOTG210_ISOC_BABBLE)) {
t                4467 drivers/usb/host/fotg210-hcd.c 					fotg210_itdlen(urb, desc, t);
t                4470 drivers/usb/host/fotg210-hcd.c 		} else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
t                4472 drivers/usb/host/fotg210-hcd.c 			desc->actual_length = fotg210_itdlen(urb, desc, t);
t                 689 drivers/usb/host/fotg210.h #define fotg210_itdlen(urb, desc, t) ({			\
t                 691 drivers/usb/host/fotg210.h 	(desc)->length - FOTG210_ITD_LENGTH(t) :			\
t                 692 drivers/usb/host/fotg210.h 	FOTG210_ITD_LENGTH(t);					\
t                 973 drivers/usb/host/isp116x-hcd.c 	unsigned long flags, t;
t                 977 drivers/usb/host/isp116x-hcd.c 	t = jiffies + msecs_to_jiffies(100);
t                 979 drivers/usb/host/isp116x-hcd.c 	while (time_before(jiffies, t)) {
t                1251 drivers/usb/host/isp116x-hcd.c 	unsigned long t;
t                1259 drivers/usb/host/isp116x-hcd.c 	t = jiffies + msecs_to_jiffies(timeout);
t                1260 drivers/usb/host/isp116x-hcd.c 	while (time_before_eq(jiffies, t)) {
t                2318 drivers/usb/host/isp1362-hcd.c 	unsigned long t;
t                2335 drivers/usb/host/isp1362-hcd.c 	t = jiffies + msecs_to_jiffies(timeout);
t                2336 drivers/usb/host/isp1362-hcd.c 	while (!clkrdy && time_before_eq(jiffies, t)) {
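In isp116x-hcd.c and isp1362-hcd.c, t is not a timer but a jiffies deadline: jiffies + msecs_to_jiffies(timeout), polled against with time_before()/time_before_eq(). A small sketch of that bounded poll, with a hypothetical ready() predicate:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a hypothetical ready() condition for up to timeout_ms. */
static int wait_ready(bool (*ready)(void), unsigned int timeout_ms)
{
	unsigned long t = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, t)) {
		if (ready())
			return 0;
		msleep(1);	/* drivers in atomic context use udelay() */
	}
	return ready() ? 0 : -ETIMEDOUT;
}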
t                  85 drivers/usb/host/ohci-hcd.c static void io_watchdog_func(struct timer_list *t);
t                 745 drivers/usb/host/ohci-hcd.c static void io_watchdog_func(struct timer_list *t)
t                 747 drivers/usb/host/ohci-hcd.c 	struct ohci_hcd	*ohci = from_timer(ohci, t, io_watchdog);
t                 699 drivers/usb/host/oxu210hp-hcd.c 		unsigned long t;
t                 703 drivers/usb/host/oxu210hp-hcd.c 			t = EHCI_IAA_JIFFIES;
t                 706 drivers/usb/host/oxu210hp-hcd.c 			t = EHCI_IO_JIFFIES;
t                 709 drivers/usb/host/oxu210hp-hcd.c 			t = EHCI_ASYNC_JIFFIES;
t                 713 drivers/usb/host/oxu210hp-hcd.c 			t = EHCI_SHRINK_JIFFIES;
t                 716 drivers/usb/host/oxu210hp-hcd.c 		t += jiffies;
t                 723 drivers/usb/host/oxu210hp-hcd.c 				&& t > oxu->watchdog.expires
t                 726 drivers/usb/host/oxu210hp-hcd.c 		mod_timer(&oxu->watchdog, t);
t                2977 drivers/usb/host/oxu210hp-hcd.c static void oxu_watchdog(struct timer_list *t)
t                2979 drivers/usb/host/oxu210hp-hcd.c 	struct oxu_hcd	*oxu = from_timer(oxu, t, watchdog);
t                1723 drivers/usb/host/r8a66597-hcd.c static void r8a66597_interval_timer(struct timer_list *t)
t                1725 drivers/usb/host/r8a66597-hcd.c 	struct r8a66597_timers *timers = from_timer(timers, t, interval);
t                1747 drivers/usb/host/r8a66597-hcd.c static void r8a66597_td_timer(struct timer_list *t)
t                1749 drivers/usb/host/r8a66597-hcd.c 	struct r8a66597_timers *timers = from_timer(timers, t, td);
t                1801 drivers/usb/host/r8a66597-hcd.c static void r8a66597_timer(struct timer_list *t)
t                1803 drivers/usb/host/r8a66597-hcd.c 	struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
t                1122 drivers/usb/host/sl811-hcd.c sl811h_timer(struct timer_list *t)
t                1124 drivers/usb/host/sl811-hcd.c 	struct sl811 	*sl811 = from_timer(sl811, t, timer);
t                1407 drivers/usb/host/sl811-hcd.c 		u8	t = sl811_read(sl811, SL11H_CTLREG1);
t                1409 drivers/usb/host/sl811-hcd.c 		seq_printf(s, "ctrl1 %02x%s%s%s%s\n", t,
t                1410 drivers/usb/host/sl811-hcd.c 			(t & SL11H_CTL1MASK_SOF_ENA) ? " sofgen" : "",
t                1411 drivers/usb/host/sl811-hcd.c 			({char *s; switch (t & SL11H_CTL1MASK_FORCE) {
t                1417 drivers/usb/host/sl811-hcd.c 			(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
t                1418 drivers/usb/host/sl811-hcd.c 			(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");
t                  92 drivers/usb/host/uhci-q.c static void uhci_fsbr_timeout(struct timer_list *t)
t                  94 drivers/usb/host/uhci-q.c 	struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
t                 164 drivers/usb/host/xhci-dbgcap.h #define dbc_epctx_info2(t, p, b)	\
t                 165 drivers/usb/host/xhci-dbgcap.h 	cpu_to_le32(EP_TYPE(t) | MAX_PACKET(p) | MAX_BURST(b))
t                 967 drivers/usb/host/xhci-ring.c void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
t                 969 drivers/usb/host/xhci-ring.c 	struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
t                 475 drivers/usb/host/xhci.c static void compliance_mode_recovery(struct timer_list *t)
t                 483 drivers/usb/host/xhci.c 	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
t                2122 drivers/usb/host/xhci.h void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
t                1331 drivers/usb/isp1760/isp1760-udc.c static void isp1760_udc_vbus_poll(struct timer_list *t)
t                1333 drivers/usb/isp1760/isp1760-udc.c 	struct isp1760_udc *udc = from_timer(udc, t, vbus_timer);
t                 766 drivers/usb/misc/sisusbvga/sisusb_con.c 		unsigned int t, unsigned int b, enum con_scroll dir,
t                 770 drivers/usb/misc/sisusbvga/sisusb_con.c 	int length = ((b - t) * cols) * 2;
t                 784 drivers/usb/misc/sisusbvga/sisusb_con.c 			memmove(sisusb_vaddr(sisusb, c, 0, t),
t                 785 drivers/usb/misc/sisusbvga/sisusb_con.c 					   sisusb_vaddr(sisusb, c, 0, t + lines),
t                 786 drivers/usb/misc/sisusbvga/sisusb_con.c 					   (b - t - lines) * cols * 2);
t                 792 drivers/usb/misc/sisusbvga/sisusb_con.c 			memmove(sisusb_vaddr(sisusb, c, 0, t + lines),
t                 793 drivers/usb/misc/sisusbvga/sisusb_con.c 					   sisusb_vaddr(sisusb, c, 0, t),
t                 794 drivers/usb/misc/sisusbvga/sisusb_con.c 					   (b - t - lines) * cols * 2);
t                 795 drivers/usb/misc/sisusbvga/sisusb_con.c 			sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, t), eattr,
t                 800 drivers/usb/misc/sisusbvga/sisusb_con.c 	sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, 0, t),
t                 801 drivers/usb/misc/sisusbvga/sisusb_con.c 			sisusb_haddr(sisusb, c, 0, t), length);
t                 810 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
t                 841 drivers/usb/misc/sisusbvga/sisusb_con.c 	if (t || b != c->vc_rows)
t                 842 drivers/usb/misc/sisusbvga/sisusb_con.c 		return sisusbcon_scroll_area(c, sisusb, t, b, dir, lines);
t                 593 drivers/usb/misc/usbtest.c static void sg_timeout(struct timer_list *t)
t                 595 drivers/usb/misc/usbtest.c 	struct sg_timeout *timeout = from_timer(timeout, t, timer);
t                 120 drivers/usb/musb/am35x.c static void otg_timer(struct timer_list *t)
t                 122 drivers/usb/musb/am35x.c 	struct musb		*musb = from_timer(musb, t, dev_timer);
t                 122 drivers/usb/musb/da8xx.c static void otg_timer(struct timer_list *t)
t                 124 drivers/usb/musb/da8xx.c 	struct musb		*musb = from_timer(musb, t, dev_timer);
t                 186 drivers/usb/musb/davinci.c static void otg_timer(struct timer_list *t)
t                 188 drivers/usb/musb/davinci.c 	struct musb		*musb = from_timer(musb, t, dev_timer);
t                 454 drivers/usb/musb/musb_core.c static void musb_otg_timer_func(struct timer_list *t)
t                 456 drivers/usb/musb/musb_core.c 	struct musb	*musb = from_timer(musb, t, otg_timer);
t                 279 drivers/usb/musb/musb_dsps.c static void otg_timer(struct timer_list *t)
t                 281 drivers/usb/musb/musb_dsps.c 	struct musb *musb = from_timer(musb, t, dev_timer);
t                 452 drivers/usb/musb/tusb6010.c static void musb_do_idle(struct timer_list *t)
t                 454 drivers/usb/musb/tusb6010.c 	struct musb	*musb = from_timer(musb, t, dev_timer);
t                 358 drivers/usb/phy/phy-fsl-usb.c static struct fsl_otg_timer *fsl_otg_get_timer(enum otg_fsm_timer t)
t                 363 drivers/usb/phy/phy-fsl-usb.c 	switch (t) {
t                 411 drivers/usb/phy/phy-fsl-usb.c static void fsl_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
t                 415 drivers/usb/phy/phy-fsl-usb.c 	timer = fsl_otg_get_timer(t);
t                 433 drivers/usb/phy/phy-fsl-usb.c static void fsl_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
t                 437 drivers/usb/phy/phy-fsl-usb.c 	timer = fsl_otg_get_timer(t);
t                 969 drivers/usb/phy/phy-fsl-usb.c 	int t;
t                 974 drivers/usb/phy/phy-fsl-usb.c 	t = scnprintf(next, size,
t                 977 drivers/usb/phy/phy-fsl-usb.c 	size -= t;
t                 978 drivers/usb/phy/phy-fsl-usb.c 	next += t;
t                 981 drivers/usb/phy/phy-fsl-usb.c 	t = scnprintf(next, size,
t                 994 drivers/usb/phy/phy-fsl-usb.c 	size -= t;
t                 995 drivers/usb/phy/phy-fsl-usb.c 	next += t;
t                 998 drivers/usb/phy/phy-fsl-usb.c 	t = scnprintf(next, size,
t                1001 drivers/usb/phy/phy-fsl-usb.c 	size -= t;
t                1002 drivers/usb/phy/phy-fsl-usb.c 	next += t;
t                1005 drivers/usb/phy/phy-fsl-usb.c 	t = scnprintf(next, size,
t                1036 drivers/usb/phy/phy-fsl-usb.c 	size -= t;
t                1037 drivers/usb/phy/phy-fsl-usb.c 	next += t;
t                1173 drivers/usb/phy/phy-isp1301-omap.c static void isp1301_timer(struct timer_list *t)
t                1175 drivers/usb/phy/phy-isp1301-omap.c 	struct isp1301 *isp = from_timer(isp, t, timer);
t                  86 drivers/usb/phy/phy-mv-usb.c static void mv_otg_timer_await_bcon(struct timer_list *t)
t                  88 drivers/usb/phy/phy-mv-usb.c 	struct mv_otg *mvotg = from_timer(mvotg, t,
t                 205 drivers/usb/renesas_usbhs/mod_host.c 	int t = 0;
t                 228 drivers/usb/renesas_usbhs/mod_host.c 	t = len / maxp;
t                 230 drivers/usb/renesas_usbhs/mod_host.c 		t++;
t                 232 drivers/usb/renesas_usbhs/mod_host.c 		t++;
t                 233 drivers/usb/renesas_usbhs/mod_host.c 	t %= 2;
t                 235 drivers/usb/renesas_usbhs/mod_host.c 	if (t)
t                 108 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_type_is(p, t)	((p)->pipe_type == t)
t                1358 drivers/usb/serial/garmin_gps.c static void timeout_handler(struct timer_list *t)
t                1360 drivers/usb/serial/garmin_gps.c 	struct garmin_data *garmin_data_p = from_timer(garmin_data_p, t, timer);
t                 558 drivers/usb/serial/mos7840.c static void mos7840_led_off(struct timer_list *t)
t                 560 drivers/usb/serial/mos7840.c 	struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
t                 568 drivers/usb/serial/mos7840.c static void mos7840_led_flag_off(struct timer_list *t)
t                 570 drivers/usb/serial/mos7840.c 	struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
t                 556 drivers/usb/serial/whiteheat.c 	int t;
t                 580 drivers/usb/serial/whiteheat.c 	t = wait_event_timeout(command_info->wait_command,
t                 582 drivers/usb/serial/whiteheat.c 	if (!t)
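whiteheat.c only uses t to capture the wait_event_timeout() result: zero means the condition never became true within the timeout, anything else is the jiffies left over. Sketch, with the waitqueue and flag assumed to be set up and signalled elsewhere:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Wait up to one second for *done to become true; the waitqueue is
 * assumed to be woken by whoever sets the flag. */
static int wait_for_command(wait_queue_head_t *wq, bool *done)
{
	long t;

	t = wait_event_timeout(*wq, *done, msecs_to_jiffies(1000));
	if (!t)
		return -ETIMEDOUT;	/* timed out, condition still false */
	return 0;
}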
t                 755 drivers/usb/storage/realtek_cr.c static void rts51x_suspend_timer_fn(struct timer_list *t)
t                 757 drivers/usb/storage/realtek_cr.c 	struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
t                 302 drivers/usb/usbip/vudc_transfer.c static void v_timer(struct timer_list *t)
t                 304 drivers/usb/usbip/vudc_transfer.c 	struct vudc *udc = from_timer(udc, t, tr_timer.timer);
t                 448 drivers/usb/usbip/vudc_transfer.c 	struct transfer_timer *t = &udc->tr_timer;
t                 450 drivers/usb/usbip/vudc_transfer.c 	timer_setup(&t->timer, v_timer, 0);
t                 451 drivers/usb/usbip/vudc_transfer.c 	t->state = VUDC_TR_STOPPED;
t                 456 drivers/usb/usbip/vudc_transfer.c 	struct transfer_timer *t = &udc->tr_timer;
t                 459 drivers/usb/usbip/vudc_transfer.c 	switch (t->state) {
t                 465 drivers/usb/usbip/vudc_transfer.c 		t->state = VUDC_TR_IDLE;
t                 466 drivers/usb/usbip/vudc_transfer.c 		t->frame_start = jiffies;
t                 467 drivers/usb/usbip/vudc_transfer.c 		t->frame_limit = get_frame_limit(udc->gadget.speed);
t                 474 drivers/usb/usbip/vudc_transfer.c 	struct transfer_timer *t = &udc->tr_timer;
t                 477 drivers/usb/usbip/vudc_transfer.c 	switch (t->state) {
t                 481 drivers/usb/usbip/vudc_transfer.c 		t->state = VUDC_TR_RUNNING;
t                 485 drivers/usb/usbip/vudc_transfer.c 		mod_timer(&t->timer, time);
t                 491 drivers/usb/usbip/vudc_transfer.c 	struct transfer_timer *t = &udc->tr_timer;
t                 495 drivers/usb/usbip/vudc_transfer.c 	t->state = VUDC_TR_STOPPED;
t                 501 drivers/vhost/scsi.c 	struct vhost_scsi_evt *evt, *t;
t                 506 drivers/vhost/scsi.c 	llist_for_each_entry_safe(evt, t, llnode, list) {
t                 524 drivers/vhost/scsi.c 	struct vhost_scsi_cmd *cmd, *t;
t                 532 drivers/vhost/scsi.c 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
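The first two vhost/scsi.c blocks drain lock-free lists: llist_for_each_entry_safe() walks a chain detached from an llist_head, and t is the conventional second cursor that keeps the walk valid while the current entry is freed. Sketch, with the event structure invented:

#include <linux/llist.h>
#include <linux/slab.h>

struct my_evt {
	struct llist_node list;
	/* ... payload ... */
};

/* Detach everything atomically, then walk it with the _safe iterator
 * so each entry may be freed as we go. */
static void drain_events(struct llist_head *head)
{
	struct llist_node *llnode = llist_del_all(head);
	struct my_evt *evt, *t;

	llist_for_each_entry_safe(evt, t, llnode, list) {
		/* process evt ... */
		kfree(evt);
	}
}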
t                1385 drivers/vhost/scsi.c 			struct vhost_scsi_target *t)
t                1428 drivers/vhost/scsi.c 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
t                1458 drivers/vhost/scsi.c 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
t                1488 drivers/vhost/scsi.c 			  struct vhost_scsi_target *t)
t                1526 drivers/vhost/scsi.c 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
t                1530 drivers/vhost/scsi.c 				t->vhost_wwpn, t->vhost_tpgt);
t                1647 drivers/vhost/scsi.c 	struct vhost_scsi_target t;
t                1650 drivers/vhost/scsi.c 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
t                1652 drivers/vhost/scsi.c 	vhost_scsi_clear_endpoint(vs, &t);
t                 792 drivers/vhost/vhost.c 		struct iov_iter t;
t                 805 drivers/vhost/vhost.c 		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
t                 806 drivers/vhost/vhost.c 		ret = copy_to_iter(from, size, &t);
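In drivers/vhost/vhost.c, t is a scratch struct iov_iter: iov_iter_init() wraps an iovec array for writing and copy_to_iter() pushes kernel data out through it. A minimal sketch with the iovec assumed to be prepared by the caller; the WRITE direction constant matches the kernel version indexed here:

#include <linux/uio.h>
#include <linux/errno.h>

/* Copy "size" bytes from a kernel buffer out through an iovec array,
 * mirroring the iov_iter_init()/copy_to_iter() pair shown above. */
static int copy_out(const void *from, size_t size,
		    struct iovec *iov, unsigned long nr_segs)
{
	struct iov_iter t;

	iov_iter_init(&t, WRITE, iov, nr_segs, size);
	if (copy_to_iter(from, size, &t) != size)
		return -EFAULT;
	return 0;
}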
t                 504 drivers/video/console/mdacon.c static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
t                 518 drivers/video/console/mdacon.c 		scr_memmovew(mda_addr(0, t), mda_addr(0, t + lines),
t                 519 drivers/video/console/mdacon.c 				(b-t-lines)*mda_num_columns*2);
t                 525 drivers/video/console/mdacon.c 		scr_memmovew(mda_addr(0, t + lines), mda_addr(0, t),
t                 526 drivers/video/console/mdacon.c 				(b-t-lines)*mda_num_columns*2);
t                 527 drivers/video/console/mdacon.c 		scr_memsetw(mda_addr(0, t), eattr, lines*mda_num_columns*2);
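Across the console drivers (sisusb_con above, mdacon here, and newport_con, sticon, vgacon, fbcon below) t and b are the top and bottom of the scroll region handed to the scroll hook, with b one past the last row. mdacon shows the core arithmetic most plainly: scrolling up moves rows t+lines .. b-1 up by "lines" and blanks the freed rows. A rough equivalent on a plain u16 cell buffer; real drivers go through scr_memmovew()/scr_memsetw() and the adapter's memory:

#include <linux/string.h>
#include <linux/types.h>

/* Scroll a region of a text screen buffer up by "lines" rows.
 * "t" is the top row (inclusive), "b" the bottom row (exclusive),
 * following the console convention.  Sketch only. */
static void region_scroll_up(u16 *screen, unsigned int cols,
			     unsigned int t, unsigned int b,
			     unsigned int lines, u16 erase)
{
	unsigned int i;

	memmove(screen + t * cols, screen + (t + lines) * cols,
		(b - t - lines) * cols * sizeof(u16));

	/* blank the rows that scrolled into view at the bottom */
	for (i = (b - lines) * cols; i < b * cols; i++)
		screen[i] = erase;
}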
t                 578 drivers/video/console/newport_con.c static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
t                 587 drivers/video/console/newport_con.c 	if (t == 0 && b == vc->vc_rows) {
t                 602 drivers/video/console/newport_con.c 	count = (b - t - lines) * vc->vc_cols;
t                 605 drivers/video/console/newport_con.c 		y = t;
t                 607 drivers/video/console/newport_con.c 					vc->vc_size_row * (t + lines));
t                 609 drivers/video/console/newport_con.c 					vc->vc_size_row * t);
t                 658 drivers/video/console/newport_con.c 					vc->vc_size_row * t);
t                 660 drivers/video/console/newport_con.c 		y = t;
t                 156 drivers/video/console/sticon.c static bool sticon_scroll(struct vc_data *conp, unsigned int t,
t                 168 drivers/video/console/sticon.c 	sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols);
t                 173 drivers/video/console/sticon.c 	sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols);
t                 174 drivers/video/console/sticon.c 	sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char);
t                 243 drivers/video/console/vgacon.c static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
t                 251 drivers/video/console/vgacon.c 	p = (void *) (c->vc_origin + t * c->vc_size_row);
t                1367 drivers/video/console/vgacon.c static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
t                1373 drivers/video/console/vgacon.c 	if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT)
t                1383 drivers/video/console/vgacon.c 		vgacon_scrollback_update(c, t, lines);
t                 837 drivers/video/fbdev/atafb.c 	unsigned long t;	/* t/[ps] (=1/f) */
t                1051 drivers/video/fbdev/atafb.c 		if (var->pixclock > f32.t * plen)
t                1116 drivers/video/fbdev/atafb.c 				if (f25.t * i >= var->pixclock &&
t                1117 drivers/video/fbdev/atafb.c 				    f25.t * i < pcl) {
t                1118 drivers/video/fbdev/atafb.c 					pcl = f25.t * i;
t                1121 drivers/video/fbdev/atafb.c 				if (f32.t * i >= var->pixclock &&
t                1122 drivers/video/fbdev/atafb.c 				    f32.t * i < pcl) {
t                1123 drivers/video/fbdev/atafb.c 					pcl = f32.t * i;
t                1126 drivers/video/fbdev/atafb.c 				if (fext.t && fext.t * i >= var->pixclock &&
t                1127 drivers/video/fbdev/atafb.c 				    fext.t * i < pcl) {
t                1128 drivers/video/fbdev/atafb.c 					pcl = fext.t * i;
t                1134 drivers/video/fbdev/atafb.c 			plen = pcl / pclock->t;
t                1380 drivers/video/fbdev/atafb.c 	var->pixclock = hw->sync & 0x1 ? fext.t :
t                1381 drivers/video/fbdev/atafb.c 	                hw->vid_control & VCO_CLOCK25 ? f25.t : f32.t;
t                1770 drivers/video/fbdev/atafb.c 	f25.hsync = h_syncs[mon_type] / f25.t;
t                1771 drivers/video/fbdev/atafb.c 	f32.hsync = h_syncs[mon_type] / f32.t;
t                1772 drivers/video/fbdev/atafb.c 	if (fext.t)
t                1773 drivers/video/fbdev/atafb.c 		fext.hsync = h_syncs[mon_type] / fext.t;
t                3042 drivers/video/fbdev/atafb.c 			fext.t = 1000000000 / fext.f;
t                1453 drivers/video/fbdev/aty/radeon_base.c static void radeon_lvds_timer_func(struct timer_list *t)
t                1455 drivers/video/fbdev/aty/radeon_base.c 	struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
t                  23 drivers/video/fbdev/c2p_core.h 	u32 t = (d[i1] ^ (d[i2] >> shift)) & mask;
t                  25 drivers/video/fbdev/c2p_core.h 	d[i1] ^= t;
t                  26 drivers/video/fbdev/c2p_core.h 	d[i2] ^= t << shift;
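The three c2p_core.h lines are the classic delta swap used by the chunky-to-planar converter: exchange the bit field selected by mask between d[i1] and the shifted d[i2] with three XORs and one temporary. As a standalone helper:

#include <linux/types.h>

/* Delta swap: exchange the bits selected by "mask" between *a and
 * (*b >> shift), in place and without a branch. */
static inline void delta_swap(u32 *a, u32 *b, u32 shift, u32 mask)
{
	u32 t = (*a ^ (*b >> shift)) & mask;

	*a ^= t;
	*b ^= t << shift;
}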
t                 422 drivers/video/fbdev/core/fbcon.c static void cursor_timer_handler(struct timer_list *t)
t                 424 drivers/video/fbdev/core/fbcon.c 	struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
t                1081 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *t, *p = &fb_display[vc->vc_num];
t                1109 drivers/video/fbdev/core/fbcon.c 	t = &fb_display[fg_console];
t                1111 drivers/video/fbdev/core/fbcon.c 		if (t->fontdata) {
t                1118 drivers/video/fbdev/core/fbcon.c 			p->userfont = t->userfont;
t                1416 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p, *t;
t                1434 drivers/video/fbdev/core/fbcon.c 	t = &fb_display[svc->vc_num];
t                1437 drivers/video/fbdev/core/fbcon.c 		vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
t                1440 drivers/video/fbdev/core/fbcon.c 		p->userfont = t->userfont;
t                1543 drivers/video/fbdev/core/fbcon.c static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
t                1553 drivers/video/fbdev/core/fbcon.c 		fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
t                1591 drivers/video/fbdev/core/fbcon.c static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
t                1601 drivers/video/fbdev/core/fbcon.c 		fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
t                1847 drivers/video/fbdev/core/fbcon.c static inline void fbcon_softback_note(struct vc_data *vc, int t,
t                1854 drivers/video/fbdev/core/fbcon.c 	p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
t                1872 drivers/video/fbdev/core/fbcon.c static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
t                1895 drivers/video/fbdev/core/fbcon.c 			fbcon_softback_note(vc, t, count);
t                1900 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw_blit(vc, info, p, t, b - t - count,
t                1912 drivers/video/fbdev/core/fbcon.c 			if (b - t - count > 3 * vc->vc_rows >> 2) {
t                1913 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                1914 drivers/video/fbdev/core/fbcon.c 					fbcon_bmove(vc, 0, 0, count, 0, t,
t                1922 drivers/video/fbdev/core/fbcon.c 				fbcon_bmove(vc, t + count, 0, t, 0,
t                1923 drivers/video/fbdev/core/fbcon.c 					    b - t - count, vc->vc_cols);
t                1932 drivers/video/fbdev/core/fbcon.c 			    && ((!scroll_partial && (b - t == vc->vc_rows))
t                1934 drivers/video/fbdev/core/fbcon.c 				    && (b - t - count >
t                1936 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                1937 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, 0, t, count);
t                1938 drivers/video/fbdev/core/fbcon.c 				ypan_up_redraw(vc, t, count);
t                1943 drivers/video/fbdev/core/fbcon.c 				fbcon_redraw_move(vc, p, t + count, b - t - count, t);
t                1950 drivers/video/fbdev/core/fbcon.c 			    && ((!scroll_partial && (b - t == vc->vc_rows))
t                1952 drivers/video/fbdev/core/fbcon.c 				    && (b - t - count >
t                1954 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                1955 drivers/video/fbdev/core/fbcon.c 					fbcon_bmove(vc, 0, 0, count, 0, t,
t                1963 drivers/video/fbdev/core/fbcon.c 				fbcon_bmove(vc, t + count, 0, t, 0,
t                1964 drivers/video/fbdev/core/fbcon.c 					    b - t - count, vc->vc_cols);
t                1972 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw(vc, p, t, b - t - count,
t                1991 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
t                1993 drivers/video/fbdev/core/fbcon.c 			fbcon_clear(vc, t, 0, count, vc->vc_cols);
t                1996 drivers/video/fbdev/core/fbcon.c 							t),
t                2003 drivers/video/fbdev/core/fbcon.c 			if (b - t - count > 3 * vc->vc_rows >> 2) {
t                2009 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                2010 drivers/video/fbdev/core/fbcon.c 					fbcon_bmove(vc, count, 0, 0, 0, t,
t                2013 drivers/video/fbdev/core/fbcon.c 				fbcon_bmove(vc, t, 0, t + count, 0,
t                2014 drivers/video/fbdev/core/fbcon.c 					    b - t - count, vc->vc_cols);
t                2017 drivers/video/fbdev/core/fbcon.c 			fbcon_clear(vc, t, 0, count, vc->vc_cols);
t                2022 drivers/video/fbdev/core/fbcon.c 			    && ((!scroll_partial && (b - t == vc->vc_rows))
t                2024 drivers/video/fbdev/core/fbcon.c 				    && (b - t - count >
t                2031 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                2032 drivers/video/fbdev/core/fbcon.c 					fbcon_bmove(vc, count, 0, 0, 0, t,
t                2035 drivers/video/fbdev/core/fbcon.c 				fbcon_bmove(vc, t, 0, t + count, 0,
t                2036 drivers/video/fbdev/core/fbcon.c 					    b - t - count, vc->vc_cols);
t                2039 drivers/video/fbdev/core/fbcon.c 			fbcon_clear(vc, t, 0, count, vc->vc_cols);
t                2044 drivers/video/fbdev/core/fbcon.c 			    && ((!scroll_partial && (b - t == vc->vc_rows))
t                2046 drivers/video/fbdev/core/fbcon.c 				    && (b - t - count >
t                2051 drivers/video/fbdev/core/fbcon.c 				ypan_down_redraw(vc, t, count);
t                2052 drivers/video/fbdev/core/fbcon.c 				if (t > 0)
t                2053 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, count, t, 0);
t                2055 drivers/video/fbdev/core/fbcon.c 				fbcon_redraw_move(vc, p, t, b - t - count, t + count);
t                2056 drivers/video/fbdev/core/fbcon.c 			fbcon_clear(vc, t, 0, count, vc->vc_cols);
t                2061 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw(vc, p, b - 1, b - t - count,
t                2063 drivers/video/fbdev/core/fbcon.c 			fbcon_clear(vc, t, 0, count, vc->vc_cols);
t                2066 drivers/video/fbdev/core/fbcon.c 							t),
t                2148 drivers/video/fbdev/core/fbcon.c 	u16 t = 0;
t                2151 drivers/video/fbdev/core/fbcon.c 	int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
t                 182 drivers/video/fbdev/fm2fb.c 	unsigned char t = FRAMEMASTER_ROM;
t                 185 drivers/video/fbdev/fm2fb.c 		t |= FRAMEMASTER_ENABLE | FRAMEMASTER_NOLACE;
t                 186 drivers/video/fbdev/fm2fb.c 	fm2fb_reg[0] = t;
t                 239 drivers/video/fbdev/gxt4500.c 	int pll_period, best_error, t, intf;
t                 260 drivers/video/fbdev/gxt4500.c 				t = par->refclk_ps * m * postdiv / n;
t                 261 drivers/video/fbdev/gxt4500.c 				t -= period_ps;
t                 262 drivers/video/fbdev/gxt4500.c 				if (t >= 0 && t < best_error) {
t                 267 drivers/video/fbdev/gxt4500.c 					best_error = t;
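gxt4500.c uses t as the signed error between a candidate PLL period (refclk_ps * m * postdiv / n) and the requested one, keeping the smallest non-negative error, i.e. the closest period that is not shorter than requested. A schematic version of that search; the divider ranges are placeholders, not the hardware's:

#include <linux/kernel.h>

/* Exhaustive divider search keeping the smallest non-negative period
 * error, schematically following the gxt4500 loop above. */
static int pick_pll(int refclk_ps, int period_ps,
		    int *best_m, int *best_n, int *best_pd)
{
	int m, n, pd, t, best_error = INT_MAX;

	for (pd = 1; pd <= 8; pd++)
		for (n = 1; n <= 32; n++)
			for (m = 1; m <= 64; m++) {
				t = refclk_ps * m * pd / n - period_ps;
				if (t >= 0 && t < best_error) {
					best_error = t;
					*best_m = m;
					*best_n = n;
					*best_pd = pd;
				}
			}

	return best_error == INT_MAX ? -1 : 0;
}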
t                 410 drivers/video/fbdev/hyperv_fb.c 	unsigned long t;
t                 419 drivers/video/fbdev/hyperv_fb.c 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
t                 420 drivers/video/fbdev/hyperv_fb.c 	if (!t) {
t                 484 drivers/video/fbdev/hyperv_fb.c 	unsigned long t;
t                 495 drivers/video/fbdev/hyperv_fb.c 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
t                 496 drivers/video/fbdev/hyperv_fb.c 	if (!t) {
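hyperv_fb.c stores the wait_for_completion_timeout() result in t; as with wait_event_timeout() above, zero means timeout and a non-zero value is the jiffies remaining. Sketch:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait up to one second for another context to call complete(). */
static int wait_for_reply(struct completion *done)
{
	unsigned long t;

	t = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!t)
		return -ETIMEDOUT;
	return 0;
}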
t                 170 drivers/video/fbdev/metronomefb.c static int load_waveform(u8 *mem, size_t size, int m, int t,
t                 228 drivers/video/fbdev/metronomefb.c 		if (mem[i] > t) {
t                 109 drivers/video/fbdev/mmp/hw/mmp_spi.c 	struct spi_transfer *t;
t                 112 drivers/video/fbdev/mmp/hw/mmp_spi.c 	list_for_each_entry(t, &m->transfers, transfer_list) {
t                 115 drivers/video/fbdev/mmp/hw/mmp_spi.c 			for (i = 0; i < t->len; i++)
t                 116 drivers/video/fbdev/mmp/hw/mmp_spi.c 				lcd_spi_write(spi, ((u8 *)t->tx_buf)[i]);
t                 119 drivers/video/fbdev/mmp/hw/mmp_spi.c 			for (i = 0; i < t->len/2; i++)
t                 120 drivers/video/fbdev/mmp/hw/mmp_spi.c 				lcd_spi_write(spi, ((u16 *)t->tx_buf)[i]);
t                 123 drivers/video/fbdev/mmp/hw/mmp_spi.c 			for (i = 0; i < t->len/4; i++)
t                 124 drivers/video/fbdev/mmp/hw/mmp_spi.c 				lcd_spi_write(spi, ((u32 *)t->tx_buf)[i]);
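mmp_spi.c iterates a spi_message's transfer list and pushes each tx buffer out in 8-, 16- or 32-bit units according to bits_per_word. Sketch with the controller write abstracted into a hypothetical callback:

#include <linux/spi/spi.h>

/* Push every transfer of an spi_message to a hypothetical
 * write_word() callback, honouring bits_per_word as the mmp_spi
 * loop above does. */
static void push_message(struct spi_device *spi, struct spi_message *m,
			 void (*write_word)(struct spi_device *, u32))
{
	struct spi_transfer *t;
	unsigned int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		switch (spi->bits_per_word) {
		case 8:
			for (i = 0; i < t->len; i++)
				write_word(spi, ((const u8 *)t->tx_buf)[i]);
			break;
		case 16:
			for (i = 0; i < t->len / 2; i++)
				write_word(spi, ((const u16 *)t->tx_buf)[i]);
			break;
		case 32:
			for (i = 0; i < t->len / 4; i++)
				write_word(spi, ((const u32 *)t->tx_buf)[i]);
			break;
		}
	}
}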
t                  17 drivers/video/fbdev/nvidia/nv_type.h #define BITMASK(t,b) (((unsigned)(1U << (((t)-(b)+1)))-1)  << (b))
t                 624 drivers/video/fbdev/omap/hwa742.c 	struct extif_timings *t;
t                 639 drivers/video/fbdev/omap/hwa742.c 	t = &hwa742.reg_timings;
t                 640 drivers/video/fbdev/omap/hwa742.c 	memset(t, 0, sizeof(*t));
t                 641 drivers/video/fbdev/omap/hwa742.c 	t->clk_div = div;
t                 642 drivers/video/fbdev/omap/hwa742.c 	t->cs_on_time = 0;
t                 643 drivers/video/fbdev/omap/hwa742.c 	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
t                 644 drivers/video/fbdev/omap/hwa742.c 	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
t                 645 drivers/video/fbdev/omap/hwa742.c 	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
t                 646 drivers/video/fbdev/omap/hwa742.c 	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
t                 647 drivers/video/fbdev/omap/hwa742.c 	t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
t                 648 drivers/video/fbdev/omap/hwa742.c 	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
t                 649 drivers/video/fbdev/omap/hwa742.c 	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
t                 650 drivers/video/fbdev/omap/hwa742.c 	if (t->we_cycle_time < t->we_off_time)
t                 651 drivers/video/fbdev/omap/hwa742.c 		t->we_cycle_time = t->we_off_time;
t                 652 drivers/video/fbdev/omap/hwa742.c 	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
t                 653 drivers/video/fbdev/omap/hwa742.c 	if (t->re_cycle_time < t->re_off_time)
t                 654 drivers/video/fbdev/omap/hwa742.c 		t->re_cycle_time = t->re_off_time;
t                 655 drivers/video/fbdev/omap/hwa742.c 	t->cs_pulse_width = 0;
t                 658 drivers/video/fbdev/omap/hwa742.c 		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
t                 660 drivers/video/fbdev/omap/hwa742.c 		 t->we_on_time, t->we_off_time, t->re_cycle_time,
t                 661 drivers/video/fbdev/omap/hwa742.c 		 t->we_cycle_time);
t                 663 drivers/video/fbdev/omap/hwa742.c 		 t->access_time, t->cs_pulse_width);
t                 665 drivers/video/fbdev/omap/hwa742.c 	return hwa742.extif->convert_timings(t);
t                 670 drivers/video/fbdev/omap/hwa742.c 	struct extif_timings *t;
t                 686 drivers/video/fbdev/omap/hwa742.c 	t = &hwa742.lut_timings;
t                 687 drivers/video/fbdev/omap/hwa742.c 	memset(t, 0, sizeof(*t));
t                 689 drivers/video/fbdev/omap/hwa742.c 	t->clk_div = div;
t                 691 drivers/video/fbdev/omap/hwa742.c 	t->cs_on_time = 0;
t                 692 drivers/video/fbdev/omap/hwa742.c 	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
t                 693 drivers/video/fbdev/omap/hwa742.c 	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
t                 694 drivers/video/fbdev/omap/hwa742.c 	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
t                 696 drivers/video/fbdev/omap/hwa742.c 	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
t                 697 drivers/video/fbdev/omap/hwa742.c 	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
t                 699 drivers/video/fbdev/omap/hwa742.c 	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
t                 700 drivers/video/fbdev/omap/hwa742.c 	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
t                 701 drivers/video/fbdev/omap/hwa742.c 	if (t->we_cycle_time < t->we_off_time)
t                 702 drivers/video/fbdev/omap/hwa742.c 		t->we_cycle_time = t->we_off_time;
t                 703 drivers/video/fbdev/omap/hwa742.c 	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
t                 704 drivers/video/fbdev/omap/hwa742.c 	if (t->re_cycle_time < t->re_off_time)
t                 705 drivers/video/fbdev/omap/hwa742.c 		t->re_cycle_time = t->re_off_time;
t                 706 drivers/video/fbdev/omap/hwa742.c 	t->cs_pulse_width = 0;
t                 709 drivers/video/fbdev/omap/hwa742.c 		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
t                 711 drivers/video/fbdev/omap/hwa742.c 		 t->we_on_time, t->we_off_time, t->re_cycle_time,
t                 712 drivers/video/fbdev/omap/hwa742.c 		 t->we_cycle_time);
t                 714 drivers/video/fbdev/omap/hwa742.c 		 t->access_time, t->cs_pulse_width);
t                 716 drivers/video/fbdev/omap/hwa742.c 	return hwa742.extif->convert_timings(t);
t                 118 drivers/video/fbdev/omap/sossi.c static int calc_rd_timings(struct extif_timings *t)
t                 122 drivers/video/fbdev/omap/sossi.c 	int div = t->clk_div;
t                 128 drivers/video/fbdev/omap/sossi.c 	reon = ps_to_sossi_ticks(t->re_on_time, div);
t                 133 drivers/video/fbdev/omap/sossi.c 	reoff = ps_to_sossi_ticks(t->re_off_time, div);
t                 142 drivers/video/fbdev/omap/sossi.c 	recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
t                 153 drivers/video/fbdev/omap/sossi.c 	actim = ps_to_sossi_ticks(t->access_time, div);
t                 163 drivers/video/fbdev/omap/sossi.c 	t->tim[0] = tw0 - 1;
t                 164 drivers/video/fbdev/omap/sossi.c 	t->tim[1] = tw1 - 1;
t                 169 drivers/video/fbdev/omap/sossi.c static int calc_wr_timings(struct extif_timings *t)
t                 173 drivers/video/fbdev/omap/sossi.c 	int div = t->clk_div;
t                 179 drivers/video/fbdev/omap/sossi.c 	weon = ps_to_sossi_ticks(t->we_on_time, div);
t                 184 drivers/video/fbdev/omap/sossi.c 	weoff = ps_to_sossi_ticks(t->we_off_time, div);
t                 191 drivers/video/fbdev/omap/sossi.c 	wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
t                 202 drivers/video/fbdev/omap/sossi.c 	t->tim[2] = tw0 - 1;
t                 203 drivers/video/fbdev/omap/sossi.c 	t->tim[3] = tw1 - 1;
t                 312 drivers/video/fbdev/omap/sossi.c static int sossi_convert_timings(struct extif_timings *t)
t                 315 drivers/video/fbdev/omap/sossi.c 	int div = t->clk_div;
t                 317 drivers/video/fbdev/omap/sossi.c 	t->converted = 0;
t                 323 drivers/video/fbdev/omap/sossi.c 	if ((r = calc_rd_timings(t)) < 0)
t                 326 drivers/video/fbdev/omap/sossi.c 	if ((r = calc_wr_timings(t)) < 0)
t                 329 drivers/video/fbdev/omap/sossi.c 	t->tim[4] = div;
t                 331 drivers/video/fbdev/omap/sossi.c 	t->converted = 1;
t                 336 drivers/video/fbdev/omap/sossi.c static void sossi_set_timings(const struct extif_timings *t)
t                 338 drivers/video/fbdev/omap/sossi.c 	BUG_ON(!t->converted);
t                 340 drivers/video/fbdev/omap/sossi.c 	sossi.clk_tw0[RD_ACCESS] = t->tim[0];
t                 341 drivers/video/fbdev/omap/sossi.c 	sossi.clk_tw1[RD_ACCESS] = t->tim[1];
t                 343 drivers/video/fbdev/omap/sossi.c 	sossi.clk_tw0[WR_ACCESS] = t->tim[2];
t                 344 drivers/video/fbdev/omap/sossi.c 	sossi.clk_tw1[WR_ACCESS] = t->tim[3];
t                 346 drivers/video/fbdev/omap/sossi.c 	sossi.clk_div = t->tim[4];
t                 451 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	unsigned long t;
t                 454 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	r = kstrtoul(buf, 0, &t);
t                 463 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 		if (t)
t                 484 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	unsigned t;
t                 487 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	t = ddata->ulps_enabled;
t                 490 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	return snprintf(buf, PAGE_SIZE, "%u\n", t);
t                 499 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	unsigned long t;
t                 502 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	r = kstrtoul(buf, 0, &t);
t                 507 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	ddata->ulps_timeout = t;
t                 529 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	unsigned t;
t                 532 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	t = ddata->ulps_timeout;
t                 535 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c 	return snprintf(buf, PAGE_SIZE, "%u\n", t);
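The panel-dsi-cm sysfs handlers parse user input into t with kstrtoul() and print cached state back with snprintf(buf, PAGE_SIZE, ...). A generic store/show pair in the same shape; the attribute name and backing variable are invented, and real code would take the driver's lock around the update as panel-dsi-cm does:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Hypothetical backing store for the attribute. */
static unsigned long demo_timeout;

static ssize_t demo_timeout_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	unsigned long t;
	int r;

	r = kstrtoul(buf, 0, &t);	/* base 0: accepts 10, 0x10, 010 */
	if (r)
		return r;

	demo_timeout = t;
	return count;			/* consumed the whole write */
}

static ssize_t demo_timeout_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n", demo_timeout);
}

static DEVICE_ATTR_RW(demo_timeout);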
t                 393 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	unsigned long t;
t                 409 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	t = msecs_to_jiffies(500);
t                 410 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	r = wait_for_completion_timeout(&extra_updated_completion, t);
t                2140 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		const struct omap_video_timings *t, u16 pos_x,
t                2150 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
t                2157 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
t                3250 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	struct omap_video_timings t = *timings;
t                3252 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
t                3254 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	if (!dispc_mgr_timings_ok(channel, &t)) {
t                3260 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		_dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
t                3261 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 				t.vfp, t.vbp, t.vsync_level, t.hsync_level,
t                3262 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 				t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
t                3264 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		xtot = t.x_res + t.hfp + t.hsw + t.hbp;
t                3265 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		ytot = t.y_res + t.vfp + t.vsw + t.vbp;
t                3272 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 			t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
t                3274 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 			t.vsync_level, t.hsync_level, t.data_pclk_edge,
t                3275 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 			t.de_level, t.sync_pclk_edge);
t                3279 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		if (t.interlace)
t                3280 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 			t.y_res /= 2;
t                3283 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	dispc_mgr_set_size(channel, t.x_res, t.y_res);
t                  89 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 	struct omap_video_timings t;
t                  94 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 	dssdev->driver->get_timings(dssdev, &t);
t                  97 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 			t.pixelclock,
t                  98 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 			t.x_res, t.hfp, t.hbp, t.hsw,
t                  99 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 			t.y_res, t.vfp, t.vbp, t.vsw);
t                 105 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 	struct omap_video_timings t = dssdev->panel.timings;
t                 114 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 		t = omap_dss_pal_timings;
t                 117 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 		t = omap_dss_ntsc_timings;
t                 122 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 				&t.pixelclock,
t                 123 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 				&t.x_res, &t.hfp, &t.hbp, &t.hsw,
t                 124 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 				&t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
t                 127 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 	r = dssdev->driver->check_timings(dssdev, &t);
t                 132 drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c 	dssdev->driver->set_timings(dssdev, &t);
t                 327 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 	struct omap_video_timings *t = &dpi->timings;
t                 334 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 		r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck,
t                 337 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 		r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
t                 344 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 	if (pck != t->pixelclock) {
t                 346 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 			t->pixelclock, pck);
t                 348 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 		t->pixelclock = pck;
t                 351 drivers/video/fbdev/omap2/omapfb/dss/dpi.c 	dss_mgr_set_timings(mgr, t);
t                 496 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	int t;
t                 499 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t = 100;
t                 500 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	while (t-- > 0) {
t                 551 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	ktime_t t, setup_time, trans_time;
t                 558 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t = ktime_get();
t                 565 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	trans_time = ktime_sub(t, dsi->perf_start_time);
t                1341 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	int t = 0;
t                1353 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		if (++t > 1000) {
t                1745 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	int t = 0;
t                1753 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		if (++t > 1000) {
t                1817 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		unsigned t;
t                1819 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		for (t = 0; t < dsi->num_lanes_supported; ++t)
t                1820 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			if (dsi->lanes[t].function == functions[i])
t                1823 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		if (t == dsi->num_lanes_supported)
t                1826 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		lane_number = t;
t                1827 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		polarity = dsi->lanes[t].polarity;
t                1993 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	int t, i;
t                2007 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t = 100000;
t                2023 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		if (--t == 0) {
t                4317 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		const struct omap_dss_dsi_videomode_timings *t)
t                4319 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	unsigned long byteclk = t->hsclk / 4;
t                4322 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
t                4323 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
t                4324 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
t                4333 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
t                4335 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DSI_T(t->hss),
t                4336 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DSI_T(t->hsa),
t                4337 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DSI_T(t->hse),
t                4338 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DSI_T(t->hbp),
t                4340 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DSI_T(t->hfp),
t                4349 drivers/video/fbdev/omap2/omapfb/dss/dsi.c static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
t                4351 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	unsigned long pck = t->pixelclock;
t                4354 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	hact = t->x_res;
t                4355 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	bl = t->hsw + t->hbp + t->hfp;
t                4364 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			t->hsw, t->hbp, hact, t->hfp,
t                4366 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DISPC_T(t->hsw),
t                4367 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DISPC_T(t->hbp),
t                4369 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			TO_DISPC_T(t->hfp),
t                4378 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		const struct omap_dss_dsi_videomode_timings *t)
t                4381 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	unsigned long byteclk = t->hsclk / 4;
t                4386 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	dsi_tput = (u64)byteclk * t->ndl * 8;
t                4387 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	pck = (u32)div64_u64(dsi_tput, t->bitspp);
t                4388 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
t                4389 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
t                4392 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
t                4393 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
t                4394 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
t                4395 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	vm.x_res = t->hact;
t                4405 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	struct omap_video_timings *t = &ctx->dispc_vm;
t                4412 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	*t = *ctx->config->timings;
t                4413 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t->pixelclock = pck;
t                4414 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t->x_res = ctx->config->timings->x_res;
t                4415 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t->y_res = ctx->config->timings->y_res;
t                4416 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t->hsw = t->hfp = t->hbp = t->vsw = 1;
t                4417 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	t->vfp = t->vbp = 0;
t                4601 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		int t;
t                4604 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		t = 1 - hfp;
t                4605 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		hbp = max(hbp - t, 1);
t                4610 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			t = 1 - hfp;
t                4611 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			hsa = max(hsa - t, 1);
t                4660 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		int t;
t                4663 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		t = 1 - hfp;
t                4664 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		hbp = max(hbp - t, 1);
t                4669 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			t = 1 - hfp;
t                4670 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			hsa = max(hsa - t, 1);
t                4751 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	const struct omap_video_timings *t = cfg->timings;
t                4767 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	ctx->req_pck_min = t->pixelclock - 1000;
t                4768 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	ctx->req_pck_nom = t->pixelclock;
t                4769 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	ctx->req_pck_max = t->pixelclock + 1000;
t                 267 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h 	u32 t = 0, v;
t                 269 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h 		if (t++ > 10000)
t                 127 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c 		int t;
t                 135 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c 		t = 0;
t                 138 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c 			if (t++ > 10000) {
t                 191 drivers/video/fbdev/omap2/omapfb/dss/pll.c 	int t;
t                 194 drivers/video/fbdev/omap2/omapfb/dss/pll.c 	t = 100;
t                 195 drivers/video/fbdev/omap2/omapfb/dss/pll.c 	while (t-- > 0) {
t                 226 drivers/video/fbdev/omap2/omapfb/dss/pll.c 	int t = 100;
t                 228 drivers/video/fbdev/omap2/omapfb/dss/pll.c 	while (t-- > 0) {
t                 122 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	struct omap_video_timings *t = &sdi.timings;
t                 142 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t                 143 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t                 145 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
t                 153 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	if (pck != t->pixelclock) {
t                 155 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 			t->pixelclock, pck);
t                 157 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 		t->pixelclock = pck;
t                 161 drivers/video/fbdev/omap2/omapfb/dss/sdi.c 	dss_mgr_set_timings(out->manager, t);
t                 370 drivers/video/fbdev/omap2/omapfb/dss/venc.c 	int t = 1000;
t                 374 drivers/video/fbdev/omap2/omapfb/dss/venc.c 		if (--t == 0) {
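The omapfb dss entries (dsi.c, hdmi.h, hdmi4_core.c, pll.c, venc.c) share one more flavour of t: a plain retry counter for busy-waiting on a status bit, bailing out once the count is exhausted. Sketch with an invented register accessor and bit:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/bits.h>

#define STATUS_LOCKED	BIT(0)	/* invented "locked/ready" bit */

/* Poll an MMIO status register until a bit sets, giving up after a
 * fixed number of attempts, as the dss wait loops above do. */
static int wait_for_bit(void __iomem *reg)
{
	int t = 100;

	while (t-- > 0) {
		if (readl(reg) & STATUS_LOCKED)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}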
t                 122 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 				int t = x * 3 / w;
t                 126 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					if (t == 0)
t                 128 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					else if (t == 1)
t                 130 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					else if (t == 2)
t                 133 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					if (t == 0)
t                 135 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					else if (t == 1)
t                 137 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 					else if (t == 2)
t                2205 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		struct omap_video_timings *t)
t                2208 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		display->driver->get_timings(display, t);
t                2210 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t                2211 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		t->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
t                2212 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
t                2215 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->x_res = m->xres;
t                2216 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->y_res = m->yres;
t                2217 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->pixelclock = PICOS2KHZ(m->pixclock) * 1000;
t                2218 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->hsw = m->hsync_len;
t                2219 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->hfp = m->right_margin;
t                2220 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->hbp = m->left_margin;
t                2221 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->vsw = m->vsync_len;
t                2222 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->vfp = m->lower_margin;
t                2223 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->vbp = m->upper_margin;
t                2224 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->hsync_level = m->sync & FB_SYNC_HOR_HIGH_ACT ?
t                2227 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->vsync_level = m->sync & FB_SYNC_VERT_HIGH_ACT ?
t                2230 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	t->interlace = m->vmode & FB_VMODE_INTERLACED;
t                2264 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		struct omap_video_timings t;
t                2279 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		fb_videomode_to_omap_timings(m, display, &t);
t                2281 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		r = display->driver->check_timings(display, &t);
t                2549 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		struct omap_video_timings t;
t                2551 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		r = omapfb_find_best_mode(def_display, &t);
t                2554 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 			def_display->driver->set_timings(def_display, &t);
t                 137 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	int t;
t                 142 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	for (t = 0; t < ofbi->num_overlays; t++) {
t                 143 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		struct omap_overlay *ovl = ofbi->overlays[t];
t                 151 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 				t == 0 ? "" : ",", ovlnum);
t                 165 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	int i, t;
t                 170 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = 0; t < ofbi->num_overlays; t++) {
t                 171 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			if (ofbi->overlays[t] == ovl)
t                 243 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		int t, found;
t                 249 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = 0; t < num_ovls; ++t) {
t                 250 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			if (ovl == ovls[t]) {
t                 270 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = i + 1; t < ofbi->num_overlays; t++) {
t                 271 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			ofbi->rotation[t-1] = ofbi->rotation[t];
t                 272 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			ofbi->overlays[t-1] = ofbi->overlays[t];
t                 280 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		int t, found;
t                 286 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = 0; t < ofbi->num_overlays; ++t) {
t                 287 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			if (ovl == ofbi->overlays[t]) {
t                 326 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	int t;
t                 330 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	for (t = 0; t < ofbi->num_overlays; t++) {
t                 332 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 				t == 0 ? "" : ",", ofbi->rotation[t]);
t                 560 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		int t;
t                 561 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) {
t                 563 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 					&omapfb_attrs[t]);
t                 578 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 	int i, t;
t                 582 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++)
t                 584 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 					&omapfb_attrs[t]);
t                 309 drivers/video/fbdev/pxafb.c #define SET_PIXFMT(v, r, g, b, t)				\
t                 311 drivers/video/fbdev/pxafb.c 	(v)->transp.offset = (t) ? (r) + (g) + (b) : 0;		\
t                 312 drivers/video/fbdev/pxafb.c 	(v)->transp.length = (t) ? (t) : 0;			\
t                1229 drivers/video/fbdev/pxafb.c 	unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000);
t                1230 drivers/video/fbdev/pxafb.c 	return (t == 0) ? 1 : t;
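The two pxafb.c entries above convert a delay given in nanoseconds into LCD clock ticks and clamp the result to at least one tick. A minimal standalone sketch of just that arithmetic (function name and parameters are illustrative, not the driver's own):

/* Hedged sketch: time_ns is a delay in nanoseconds, lcd_clk the clock in Hz.
 * lcd_clk / 1000000 is the clock in cycles per microsecond, so the product
 * divided by 1000 gives whole clock ticks, clamped to a minimum of 1. */
static unsigned int example_ns_to_lcd_ticks(unsigned int time_ns, unsigned long lcd_clk)
{
	unsigned int t = time_ns * (lcd_clk / 1000000) / 1000;

	return t ? t : 1;
}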
t                  30 drivers/video/fbdev/riva/nvreg.h #define BITMASK(t,b) (((unsigned)(1U << (((t)-(b)+1)))-1)  << (b))
t                 116 drivers/video/fbdev/savage/savagefb.h #define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
t                 291 drivers/video/fbdev/sstfb.c static int sst_calc_pll(const int freq, int *freq_out, struct pll_timing *t)
t                 322 drivers/video/fbdev/sstfb.c 	t->p = p;
t                 323 drivers/video/fbdev/sstfb.c 	t->n = best_n;
t                 324 drivers/video/fbdev/sstfb.c 	t->m = best_m;
t                 325 drivers/video/fbdev/sstfb.c 	*freq_out = (DAC_FREF * (t->m + 2)) / ((1 << t->p) * (t->n + 2));
t                 327 drivers/video/fbdev/sstfb.c 		  t->m, t->n, t->p, *freq_out);
t                 967 drivers/video/fbdev/sstfb.c 		const struct pll_timing *t, const int clock)
t                 996 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_AC0_I, t->m);
t                 997 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_AC1_I, t->p << 6 | t->n);
t                1002 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_BD0_I, t->m);
t                1003 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_BD1_I, t->p << 6 | t->n);
t                1021 drivers/video/fbdev/sstfb.c 		const struct pll_timing *t, const int clock)
t                1031 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->m);
t                1032 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n);
t                1042 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->m);
t                1043 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n);
t                  94 drivers/video/fbdev/uvesafb.c 	if (task->t.buf_len < utask->buf_len ||
t                 103 drivers/video/fbdev/uvesafb.c 	memcpy(&task->t, utask, sizeof(*utask));
t                 105 drivers/video/fbdev/uvesafb.c 	if (task->t.buf_len && task->buf)
t                 106 drivers/video/fbdev/uvesafb.c 		memcpy(task->buf, utask + 1, task->t.buf_len);
t                 149 drivers/video/fbdev/uvesafb.c 	int len = sizeof(task->t) + task->t.buf_len;
t                 173 drivers/video/fbdev/uvesafb.c 	memcpy(m + 1, &task->t, sizeof(task->t));
t                 176 drivers/video/fbdev/uvesafb.c 	memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len);
t                 216 drivers/video/fbdev/uvesafb.c 	if (!err && !(task->t.flags & TF_EXIT))
t                 371 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f04;
t                 372 drivers/video/fbdev/uvesafb.c 	task->t.regs.ecx = 0x000f;
t                 373 drivers/video/fbdev/uvesafb.c 	task->t.regs.edx = 0x0001;
t                 374 drivers/video/fbdev/uvesafb.c 	task->t.flags = TF_BUF_RET | TF_BUF_ESBX;
t                 375 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = par->vbe_state_size;
t                 379 drivers/video/fbdev/uvesafb.c 	if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
t                 381 drivers/video/fbdev/uvesafb.c 			task->t.regs.eax, err);
t                 402 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f04;
t                 403 drivers/video/fbdev/uvesafb.c 	task->t.regs.ecx = 0x000f;
t                 404 drivers/video/fbdev/uvesafb.c 	task->t.regs.edx = 0x0002;
t                 405 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = par->vbe_state_size;
t                 406 drivers/video/fbdev/uvesafb.c 	task->t.flags = TF_BUF_ESBX;
t                 410 drivers/video/fbdev/uvesafb.c 	if (err || (task->t.regs.eax & 0xffff) != 0x004f)
t                 412 drivers/video/fbdev/uvesafb.c 			task->t.regs.eax, err);
t                 422 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f00;
t                 423 drivers/video/fbdev/uvesafb.c 	task->t.flags = TF_VBEIB;
t                 424 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = sizeof(struct vbe_ib);
t                 429 drivers/video/fbdev/uvesafb.c 	if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
t                 431 drivers/video/fbdev/uvesafb.c 		       (u32)task->t.regs.eax, err);
t                 502 drivers/video/fbdev/uvesafb.c 		task->t.regs.eax = 0x4f01;
t                 503 drivers/video/fbdev/uvesafb.c 		task->t.regs.ecx = (u32) *mode;
t                 504 drivers/video/fbdev/uvesafb.c 		task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
t                 505 drivers/video/fbdev/uvesafb.c 		task->t.buf_len = sizeof(struct vbe_mode_ib);
t                 509 drivers/video/fbdev/uvesafb.c 		if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
t                 511 drivers/video/fbdev/uvesafb.c 				*mode, (u32)task->t.regs.eax, err);
t                 560 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f0a;
t                 561 drivers/video/fbdev/uvesafb.c 	task->t.regs.ebx = 0x0;
t                 564 drivers/video/fbdev/uvesafb.c 	if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
t                 567 drivers/video/fbdev/uvesafb.c 		par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
t                 568 drivers/video/fbdev/uvesafb.c 						+ task->t.regs.edi);
t                 572 drivers/video/fbdev/uvesafb.c 			(u16)task->t.regs.es, (u16)task->t.regs.edi);
t                 621 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f15;
t                 622 drivers/video/fbdev/uvesafb.c 	task->t.regs.ebx = 0;
t                 623 drivers/video/fbdev/uvesafb.c 	task->t.regs.ecx = 0;
t                 624 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = 0;
t                 625 drivers/video/fbdev/uvesafb.c 	task->t.flags = 0;
t                 629 drivers/video/fbdev/uvesafb.c 	if ((task->t.regs.eax & 0xffff) != 0x004f || err)
t                 632 drivers/video/fbdev/uvesafb.c 	if ((task->t.regs.ebx & 0x3) == 3) {
t                 634 drivers/video/fbdev/uvesafb.c 	} else if ((task->t.regs.ebx & 0x3) == 2) {
t                 636 drivers/video/fbdev/uvesafb.c 	} else if ((task->t.regs.ebx & 0x3) == 1) {
t                 643 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f15;
t                 644 drivers/video/fbdev/uvesafb.c 	task->t.regs.ebx = 1;
t                 645 drivers/video/fbdev/uvesafb.c 	task->t.regs.ecx = task->t.regs.edx = 0;
t                 646 drivers/video/fbdev/uvesafb.c 	task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
t                 647 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = EDID_LENGTH;
t                 654 drivers/video/fbdev/uvesafb.c 	if ((task->t.regs.eax & 0xffff) == 0x004f && !err) {
t                 764 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f04;
t                 765 drivers/video/fbdev/uvesafb.c 	task->t.regs.ecx = 0x000f;
t                 766 drivers/video/fbdev/uvesafb.c 	task->t.regs.edx = 0x0000;
t                 767 drivers/video/fbdev/uvesafb.c 	task->t.flags = 0;
t                 771 drivers/video/fbdev/uvesafb.c 	if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
t                 773 drivers/video/fbdev/uvesafb.c 			task->t.regs.eax, err);
t                 778 drivers/video/fbdev/uvesafb.c 	par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff);
t                 970 drivers/video/fbdev/uvesafb.c 		task->t.regs.eax = 0x4f09;
t                 971 drivers/video/fbdev/uvesafb.c 		task->t.regs.ebx = 0x0;
t                 972 drivers/video/fbdev/uvesafb.c 		task->t.regs.ecx = count;
t                 973 drivers/video/fbdev/uvesafb.c 		task->t.regs.edx = start;
t                 974 drivers/video/fbdev/uvesafb.c 		task->t.flags = TF_BUF_ESDI;
t                 975 drivers/video/fbdev/uvesafb.c 		task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count;
t                 979 drivers/video/fbdev/uvesafb.c 		if ((task->t.regs.eax & 0xffff) != 0x004f)
t                1140 drivers/video/fbdev/uvesafb.c 		task->t.regs.eax = 0x4f10;
t                1143 drivers/video/fbdev/uvesafb.c 			task->t.regs.ebx = 0x0001;
t                1146 drivers/video/fbdev/uvesafb.c 			task->t.regs.ebx = 0x0101;	/* standby */
t                1149 drivers/video/fbdev/uvesafb.c 			task->t.regs.ebx = 0x0401;	/* powerdown */
t                1156 drivers/video/fbdev/uvesafb.c 		if (err || (task->t.regs.eax & 0xffff) != 0x004f)
t                1200 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x0003;
t                1237 drivers/video/fbdev/uvesafb.c 	task->t.regs.eax = 0x4f02;
t                1238 drivers/video/fbdev/uvesafb.c 	task->t.regs.ebx = mode->mode_id | 0x4000;	/* use LFB */
t                1242 drivers/video/fbdev/uvesafb.c 		task->t.regs.ebx |= 0x0800;		/* use CRTC data */
t                1243 drivers/video/fbdev/uvesafb.c 		task->t.flags = TF_BUF_ESDI;
t                1274 drivers/video/fbdev/uvesafb.c 	task->t.buf_len = sizeof(struct vbe_crtc_ib);
t                1278 drivers/video/fbdev/uvesafb.c 	if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
t                1285 drivers/video/fbdev/uvesafb.c 				task->t.regs.eax, err);
t                1293 drivers/video/fbdev/uvesafb.c 			       task->t.regs.eax, err);
t                1304 drivers/video/fbdev/uvesafb.c 		task->t.regs.eax = 0x4f08;
t                1305 drivers/video/fbdev/uvesafb.c 		task->t.regs.ebx = 0x0800;
t                1308 drivers/video/fbdev/uvesafb.c 		if (err || (task->t.regs.eax & 0xffff) != 0x004f ||
t                1309 drivers/video/fbdev/uvesafb.c 		    ((task->t.regs.ebx & 0xff00) >> 8) != 8) {
t                1929 drivers/video/fbdev/uvesafb.c 			task->t.flags = TF_EXIT;
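The uvesafb.c entries above all follow one call shape: load a VBE function number into task->t.regs.eax, run the request, and treat 0x004f in the low 16 bits of eax as the VBE success status. A hedged fragment restating that shape; it leans on the driver's own uvesafb_exec() and local types, so it is a sketch of the pattern rather than standalone code:

/* Sketch only: issue VBE function 0x4f00 (get controller info) and check
 * the VBE status word. uvesafb_exec(), struct uvesafb_ktask, struct vbe_ib
 * and TF_VBEIB come from the driver's own headers. */
static int example_vbe_getinfo(struct uvesafb_ktask *task, struct vbe_ib *ib)
{
	int err;

	task->t.regs.eax = 0x4f00;
	task->t.flags = TF_VBEIB;
	task->t.buf_len = sizeof(struct vbe_ib);
	task->buf = ib;

	err = uvesafb_exec(task);
	if (err || (task->t.regs.eax & 0xffff) != 0x004f)
		return err ? err : -EINVAL;

	return 0;
}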
t                 414 drivers/virt/vboxguest/vboxguest_core.c static void vbg_heartbeat_timer(struct timer_list *t)
t                 416 drivers/virt/vboxguest/vboxguest_core.c 	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
t                 487 drivers/visorbus/visorbus_main.c static void dev_periodic_work(struct timer_list *t)
t                 489 drivers/visorbus/visorbus_main.c 	struct visor_device *dev = from_timer(dev, t, timer);
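Several of the timer callbacks indexed here (vboxguest and visorbus above, at91sam9_wdt, bcm47xx_wdt, lpc18xx_wdt and shwdt further down) share the same shape: the struct timer_list *t argument is mapped back to the structure that embeds it with from_timer(). A minimal hedged sketch of that pattern, with made-up names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_dev {
	struct timer_list timer;
	/* ... other driver state ... */
};

/* from_timer() is container_of() for timers: given the struct timer_list *
 * handed to the callback, it recovers the structure that embeds it. */
static void example_timer_fn(struct timer_list *t)
{
	struct example_dev *dev = from_timer(dev, t, timer);

	/* ... do the periodic work on dev ... */
	mod_timer(&dev->timer, jiffies + HZ);
}

The callback would be associated with the timer at probe time via timer_setup(&dev->timer, example_timer_fn, 0).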
t                 412 drivers/w1/slaves/w1_therm.c 	s16 t = le16_to_cpup((__le16 *)rom);
t                 414 drivers/w1/slaves/w1_therm.c 	return t*1000/16;
t                 419 drivers/w1/slaves/w1_therm.c 	int t, h;
t                 425 drivers/w1/slaves/w1_therm.c 		t = ((s32)rom[0] >> 1)*1000;
t                 427 drivers/w1/slaves/w1_therm.c 		t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
t                 429 drivers/w1/slaves/w1_therm.c 	t -= 250;
t                 432 drivers/w1/slaves/w1_therm.c 	t += h;
t                 434 drivers/w1/slaves/w1_therm.c 	return t;
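The w1_therm.c entries above scale a raw sensor count in 1/16 degC units to millidegrees. A small worked illustration of that arithmetic (the raw value is chosen arbitrarily):

#include <linux/types.h>

/* 0x0191 (= 401) raw counts at 1/16 degC per count gives
 * 401 * 1000 / 16 = 25062 millidegrees, i.e. roughly 25.06 degC. */
static int example_w1_convert(void)
{
	s16 raw = 0x0191;

	return raw * 1000 / 16;		/* 25062 */
}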
t                  97 drivers/watchdog/advantechwdt.c static int advwdt_set_heartbeat(int t)
t                  99 drivers/watchdog/advantechwdt.c 	if (t < 1 || t > 63)
t                 101 drivers/watchdog/advantechwdt.c 	timeout = t;
t                 104 drivers/watchdog/alim1535_wdt.c static int ali_settimer(int t)
t                 106 drivers/watchdog/alim1535_wdt.c 	if (t < 0)
t                 108 drivers/watchdog/alim1535_wdt.c 	else if (t < 60)
t                 109 drivers/watchdog/alim1535_wdt.c 		ali_timeout_bits = t|(1 << 6);
t                 110 drivers/watchdog/alim1535_wdt.c 	else if (t < 3600)
t                 111 drivers/watchdog/alim1535_wdt.c 		ali_timeout_bits = (t / 60)|(1 << 7);
t                 112 drivers/watchdog/alim1535_wdt.c 	else if (t < 18000)
t                 113 drivers/watchdog/alim1535_wdt.c 		ali_timeout_bits = (t / 300)|(1 << 6)|(1 << 7);
t                 117 drivers/watchdog/alim1535_wdt.c 	timeout = t;
t                  49 drivers/watchdog/at91sam9_wdt.c #define ticks_to_hz_rounddown(t)	((((t) + 1) * HZ) >> 8)
t                  50 drivers/watchdog/at91sam9_wdt.c #define ticks_to_hz_roundup(t)		(((((t) + 1) * HZ) + 255) >> 8)
t                  51 drivers/watchdog/at91sam9_wdt.c #define ticks_to_secs(t)		(((t) + 1) >> 8)
t                 120 drivers/watchdog/at91sam9_wdt.c static void at91_ping(struct timer_list *t)
t                 122 drivers/watchdog/at91sam9_wdt.c 	struct at91wdt *wdt = from_timer(wdt, t, timer);
t                 189 drivers/watchdog/ath79_wdt.c 	int t;
t                 211 drivers/watchdog/ath79_wdt.c 		err = get_user(t, p);
t                 215 drivers/watchdog/ath79_wdt.c 		err = ath79_wdt_set_timeout(t);
t                 106 drivers/watchdog/bcm47xx_wdt.c static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
t                 108 drivers/watchdog/bcm47xx_wdt.c 	struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
t                  76 drivers/watchdog/bcm7038_wdt.c 				   unsigned int t)
t                  80 drivers/watchdog/bcm7038_wdt.c 	wdog->timeout = t;
t                 210 drivers/watchdog/bcm_kona_wdt.c 	unsigned int t)
t                 212 drivers/watchdog/bcm_kona_wdt.c 	wdog->timeout = t;
t                  78 drivers/watchdog/digicolor_wdt.c static int dc_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
t                  82 drivers/watchdog/digicolor_wdt.c 	dc_wdt_set(wdt, t * clk_get_rate(wdt->clk));
t                  83 drivers/watchdog/digicolor_wdt.c 	wdog->timeout = t;
t                  42 drivers/watchdog/ebc-c384_wdt.c 	unsigned t = wdev->timeout;
t                  45 drivers/watchdog/ebc-c384_wdt.c 	if (t > 255)
t                  46 drivers/watchdog/ebc-c384_wdt.c 		t = DIV_ROUND_UP(t, 60);
t                  48 drivers/watchdog/ebc-c384_wdt.c 	outb(t, PET_ADDR);
t                  60 drivers/watchdog/ebc-c384_wdt.c static int ebc_c384_wdt_set_timeout(struct watchdog_device *wdev, unsigned t)
t                  63 drivers/watchdog/ebc-c384_wdt.c 	if (t > 255) {
t                  65 drivers/watchdog/ebc-c384_wdt.c 		wdev->timeout = roundup(t, 60);
t                  70 drivers/watchdog/ebc-c384_wdt.c 		wdev->timeout = t;
t                 338 drivers/watchdog/iTCO_wdt.c static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
t                 345 drivers/watchdog/iTCO_wdt.c 	tmrval = seconds_to_ticks(p, t);
t                 384 drivers/watchdog/iTCO_wdt.c 	wd_dev->timeout = t;
t                 131 drivers/watchdog/ib700wdt.c static int ibwdt_set_heartbeat(int t)
t                 133 drivers/watchdog/ib700wdt.c 	if (t < 0 || t > 30)
t                 136 drivers/watchdog/ib700wdt.c 	timeout = t;
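Many of the watchdog entries in this stretch (advantechwdt, alim1535_wdt, ib700wdt, and the watchdog-core based drivers further down) repeat one shape: validate the requested timeout, program the hardware, and record the value. A minimal hedged sketch of that shape against the watchdog core, with the device-specific register write replaced by a hypothetical stub:

#include <linux/errno.h>
#include <linux/watchdog.h>

/* Hypothetical hardware access: stands in for the register write each
 * driver performs with its own addresses and encodings. */
static void example_hw_set_count(unsigned int t)
{
	/* write t to the device's countdown register */
}

/* Range-check, program, record: the pattern shared by the set_timeout /
 * set_heartbeat helpers indexed here. Names are illustrative. */
static int example_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	if (t < wdd->min_timeout || t > wdd->max_timeout)
		return -EINVAL;

	example_hw_set_count(t);
	wdd->timeout = t;

	return 0;
}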
t                  95 drivers/watchdog/ie6xx_wdt.c static int ie6xx_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
t                 104 drivers/watchdog/ie6xx_wdt.c 	preload = (t * clock) >> 15;
t                 128 drivers/watchdog/ie6xx_wdt.c 	wdd->timeout = t;
t                 193 drivers/watchdog/intel_scu_watchdog.c static int intel_scu_set_heartbeat(u32 t)
t                 200 drivers/watchdog/intel_scu_watchdog.c 	watchdog_device.timer_set = t;
t                 161 drivers/watchdog/it87_wdt.c static void _wdt_update_timeout(unsigned int t)
t                 168 drivers/watchdog/it87_wdt.c 	if (t <= max_units)
t                 171 drivers/watchdog/it87_wdt.c 		t /= 60;
t                 177 drivers/watchdog/it87_wdt.c 	superio_outb(t, WDTVALLSB);
t                 179 drivers/watchdog/it87_wdt.c 		superio_outb(t >> 8, WDTVALMSB);
t                 182 drivers/watchdog/it87_wdt.c static int wdt_update_timeout(unsigned int t)
t                 191 drivers/watchdog/it87_wdt.c 	_wdt_update_timeout(t);
t                 197 drivers/watchdog/it87_wdt.c static int wdt_round_time(int t)
t                 199 drivers/watchdog/it87_wdt.c 	t += 59;
t                 200 drivers/watchdog/it87_wdt.c 	t -= t % 60;
t                 201 drivers/watchdog/it87_wdt.c 	return t;
t                 226 drivers/watchdog/it87_wdt.c static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
t                 230 drivers/watchdog/it87_wdt.c 	if (t > max_units)
t                 231 drivers/watchdog/it87_wdt.c 		t = wdt_round_time(t);
t                 233 drivers/watchdog/it87_wdt.c 	wdd->timeout = t;
t                 236 drivers/watchdog/it87_wdt.c 		ret = wdt_update_timeout(t);
t                  78 drivers/watchdog/lpc18xx_wdt.c static void lpc18xx_wdt_timer_feed(struct timer_list *t)
t                  80 drivers/watchdog/lpc18xx_wdt.c 	struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer);
t                  60 drivers/watchdog/mt7621_wdt.c static int mt7621_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
t                  62 drivers/watchdog/mt7621_wdt.c 	w->timeout = t;
t                  63 drivers/watchdog/mt7621_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1LOAD, t * 1000);
t                  71 drivers/watchdog/mt7621_wdt.c 	u32 t;
t                  78 drivers/watchdog/mt7621_wdt.c 	t = rt_wdt_r32(TIMER_REG_TMR1CTL);
t                  79 drivers/watchdog/mt7621_wdt.c 	t |= TMR1CTL_ENABLE;
t                  80 drivers/watchdog/mt7621_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1CTL, t);
t                  87 drivers/watchdog/mt7621_wdt.c 	u32 t;
t                  91 drivers/watchdog/mt7621_wdt.c 	t = rt_wdt_r32(TIMER_REG_TMR1CTL);
t                  92 drivers/watchdog/mt7621_wdt.c 	t &= ~TMR1CTL_ENABLE;
t                  93 drivers/watchdog/mt7621_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1CTL, t);
t                 108 drivers/watchdog/nv_tco.c static int tco_timer_set_heartbeat(int t)
t                 120 drivers/watchdog/nv_tco.c 	if (t < 0 || t > 0x3f)
t                 122 drivers/watchdog/nv_tco.c 	tmrval = seconds_to_ticks(t);
t                 143 drivers/watchdog/nv_tco.c 	heartbeat = t;
t                 426 drivers/watchdog/octeon-wdt-main.c static void octeon_wdt_calc_parameters(int t)
t                 437 drivers/watchdog/octeon-wdt-main.c 	while ((t % timeout_sec) != 0)
t                 440 drivers/watchdog/octeon-wdt-main.c 	periods = t / timeout_sec;
t                 448 drivers/watchdog/octeon-wdt-main.c 	heartbeat = t;
t                 453 drivers/watchdog/octeon-wdt-main.c 				  unsigned int t)
t                 460 drivers/watchdog/octeon-wdt-main.c 	if (t <= 0)
t                 463 drivers/watchdog/octeon-wdt-main.c 	octeon_wdt_calc_parameters(t);
t                 469 drivers/watchdog/pcwd.c static int pcwd_set_heartbeat(int t)
t                 471 drivers/watchdog/pcwd.c 	if (t < 2 || t > 7200) /* arbitrary upper limit */
t                 474 drivers/watchdog/pcwd.c 	heartbeat = t;
t                 319 drivers/watchdog/pcwd_pci.c static int pcipcwd_set_heartbeat(int t)
t                 321 drivers/watchdog/pcwd_pci.c 	int t_msb = t / 256;
t                 322 drivers/watchdog/pcwd_pci.c 	int t_lsb = t % 256;
t                 324 drivers/watchdog/pcwd_pci.c 	if ((t < 0x0001) || (t > 0xFFFF))
t                 330 drivers/watchdog/pcwd_pci.c 	heartbeat = t;
t                 310 drivers/watchdog/pcwd_usb.c static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
t                 312 drivers/watchdog/pcwd_usb.c 	unsigned char msb = t / 256;
t                 313 drivers/watchdog/pcwd_usb.c 	unsigned char lsb = t % 256;
t                 315 drivers/watchdog/pcwd_usb.c 	if ((t < 0x0001) || (t > 0xFFFF))
t                 321 drivers/watchdog/pcwd_usb.c 	heartbeat = t;
t                  48 drivers/watchdog/rn5t618_wdt.c 				   unsigned int t)
t                  54 drivers/watchdog/rn5t618_wdt.c 		if (rn5t618_wdt_map[i].time + 1 >= t)
t                  73 drivers/watchdog/rt2880_wdt.c 	u32 t;
t                  75 drivers/watchdog/rt2880_wdt.c 	t = rt_wdt_r32(TIMER_REG_TMR1CTL);
t                  76 drivers/watchdog/rt2880_wdt.c 	t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT |
t                  78 drivers/watchdog/rt2880_wdt.c 	t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT |
t                  80 drivers/watchdog/rt2880_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1CTL, t);
t                  84 drivers/watchdog/rt2880_wdt.c 	t = rt_wdt_r32(TIMER_REG_TMR1CTL);
t                  85 drivers/watchdog/rt2880_wdt.c 	t |= TMR1CTL_ENABLE;
t                  86 drivers/watchdog/rt2880_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1CTL, t);
t                  93 drivers/watchdog/rt2880_wdt.c 	u32 t;
t                  97 drivers/watchdog/rt2880_wdt.c 	t = rt_wdt_r32(TIMER_REG_TMR1CTL);
t                  98 drivers/watchdog/rt2880_wdt.c 	t &= ~TMR1CTL_ENABLE;
t                  99 drivers/watchdog/rt2880_wdt.c 	rt_wdt_w32(TIMER_REG_TMR1CTL, t);
t                 104 drivers/watchdog/rt2880_wdt.c static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
t                 106 drivers/watchdog/rt2880_wdt.c 	w->timeout = t;
t                  70 drivers/watchdog/sb_wdog.c static void sbwdog_set(char __iomem *wdog, unsigned long t)
t                  74 drivers/watchdog/sb_wdog.c 	__raw_writeq(t & 0x7fffffUL, wdog - 0x10);
t                  71 drivers/watchdog/sbc7240_wdt.c static int wdt_set_timeout(int t)
t                  73 drivers/watchdog/sbc7240_wdt.c 	if (t < 1 || t > SBC7240_MAX_TIMEOUT) {
t                  78 drivers/watchdog/sbc7240_wdt.c 	outb_p((unsigned)t, SBC7240_SET_TIMEOUT_PORT);
t                  79 drivers/watchdog/sbc7240_wdt.c 	timeout = t;
t                  80 drivers/watchdog/sbc7240_wdt.c 	pr_info("timeout set to %d seconds\n", t);
t                 205 drivers/watchdog/sc520_wdt.c static int wdt_set_heartbeat(int t)
t                 207 drivers/watchdog/sc520_wdt.c 	if ((t < 1) || (t > 3600))	/* arbitrary upper limit */
t                 210 drivers/watchdog/sc520_wdt.c 	timeout = t;
t                 109 drivers/watchdog/sch311x_wdt.c static void sch311x_wdt_set_timeout(int t)
t                 114 drivers/watchdog/sch311x_wdt.c 	if (t > 255) {
t                 116 drivers/watchdog/sch311x_wdt.c 		t /= 60;
t                 129 drivers/watchdog/sch311x_wdt.c 	outb(t, sch311x_wdt_data.runtime_reg + WDT_VAL);
t                 134 drivers/watchdog/sch311x_wdt.c 	unsigned char t;
t                 149 drivers/watchdog/sch311x_wdt.c 	t = inb(sch311x_wdt_data.runtime_reg + GP60);
t                 150 drivers/watchdog/sch311x_wdt.c 	outb((t & ~0x0d) | 0x0c, sch311x_wdt_data.runtime_reg + GP60);
t                 158 drivers/watchdog/sch311x_wdt.c 	unsigned char t;
t                 163 drivers/watchdog/sch311x_wdt.c 	t = inb(sch311x_wdt_data.runtime_reg + GP60);
t                 164 drivers/watchdog/sch311x_wdt.c 	outb((t & ~0x0d) | 0x01, sch311x_wdt_data.runtime_reg + GP60);
t                 178 drivers/watchdog/sch311x_wdt.c static int sch311x_wdt_set_heartbeat(int t)
t                 180 drivers/watchdog/sch311x_wdt.c 	if (t < 1 || t > (255*60))
t                 185 drivers/watchdog/sch311x_wdt.c 	if (t > 255)
t                 186 drivers/watchdog/sch311x_wdt.c 		t = (((t - 1) / 60) + 1) * 60;
t                 188 drivers/watchdog/sch311x_wdt.c 	timeout = t;
t                 158 drivers/watchdog/shwdt.c static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
t                 163 drivers/watchdog/shwdt.c 	if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */
t                 167 drivers/watchdog/shwdt.c 	heartbeat = t;
t                 168 drivers/watchdog/shwdt.c 	wdt_dev->timeout = t;
t                 174 drivers/watchdog/shwdt.c static void sh_wdt_ping(struct timer_list *t)
t                 176 drivers/watchdog/shwdt.c 	struct sh_wdt *wdt = from_timer(wdt, t, timer);
t                 129 drivers/watchdog/sp5100_tco.c 				 unsigned int t)
t                 134 drivers/watchdog/sp5100_tco.c 	writel(t, SP5100_WDT_COUNT(tco->tcobase));
t                 136 drivers/watchdog/sp5100_tco.c 	wdd->timeout = t;
t                  44 drivers/watchdog/tqmx86_wdt.c static int tqmx86_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
t                  49 drivers/watchdog/tqmx86_wdt.c 	t = roundup_pow_of_two(t);
t                  50 drivers/watchdog/tqmx86_wdt.c 	val = ilog2(t) | 0x90;
t                  54 drivers/watchdog/tqmx86_wdt.c 	wdd->timeout = t;
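For the tqmx86_wdt entries just above, the requested timeout is first rounded up to a power of two and then encoded as its base-2 logarithm OR'd with 0x90; a request of 10 seconds becomes roundup_pow_of_two(10) = 16 and ilog2(16) | 0x90 = 0x94. A tiny hedged illustration of that encoding:

#include <linux/log2.h>
#include <linux/types.h>

/* Illustration only: round the timeout up to a power of two and pack its
 * exponent into the low bits of the control value, as the listing shows. */
static u8 example_tqmx86_encode(unsigned int t)
{
	t = roundup_pow_of_two(t);

	return ilog2(t) | 0x90;		/* 10 s -> 16 s -> 0x94 */
}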
t                 142 drivers/watchdog/uniphier_wdt.c 					 unsigned int t)
t                 148 drivers/watchdog/uniphier_wdt.c 	tmp_timeout = roundup_pow_of_two(t);
t                 152 drivers/watchdog/w83627hf_wdt.c 	unsigned char t;
t                 161 drivers/watchdog/w83627hf_wdt.c 	t = superio_inb(0x30);
t                 162 drivers/watchdog/w83627hf_wdt.c 	if (!(t & 0x01))
t                 163 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x30, t | 0x01);
t                 168 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(0x2B) & ~0x10;
t                 169 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
t                 173 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(0x29) & ~0x60;
t                 174 drivers/watchdog/w83627hf_wdt.c 		t |= 0x20;
t                 175 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x29, t);
t                 179 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(0x2b) & ~0x04;
t                 180 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x2b, t);
t                 183 drivers/watchdog/w83627hf_wdt.c 		t = (superio_inb(0x2B) & ~0x08) | 0x04;
t                 184 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
t                 188 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
t                 189 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
t                 190 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(cr_wdt_control);
t                 191 drivers/watchdog/w83627hf_wdt.c 		t |= 0x02;	/* enable the WDTO# output low pulse
t                 193 drivers/watchdog/w83627hf_wdt.c 		superio_outb(cr_wdt_control, t);
t                 198 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
t                 199 drivers/watchdog/w83627hf_wdt.c 		superio_outb(0x2C, t);
t                 220 drivers/watchdog/w83627hf_wdt.c 		t = superio_inb(cr_wdt_control);
t                 221 drivers/watchdog/w83627hf_wdt.c 		t |= 0x02;	/* enable the WDTO# output low pulse
t                 223 drivers/watchdog/w83627hf_wdt.c 		superio_outb(cr_wdt_control, t);
t                 229 drivers/watchdog/w83627hf_wdt.c 	t = superio_inb(cr_wdt_timeout);
t                 230 drivers/watchdog/w83627hf_wdt.c 	if (t != 0) {
t                 242 drivers/watchdog/w83627hf_wdt.c 	t = superio_inb(cr_wdt_control) & ~0x0C;
t                 243 drivers/watchdog/w83627hf_wdt.c 	superio_outb(cr_wdt_control, t);
t                 246 drivers/watchdog/w83627hf_wdt.c 	t = superio_inb(cr_wdt_csr) & ~0xD0;
t                 247 drivers/watchdog/w83627hf_wdt.c 	superio_outb(cr_wdt_csr, t);
t                 220 drivers/watchdog/w83977f_wdt.c static int wdt_set_timeout(int t)
t                 231 drivers/watchdog/w83977f_wdt.c 	if (t < 15)
t                 234 drivers/watchdog/w83977f_wdt.c 	tmrval = ((t + 15) + 29) / 30;
t                 115 drivers/watchdog/watchdog_core.c 	unsigned int t = 0;
t                 133 drivers/watchdog/watchdog_core.c 	    of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
t                 134 drivers/watchdog/watchdog_core.c 		if (t && !watchdog_timeout_invalid(wdd, t)) {
t                 135 drivers/watchdog/watchdog_core.c 			wdd->timeout = t;
t                 138 drivers/watchdog/watchdog_core.c 		pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
t                 102 drivers/watchdog/watchdog_dev.c 	unsigned int t = wdd->timeout * 1000;
t                 117 drivers/watchdog/watchdog_dev.c 	return (hm && watchdog_active(wdd) && t > hm) ||
t                 118 drivers/watchdog/watchdog_dev.c 		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
t                 156 drivers/watchdog/watchdog_dev.c 		ktime_t t = watchdog_next_keepalive(wdd);
t                 158 drivers/watchdog/watchdog_dev.c 		if (t > 0)
t                 159 drivers/watchdog/watchdog_dev.c 			hrtimer_start(&wd_data->timer, t,
t                 154 drivers/watchdog/watchdog_pretimeout.c 	struct governor_priv *priv, *t;
t                 158 drivers/watchdog/watchdog_pretimeout.c 	list_for_each_entry_safe(priv, t, &governor_list, entry) {
t                 198 drivers/watchdog/watchdog_pretimeout.c 	struct watchdog_pretimeout *p, *t;
t                 206 drivers/watchdog/watchdog_pretimeout.c 	list_for_each_entry_safe(p, t, &pretimeout_list, entry) {
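The watchdog_pretimeout.c entries above (and the xen-pciback entries below) use list_for_each_entry_safe() with t as the spare cursor, which is what allows the current element to be unlinked and freed while the walk continues. A minimal hedged sketch of that pattern with illustrative types:

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	struct list_head entry;
};

/* `t` caches the next element before the body runs, so deleting and
 * freeing the current one does not break the iteration. */
static void example_drain(struct list_head *head)
{
	struct example_item *p, *t;

	list_for_each_entry_safe(p, t, head, entry) {
		list_del(&p->entry);
		kfree(p);
	}
}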
t                 182 drivers/watchdog/wdt.c static int wdt_set_heartbeat(int t)
t                 184 drivers/watchdog/wdt.c 	if (t < 1 || t > 65535)
t                 187 drivers/watchdog/wdt.c 	heartbeat = t;
t                 188 drivers/watchdog/wdt.c 	wd_heartbeat = t * 100;
t                 199 drivers/watchdog/wdt977.c static int wdt977_set_timeout(int t)
t                 204 drivers/watchdog/wdt977.c 	tmrval = (t + 59) / 60;
t                 219 drivers/watchdog/wdt977.c 	timeout = t;
t                 216 drivers/watchdog/wdt_pci.c static int wdtpci_set_heartbeat(int t)
t                 219 drivers/watchdog/wdt_pci.c 	if (t < 1 || t > 65535)
t                 222 drivers/watchdog/wdt_pci.c 	heartbeat = t;
t                 223 drivers/watchdog/wdt_pci.c 	wd_heartbeat = t * 100;
t                 292 drivers/xen/xen-pciback/conf_space.c 	struct config_field_entry *cfg_entry, *t;
t                 300 drivers/xen/xen-pciback/conf_space.c 	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
t                 336 drivers/xen/xen-pciback/conf_space.c 	struct config_field_entry *cfg_entry, *t;
t                 343 drivers/xen/xen-pciback/conf_space.c 	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
t                  76 drivers/xen/xen-pciback/passthrough.c 	struct pci_dev_entry *dev_entry, *t;
t                  81 drivers/xen/xen-pciback/passthrough.c 	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
t                 162 drivers/xen/xen-pciback/passthrough.c 	struct pci_dev_entry *dev_entry, *t;
t                 164 drivers/xen/xen-pciback/passthrough.c 	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
t                1107 drivers/xen/xen-pciback/pci_stub.c 	struct pcistub_device_id *pci_dev_id, *t;
t                1112 drivers/xen/xen-pciback/pci_stub.c 	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
t                  73 drivers/xen/xen-pciback/vpci.c 	struct pci_dev_entry *t, *dev_entry;
t                 104 drivers/xen/xen-pciback/vpci.c 			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
t                 107 drivers/xen/xen-pciback/vpci.c 			if (match_slot(dev, t->dev)) {
t                 302 drivers/xen/xenbus/xenbus_xs.c static void *xs_talkv(struct xenbus_transaction t,
t                 324 drivers/xen/xenbus/xenbus_xs.c 	msg.tx_id = t.id;
t                 355 drivers/xen/xenbus/xenbus_xs.c static void *xs_single(struct xenbus_transaction t,
t                 364 drivers/xen/xenbus/xenbus_xs.c 	return xs_talkv(t, type, &iovec, 1, len);
t                 422 drivers/xen/xenbus/xenbus_xs.c char **xenbus_directory(struct xenbus_transaction t,
t                 432 drivers/xen/xenbus/xenbus_xs.c 	strings = xs_single(t, XS_DIRECTORY, path, &len);
t                 442 drivers/xen/xenbus/xenbus_xs.c int xenbus_exists(struct xenbus_transaction t,
t                 448 drivers/xen/xenbus/xenbus_xs.c 	d = xenbus_directory(t, dir, node, &dir_n);
t                 460 drivers/xen/xenbus/xenbus_xs.c void *xenbus_read(struct xenbus_transaction t,
t                 470 drivers/xen/xenbus/xenbus_xs.c 	ret = xs_single(t, XS_READ, path, len);
t                 479 drivers/xen/xenbus/xenbus_xs.c int xenbus_write(struct xenbus_transaction t,
t                 495 drivers/xen/xenbus/xenbus_xs.c 	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
t                 502 drivers/xen/xenbus/xenbus_xs.c int xenbus_mkdir(struct xenbus_transaction t,
t                 512 drivers/xen/xenbus/xenbus_xs.c 	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
t                 519 drivers/xen/xenbus/xenbus_xs.c int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
t                 528 drivers/xen/xenbus/xenbus_xs.c 	ret = xs_error(xs_single(t, XS_RM, path, NULL));
t                 537 drivers/xen/xenbus/xenbus_xs.c int xenbus_transaction_start(struct xenbus_transaction *t)
t                 545 drivers/xen/xenbus/xenbus_xs.c 	t->id = simple_strtoul(id_str, NULL, 0);
t                 554 drivers/xen/xenbus/xenbus_xs.c int xenbus_transaction_end(struct xenbus_transaction t, int abort)
t                 563 drivers/xen/xenbus/xenbus_xs.c 	return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
t                 568 drivers/xen/xenbus/xenbus_xs.c int xenbus_scanf(struct xenbus_transaction t,
t                 575 drivers/xen/xenbus/xenbus_xs.c 	val = xenbus_read(t, dir, node, NULL);
t                 606 drivers/xen/xenbus/xenbus_xs.c int xenbus_printf(struct xenbus_transaction t,
t                 620 drivers/xen/xenbus/xenbus_xs.c 	ret = xenbus_write(t, dir, node, buf);
t                 629 drivers/xen/xenbus/xenbus_xs.c int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
t                 641 drivers/xen/xenbus/xenbus_xs.c 		p = xenbus_read(t, dir, name, NULL);
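The xenbus_xs.c entries above expose the transaction helpers (xenbus_transaction_start/end, xenbus_read/write/printf, xenbus_gather). A hedged sketch of how they are typically combined by a frontend: start a transaction, perform accesses against it, then commit and retry if the store reports a conflict. The path "device/example" and node name are placeholders:

#include <linux/errno.h>
#include <xen/xenbus.h>

static int example_xs_update(void)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	err = xenbus_printf(xbt, "device/example", "state", "%d", 1);
	if (err) {
		xenbus_transaction_end(xbt, 1);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);	/* commit */
	if (err == -EAGAIN)
		goto again;

	return err;
}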
t                  15 fs/adfs/dir_fplus.c 	struct adfs_bigdirtail *t;
t                  86 fs/adfs/dir_fplus.c 	t = (struct adfs_bigdirtail *)
t                  89 fs/adfs/dir_fplus.c 	if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
t                  90 fs/adfs/dir_fplus.c 	    t->bigdirendmasseq != h->startmasseq ||
t                  91 fs/adfs/dir_fplus.c 	    t->reserved[0] != 0 || t->reserved[1] != 0) {
t                  79 fs/afs/inode.c 	struct timespec64 t;
t                  92 fs/afs/inode.c 	t = status->mtime_client;
t                  93 fs/afs/inode.c 	inode->i_ctime = t;
t                  94 fs/afs/inode.c 	inode->i_mtime = t;
t                  95 fs/afs/inode.c 	inode->i_atime = t;
t                 171 fs/afs/inode.c 	struct timespec64 t;
t                 203 fs/afs/inode.c 	t = status->mtime_client;
t                 204 fs/afs/inode.c 	vnode->vfs_inode.i_ctime = t;
t                 205 fs/afs/inode.c 	vnode->vfs_inode.i_mtime = t;
t                 206 fs/afs/inode.c 	vnode->vfs_inode.i_atime = t;
t                  86 fs/afs/write.c 	unsigned t, to = from + len;
t                 120 fs/afs/write.c 	t = f = 0;
t                 124 fs/afs/write.c 		t = priv >> AFS_PRIV_SHIFT;
t                 125 fs/afs/write.c 		ASSERTCMP(f, <=, t);
t                 128 fs/afs/write.c 	if (f != t) {
t                 139 fs/afs/write.c 		    (to < f || from > t))
t                 143 fs/afs/write.c 		if (to > t)
t                 144 fs/afs/write.c 			t = to;
t                 147 fs/afs/write.c 		t = to;
t                 150 fs/afs/write.c 	priv = (unsigned long)t << AFS_PRIV_SHIFT;
t                 459 fs/afs/write.c 	unsigned n, offset, to, f, t;
t                 523 fs/afs/write.c 			t = priv >> AFS_PRIV_SHIFT;
t                 529 fs/afs/write.c 			to = t;
t                 855 fs/afs/write.c 	unsigned int f, t;
t                 863 fs/afs/write.c 		t = PAGE_SIZE;
t                 866 fs/afs/write.c 			t = priv >> AFS_PRIV_SHIFT;
t                 871 fs/afs/write.c 		ret = afs_store_data(mapping, page->index, page->index, t, f);
t                  82 fs/afs/yfsclient.c static s64 linux_to_yfs_time(const struct timespec64 *t)
t                  85 fs/afs/yfsclient.c 	return (u64)t->tv_sec * 10000000 + t->tv_nsec/100;
t                 100 fs/afs/yfsclient.c static __be32 *xdr_encode_YFSStoreStatus_mtime(__be32 *bp, const struct timespec64 *t)
t                 103 fs/afs/yfsclient.c 	s64 mtime = linux_to_yfs_time(t);
t                 116 fs/afs/yfsclient.c static struct timespec64 yfs_time_to_linux(s64 t)
t                 126 fs/afs/yfsclient.c 	if (t < 0) {
t                 127 fs/afs/yfsclient.c 		abs_t = -t;
t                 132 fs/afs/yfsclient.c 		abs_t = t;
t                 142 fs/afs/yfsclient.c 	s64 t = xdr_to_u64(xdr);
t                 144 fs/afs/yfsclient.c 	return yfs_time_to_linux(t);
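The yfsclient.c conversions above work in 100-nanosecond units: one second is 10,000,000 ticks, so linux_to_yfs_time() multiplies tv_sec by 10000000 and adds tv_nsec/100. A small worked illustration with arbitrarily chosen values:

#include <linux/time64.h>
#include <linux/types.h>

/* {tv_sec = 2, tv_nsec = 150} -> 2 * 10000000 + 150 / 100 = 20000001
 * ticks of 100 ns each. This only mirrors the arithmetic in the listing. */
static s64 example_to_yfs_ticks(void)
{
	struct timespec64 ts = { .tv_sec = 2, .tv_nsec = 150 };

	return (u64)ts.tv_sec * 10000000 + ts.tv_nsec / 100;
}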
t                2189 fs/aio.c       	struct timespec64 t;
t                2192 fs/aio.c       	if (timeout && get_old_timespec32(&t, timeout))
t                2195 fs/aio.c       	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
t                2221 fs/aio.c       	struct timespec64 t;
t                2225 fs/aio.c       	if (timeout && get_old_timespec32(&t, timeout))
t                2235 fs/aio.c       	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
t                2256 fs/aio.c       	struct timespec64 t;
t                2260 fs/aio.c       	if (timeout && get_timespec64(&t, timeout))
t                2270 fs/aio.c       	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
t                1699 fs/binfmt_elf.c static int fill_thread_core_info(struct elf_thread_core_info *t,
t                1704 fs/binfmt_elf.c 	unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);
t                1712 fs/binfmt_elf.c 	fill_prstatus(&t->prstatus, t->task, signr);
t                1713 fs/binfmt_elf.c 	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
t                1714 fs/binfmt_elf.c 				    &t->prstatus.pr_reg, NULL);
t                1716 fs/binfmt_elf.c 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
t                1717 fs/binfmt_elf.c 		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
t                1718 fs/binfmt_elf.c 	*total += notesize(&t->notes[0]);
t                1720 fs/binfmt_elf.c 	do_thread_regset_writeback(t->task, &view->regsets[0]);
t                1729 fs/binfmt_elf.c 		do_thread_regset_writeback(t->task, regset);
t                1731 fs/binfmt_elf.c 		    (!regset->active || regset->active(t->task, regset) > 0)) {
t                1733 fs/binfmt_elf.c 			size_t size = regset_size(t->task, regset);
t                1737 fs/binfmt_elf.c 			ret = regset->get(t->task, regset,
t                1743 fs/binfmt_elf.c 					fill_note(&t->notes[i], "LINUX",
t                1747 fs/binfmt_elf.c 					SET_PR_FPVALID(&t->prstatus,
t                1749 fs/binfmt_elf.c 					fill_note(&t->notes[i], "CORE",
t                1752 fs/binfmt_elf.c 				*total += notesize(&t->notes[i]);
t                1766 fs/binfmt_elf.c 	struct elf_thread_core_info *t;
t                1810 fs/binfmt_elf.c 		t = kzalloc(offsetof(struct elf_thread_core_info,
t                1813 fs/binfmt_elf.c 		if (unlikely(!t))
t                1816 fs/binfmt_elf.c 		t->task = ct->task;
t                1818 fs/binfmt_elf.c 			t->next = info->thread;
t                1819 fs/binfmt_elf.c 			info->thread = t;
t                1825 fs/binfmt_elf.c 			t->next = info->thread->next;
t                1826 fs/binfmt_elf.c 			info->thread->next = t;
t                1833 fs/binfmt_elf.c 	for (t = info->thread; t != NULL; t = t->next)
t                1834 fs/binfmt_elf.c 		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
t                1868 fs/binfmt_elf.c 	struct elf_thread_core_info *t = info->thread;
t                1873 fs/binfmt_elf.c 		if (!writenote(&t->notes[0], cprm))
t                1887 fs/binfmt_elf.c 			if (t->notes[i].data &&
t                1888 fs/binfmt_elf.c 			    !writenote(&t->notes[i], cprm))
t                1892 fs/binfmt_elf.c 		t = t->next;
t                1893 fs/binfmt_elf.c 	} while (t);
t                1903 fs/binfmt_elf.c 		struct elf_thread_core_info *t = threads;
t                1904 fs/binfmt_elf.c 		threads = t->next;
t                1905 fs/binfmt_elf.c 		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
t                1907 fs/binfmt_elf.c 			kfree(t->notes[i].data);
t                1908 fs/binfmt_elf.c 		kfree(t);
t                1935 fs/binfmt_elf.c static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
t                1938 fs/binfmt_elf.c 	struct task_struct *p = t->thread;
t                1939 fs/binfmt_elf.c 	t->num_notes = 0;
t                1941 fs/binfmt_elf.c 	fill_prstatus(&t->prstatus, p, signr);
t                1942 fs/binfmt_elf.c 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);	
t                1944 fs/binfmt_elf.c 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
t                1945 fs/binfmt_elf.c 		  &(t->prstatus));
t                1946 fs/binfmt_elf.c 	t->num_notes++;
t                1947 fs/binfmt_elf.c 	sz += notesize(&t->notes[0]);
t                1949 fs/binfmt_elf.c 	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
t                1950 fs/binfmt_elf.c 								&t->fpu))) {
t                1951 fs/binfmt_elf.c 		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
t                1952 fs/binfmt_elf.c 			  &(t->fpu));
t                1953 fs/binfmt_elf.c 		t->num_notes++;
t                1954 fs/binfmt_elf.c 		sz += notesize(&t->notes[1]);
t                1958 fs/binfmt_elf.c 	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
t                1959 fs/binfmt_elf.c 		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
t                1960 fs/binfmt_elf.c 			  sizeof(t->xfpu), &t->xfpu);
t                1961 fs/binfmt_elf.c 		t->num_notes++;
t                1962 fs/binfmt_elf.c 		sz += notesize(&t->notes[2]);
t                1440 fs/binfmt_elf_fdpic.c static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
t                1442 fs/binfmt_elf_fdpic.c 	struct task_struct *p = t->thread;
t                1445 fs/binfmt_elf_fdpic.c 	t->num_notes = 0;
t                1447 fs/binfmt_elf_fdpic.c 	fill_prstatus(&t->prstatus, p, signr);
t                1448 fs/binfmt_elf_fdpic.c 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
t                1450 fs/binfmt_elf_fdpic.c 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
t                1451 fs/binfmt_elf_fdpic.c 		  &t->prstatus);
t                1452 fs/binfmt_elf_fdpic.c 	t->num_notes++;
t                1453 fs/binfmt_elf_fdpic.c 	sz += notesize(&t->notes[0]);
t                1455 fs/binfmt_elf_fdpic.c 	t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu);
t                1456 fs/binfmt_elf_fdpic.c 	if (t->prstatus.pr_fpvalid) {
t                1457 fs/binfmt_elf_fdpic.c 		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
t                1458 fs/binfmt_elf_fdpic.c 			  &t->fpu);
t                1459 fs/binfmt_elf_fdpic.c 		t->num_notes++;
t                1460 fs/binfmt_elf_fdpic.c 		sz += notesize(&t->notes[1]);
t                1464 fs/binfmt_elf_fdpic.c 	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
t                1465 fs/binfmt_elf_fdpic.c 		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
t                1466 fs/binfmt_elf_fdpic.c 			  sizeof(t->xfpu), &t->xfpu);
t                1467 fs/binfmt_elf_fdpic.c 		t->num_notes++;
t                1468 fs/binfmt_elf_fdpic.c 		sz += notesize(&t->notes[2]);
t                1563 fs/binfmt_elf_fdpic.c  	struct list_head *t;
t                1622 fs/binfmt_elf_fdpic.c 	list_for_each(t, &thread_list) {
t                1626 fs/binfmt_elf_fdpic.c 		tmp = list_entry(t, struct elf_thread_status, list);
t                1760 fs/binfmt_elf_fdpic.c 	list_for_each(t, &thread_list) {
t                1762 fs/binfmt_elf_fdpic.c 				list_entry(t, struct elf_thread_status, list);
t                  41 fs/btrfs/ctree.c 	u16 t = btrfs_super_csum_type(s);
t                  45 fs/btrfs/ctree.c 	return btrfs_csums[t].size;
t                2341 fs/btrfs/ctree.c 	struct extent_buffer *t;
t                2354 fs/btrfs/ctree.c 			t = path->nodes[i];
t                2355 fs/btrfs/ctree.c 			nritems = btrfs_header_nritems(t);
t                2364 fs/btrfs/ctree.c 		t = path->nodes[i];
t                2366 fs/btrfs/ctree.c 			btrfs_tree_unlock_rw(t, path->locks[i]);
t                3168 fs/btrfs/ctree.c 	struct extent_buffer *t;
t                3176 fs/btrfs/ctree.c 		t = path->nodes[i];
t                3177 fs/btrfs/ctree.c 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
t                3180 fs/btrfs/ctree.c 		btrfs_set_node_key(t, key, tslot);
t                4558 fs/btrfs/disk-io.c 	struct btrfs_transaction *t;
t                4564 fs/btrfs/disk-io.c 		t = list_first_entry(&fs_info->trans_list,
t                4566 fs/btrfs/disk-io.c 		if (t->state >= TRANS_STATE_COMMIT_START) {
t                4567 fs/btrfs/disk-io.c 			refcount_inc(&t->use_count);
t                4569 fs/btrfs/disk-io.c 			btrfs_wait_for_commit(fs_info, t->transid);
t                4570 fs/btrfs/disk-io.c 			btrfs_put_transaction(t);
t                4574 fs/btrfs/disk-io.c 		if (t == fs_info->running_transaction) {
t                4575 fs/btrfs/disk-io.c 			t->state = TRANS_STATE_COMMIT_DOING;
t                4581 fs/btrfs/disk-io.c 			wait_event(t->writer_wait,
t                4582 fs/btrfs/disk-io.c 				   atomic_read(&t->num_writers) == 0);
t                4586 fs/btrfs/disk-io.c 		btrfs_cleanup_one_transaction(t, fs_info);
t                4589 fs/btrfs/disk-io.c 		if (t == fs_info->running_transaction)
t                4591 fs/btrfs/disk-io.c 		list_del_init(&t->list);
t                4594 fs/btrfs/disk-io.c 		btrfs_put_transaction(t);
t                 146 fs/btrfs/ordered-data.h btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
t                 148 fs/btrfs/ordered-data.h 	spin_lock_init(&t->lock);
t                 149 fs/btrfs/ordered-data.h 	t->tree = RB_ROOT;
t                 150 fs/btrfs/ordered-data.h 	t->last = NULL;
t                 738 fs/btrfs/transaction.c 	struct btrfs_transaction *cur_trans = NULL, *t;
t                 747 fs/btrfs/transaction.c 		list_for_each_entry(t, &fs_info->trans_list, list) {
t                 748 fs/btrfs/transaction.c 			if (t->transid == transid) {
t                 749 fs/btrfs/transaction.c 				cur_trans = t;
t                 754 fs/btrfs/transaction.c 			if (t->transid > transid) {
t                 773 fs/btrfs/transaction.c 		list_for_each_entry_reverse(t, &fs_info->trans_list,
t                 775 fs/btrfs/transaction.c 			if (t->state >= TRANS_STATE_COMMIT_START) {
t                 776 fs/btrfs/transaction.c 				if (t->state == TRANS_STATE_COMPLETED)
t                 778 fs/btrfs/transaction.c 				cur_trans = t;
t                  23 fs/cachefiles/proc.c 	unsigned x, y, z, t;
t                  40 fs/cachefiles/proc.c 		t = (index * 1000) / HZ;
t                  42 fs/cachefiles/proc.c 		seq_printf(m, "%4lu  0.%03u %9u %9u %9u\n", index, t, x, y, z);
t                1747 fs/ceph/file.c 	u64 nearly, t;
t                1751 fs/ceph/file.c 	t = nearly;
t                1752 fs/ceph/file.c 	nearly -= do_div(t, object_set_size);
t                  53 fs/ceph/inode.c 	ino_t t = ceph_vino_to_ino(vino);
t                  55 fs/ceph/inode.c 	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
t                 182 fs/ceph/inode.c 	u32 t = ceph_frag_make(0, 0);
t                 191 fs/ceph/inode.c 		WARN_ON(!ceph_frag_contains_value(t, v));
t                 192 fs/ceph/inode.c 		frag = __ceph_find_frag(ci, t);
t                 205 fs/ceph/inode.c 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
t                 208 fs/ceph/inode.c 			n = ceph_frag_make_child(t, frag->split_by, i);
t                 210 fs/ceph/inode.c 				t = n;
t                 216 fs/ceph/inode.c 	dout("choose_frag(%x) = %x\n", v, t);
t                 218 fs/ceph/inode.c 	return t;
t                 500 fs/ceph/super.h 	ino_t t = ceph_vino_to_ino(vino);
t                 501 fs/ceph/super.h 	return ilookup5(sb, t, ceph_ino_compare, &vino);
t                 635 fs/ceph/super.h extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
t                 113 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t, *n;
t                 115 fs/cifs/dfs_cache.c 	list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
t                 116 fs/cifs/dfs_cache.c 		list_del(&t->t_list);
t                 117 fs/cifs/dfs_cache.c 		kfree(t->t_name);
t                 118 fs/cifs/dfs_cache.c 		kfree(t);
t                 163 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                 181 fs/cifs/dfs_cache.c 		list_for_each_entry(t, &ce->ce_tlist, t_list) {
t                 183 fs/cifs/dfs_cache.c 				   t->t_name,
t                 184 fs/cifs/dfs_cache.c 				   ce->ce_tgthint == t ? " (target hint)" : "");
t                 231 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                 234 fs/cifs/dfs_cache.c 	list_for_each_entry(t, &ce->ce_tlist, t_list) {
t                 235 fs/cifs/dfs_cache.c 		cifs_dbg(FYI, "  %s%s\n", t->t_name,
t                 236 fs/cifs/dfs_cache.c 			 ce->ce_tgthint == t ? " (target hint)" : "");
t                 330 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t = ce->ce_tgthint;
t                 332 fs/cifs/dfs_cache.c 	return t ? t->t_name : ERR_PTR(-ENOENT);
t                 351 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                 353 fs/cifs/dfs_cache.c 	t = kmalloc(sizeof(*t), GFP_KERNEL);
t                 354 fs/cifs/dfs_cache.c 	if (!t)
t                 356 fs/cifs/dfs_cache.c 	t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
t                 357 fs/cifs/dfs_cache.c 	if (!t->t_name) {
t                 358 fs/cifs/dfs_cache.c 		kfree(t);
t                 361 fs/cifs/dfs_cache.c 	INIT_LIST_HEAD(&t->t_list);
t                 362 fs/cifs/dfs_cache.c 	return t;
t                 381 fs/cifs/dfs_cache.c 		struct dfs_cache_tgt *t;
t                 383 fs/cifs/dfs_cache.c 		t = alloc_tgt(refs[i].node_name);
t                 384 fs/cifs/dfs_cache.c 		if (IS_ERR(t)) {
t                 386 fs/cifs/dfs_cache.c 			return PTR_ERR(t);
t                 388 fs/cifs/dfs_cache.c 		if (tgthint && !strcasecmp(t->t_name, tgthint)) {
t                 389 fs/cifs/dfs_cache.c 			list_add(&t->t_list, &ce->ce_tlist);
t                 392 fs/cifs/dfs_cache.c 			list_add_tail(&t->t_list, &ce->ce_tlist);
t                 765 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                 771 fs/cifs/dfs_cache.c 	list_for_each_entry(t, &ce->ce_tlist, t_list) {
t                 778 fs/cifs/dfs_cache.c 		it->it_name = kstrndup(t->t_name, strlen(t->t_name),
t                 786 fs/cifs/dfs_cache.c 		if (ce->ce_tgthint == t)
t                 933 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                 953 fs/cifs/dfs_cache.c 	t = ce->ce_tgthint;
t                 955 fs/cifs/dfs_cache.c 	if (likely(!strcasecmp(it->it_name, t->t_name)))
t                 958 fs/cifs/dfs_cache.c 	list_for_each_entry(t, &ce->ce_tlist, t_list) {
t                 959 fs/cifs/dfs_cache.c 		if (!strcasecmp(t->t_name, it->it_name)) {
t                 960 fs/cifs/dfs_cache.c 			ce->ce_tgthint = t;
t                 993 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt *t;
t                1014 fs/cifs/dfs_cache.c 	t = ce->ce_tgthint;
t                1016 fs/cifs/dfs_cache.c 	if (unlikely(!strcasecmp(it->it_name, t->t_name)))
t                1019 fs/cifs/dfs_cache.c 	list_for_each_entry(t, &ce->ce_tlist, t_list) {
t                1020 fs/cifs/dfs_cache.c 		if (!strcasecmp(t->t_name, it->it_name)) {
t                1021 fs/cifs/dfs_cache.c 			ce->ce_tgthint = t;
t                 911 fs/cifs/netmisc.c 	s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
t                 919 fs/cifs/netmisc.c 	if (t < 0) {
t                 920 fs/cifs/netmisc.c 		abs_t = -t;
t                 925 fs/cifs/netmisc.c 		abs_t = t;
t                 935 fs/cifs/netmisc.c cifs_UnixTimeToNT(struct timespec64 t)
t                 938 fs/cifs/netmisc.c 	return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
t                 534 fs/cifs/transport.c 	long int t;
t                 537 fs/cifs/transport.c 		t = MAX_JIFFY_OFFSET;
t                 539 fs/cifs/transport.c 		t = msecs_to_jiffies(timeout);
t                 567 fs/cifs/transport.c 				has_credits(server, credits, num_credits), t);
t                 607 fs/cifs/transport.c 					t);
t                 598 fs/configfs/dir.c 	const struct config_item_type *t = item->ci_type;
t                 604 fs/configfs/dir.c 	if (!t)
t                 606 fs/configfs/dir.c 	if (t->ct_attrs) {
t                 607 fs/configfs/dir.c 		for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
t                 612 fs/configfs/dir.c 	if (t->ct_bin_attrs) {
t                 613 fs/configfs/dir.c 		for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
t                 121 fs/configfs/item.c 	const struct config_item_type *t = item->ci_type;
t                 129 fs/configfs/item.c 	if (t && t->ct_item_ops && t->ct_item_ops->release)
t                 130 fs/configfs/item.c 		t->ct_item_ops->release(item);
t                 342 fs/coredump.c  	struct task_struct *t;
t                 350 fs/coredump.c  	for_each_thread(start, t) {
t                 351 fs/coredump.c  		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
t                 352 fs/coredump.c  		if (t != current && t->mm) {
t                 353 fs/coredump.c  			sigaddset(&t->pending.signal, SIGKILL);
t                 354 fs/coredump.c  			signal_wake_up(t, 1);
t                 180 fs/erofs/zdata.c 		compressed_page_t t;
t                 189 fs/erofs/zdata.c 			t = tag_compressed_page_justfound(page);
t                 191 fs/erofs/zdata.c 			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
t                 199 fs/erofs/zdata.c 		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
t                 703 fs/erofs/zdata.c 	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
t                 704 fs/erofs/zdata.c 	struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
t                 705 fs/erofs/zdata.c 	bool background = tagptr_unfold_tags(t);
t                1005 fs/erofs/zdata.c 	compressed_page_t t;
t                1025 fs/erofs/zdata.c 	t = tagptr_init(compressed_page_t, page);
t                1026 fs/erofs/zdata.c 	justfound = tagptr_unfold_tags(t);
t                1027 fs/erofs/zdata.c 	page = tagptr_unfold_ptr(t);
t                  60 fs/erofs/zpvec.h 		const erofs_vtptr_t t = ctor->pages[index];
t                  61 fs/erofs/zpvec.h 		const unsigned int tags = tagptr_unfold_tags(t);
t                  64 fs/erofs/zpvec.h 			return tagptr_unfold_ptr(t);
t                 138 fs/erofs/zpvec.h 	erofs_vtptr_t t;
t                 145 fs/erofs/zpvec.h 	t = ctor->pages[ctor->index];
t                 147 fs/erofs/zpvec.h 	*type = tagptr_unfold_tags(t);
t                 151 fs/erofs/zpvec.h 		ctor->next = tagptr_unfold_ptr(t);
t                 154 fs/erofs/zpvec.h 	return tagptr_unfold_ptr(t);
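The erofs zdata.c/zpvec.h entries above pack a small tag into the unused low bits of an aligned pointer (tagptr_init, tagptr_unfold_ptr, tagptr_unfold_tags). A generic, hedged illustration of that tagged-pointer idea, not the erofs helpers themselves:

/* Generic illustration only: when a pointer is suitably aligned, its low
 * bits are free to carry a small tag; the erofs tagptr types wrap the
 * same trick in a type-safe form. */
static inline void *example_tag_ptr(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

static inline unsigned long example_ptr_tag(void *p, unsigned long mask)
{
	return (unsigned long)p & mask;
}

static inline void *example_ptr_untag(void *p, unsigned long mask)
{
	return (void *)((unsigned long)p & ~mask);
}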
t                1486 fs/exec.c      	struct task_struct *p = current, *t;
t                1499 fs/exec.c      	t = p;
t                1503 fs/exec.c      	while_each_thread(p, t) {
t                1504 fs/exec.c      		if (t->fs == p->fs)
t                 299 fs/ext4/namei.c 	struct ext4_dir_entry_tail *t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
t                 301 fs/ext4/namei.c 	memset(t, 0, sizeof(struct ext4_dir_entry_tail));
t                 302 fs/ext4/namei.c 	t->det_rec_len = ext4_rec_len_to_disk(
t                 304 fs/ext4/namei.c 	t->det_reserved_ft = EXT4_FT_DIR_CSUM;
t                 311 fs/ext4/namei.c 	struct ext4_dir_entry_tail *t;
t                 327 fs/ext4/namei.c 	t = (struct ext4_dir_entry_tail *)d;
t                 329 fs/ext4/namei.c 	t = EXT4_DIRENT_TAIL(bh->b_data, EXT4_BLOCK_SIZE(inode->i_sb));
t                 332 fs/ext4/namei.c 	if (t->det_reserved_zero1 ||
t                 333 fs/ext4/namei.c 	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
t                 334 fs/ext4/namei.c 	    t->det_reserved_zero2 ||
t                 335 fs/ext4/namei.c 	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
t                 338 fs/ext4/namei.c 	return t;
t                 363 fs/ext4/namei.c 	struct ext4_dir_entry_tail *t;
t                 368 fs/ext4/namei.c 	t = get_dirent_tail(inode, bh);
t                 369 fs/ext4/namei.c 	if (!t) {
t                 374 fs/ext4/namei.c 	if (t->det_checksum != ext4_dirblock_csum(inode, bh->b_data,
t                 375 fs/ext4/namei.c 						  (char *)t - bh->b_data))
t                 384 fs/ext4/namei.c 	struct ext4_dir_entry_tail *t;
t                 389 fs/ext4/namei.c 	t = get_dirent_tail(inode, bh);
t                 390 fs/ext4/namei.c 	if (!t) {
t                 395 fs/ext4/namei.c 	t->det_checksum = ext4_dirblock_csum(inode, bh->b_data,
t                 396 fs/ext4/namei.c 					     (char *)t - bh->b_data);
t                 436 fs/ext4/namei.c 			   int count_offset, int count, struct dx_tail *t)
t                 447 fs/ext4/namei.c 	csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
t                 457 fs/ext4/namei.c 	struct dx_tail *t;
t                 475 fs/ext4/namei.c 	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
t                 477 fs/ext4/namei.c 	if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
t                 478 fs/ext4/namei.c 					    count, t))
t                 486 fs/ext4/namei.c 	struct dx_tail *t;
t                 504 fs/ext4/namei.c 	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
t                 506 fs/ext4/namei.c 	t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
t                1371 fs/ext4/super.c #define QTYPE2NAME(t) (quotatypes[t])
t                2176 fs/ext4/super.c 	const struct match_token *t;
t                2178 fs/ext4/super.c 	for (t = tokens; t->token != Opt_err; t++)
t                2179 fs/ext4/super.c 		if (t->token == token && !strchr(t->pattern, '='))
t                2181 fs/ext4/super.c 	return t->pattern;
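
The fs/ext4/super.c lines above walk a sentinel-terminated token table to map a mount-option token back to its pattern string, skipping patterns that take a value. A small userspace sketch of that scan; the table, enum and function below are invented for illustration and are not ext4's mount-option table:

	#include <stdio.h>
	#include <string.h>

	struct match_token { int token; const char *pattern; };

	enum { Opt_ro, Opt_rw, Opt_err };

	static const struct match_token tokens[] = {
		{ Opt_ro,  "ro" },
		{ Opt_rw,  "rw" },
		{ Opt_err, NULL },		/* sentinel terminates the scan */
	};

	/* Return the bare pattern for a token, skipping "name=value" patterns. */
	static const char *token2str(int token)
	{
		const struct match_token *t;

		for (t = tokens; t->token != Opt_err; t++)
			if (t->token == token && !strchr(t->pattern, '='))
				break;
		return t->pattern;		/* NULL if nothing matched */
	}

	int main(void)
	{
		printf("%s\n", token2str(Opt_rw));	/* prints "rw" */
		return 0;
	}
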
t                3005 fs/ext4/super.c static void print_daily_error_info(struct timer_list *t)
t                3007 fs/ext4/super.c 	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
t                  83 fs/ext4/sysfs.c 	unsigned long t;
t                  86 fs/ext4/sysfs.c 	ret = kstrtoul(skip_spaces(buf), 0, &t);
t                  90 fs/ext4/sysfs.c 	if (t && (!is_power_of_2(t) || t > 0x40000000))
t                  93 fs/ext4/sysfs.c 	sbi->s_inode_readahead_blks = t;
t                 347 fs/ext4/sysfs.c 	unsigned long t;
t                 356 fs/ext4/sysfs.c 		ret = kstrtoul(skip_spaces(buf), 0, &t);
t                 360 fs/ext4/sysfs.c 			*((__le32 *) ptr) = cpu_to_le32(t);
t                 362 fs/ext4/sysfs.c 			*((unsigned int *) ptr) = t;
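
The fs/ext4/sysfs.c lines above parse a user-supplied number with kstrtoul and only accept it when it is zero, or a power of two no larger than 0x40000000. A userspace sketch of the same validation, substituting strtoul for kstrtoul and open-coding is_power_of_2; the function name is mine:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	static bool is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	/* Returns 0 and stores the value, or -EINVAL for malformed/out-of-range input. */
	static int parse_readahead_blks(const char *buf, unsigned long *out)
	{
		char *end;
		unsigned long t;

		errno = 0;
		t = strtoul(buf, &end, 0);	/* base 0: accept decimal, octal, hex */
		if (errno || end == buf)
			return -EINVAL;
		if (t && (!is_power_of_2(t) || t > 0x40000000))
			return -EINVAL;
		*out = t;
		return 0;
	}

	int main(void)
	{
		unsigned long v;

		printf("%d\n", parse_readahead_blks("32", &v));	/* 0: accepted */
		printf("%d\n", parse_readahead_blks("33", &v));	/* rejected (-EINVAL) */
		return 0;
	}
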
t                 510 fs/f2fs/f2fs.h 		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
t                 515 fs/f2fs/f2fs.h 	d->bitmap = t->dentry_bitmap;
t                 516 fs/f2fs/f2fs.h 	d->dentry = t->dentry;
t                 517 fs/f2fs/f2fs.h 	d->filename = t->filename;
t                 521 fs/f2fs/f2fs.h 					struct f2fs_dentry_ptr *d, void *t)
t                 530 fs/f2fs/f2fs.h 	d->bitmap = t;
t                 531 fs/f2fs/f2fs.h 	d->dentry = t + bitmap_size + reserved_size;
t                 532 fs/f2fs/f2fs.h 	d->filename = t + bitmap_size + reserved_size +
t                 790 fs/f2fs/segment.c 		enum dirty_type t = sentry->type;
t                 792 fs/f2fs/segment.c 		if (unlikely(t >= DIRTY)) {
t                 796 fs/f2fs/segment.c 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
t                 797 fs/f2fs/segment.c 			dirty_i->nr_dirty[t]++;
t                 811 fs/f2fs/segment.c 		enum dirty_type t = sentry->type;
t                 813 fs/f2fs/segment.c 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
t                 814 fs/f2fs/segment.c 			dirty_i->nr_dirty[t]--;
t                  24 fs/f2fs/segment.h #define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
t                  25 fs/f2fs/segment.h #define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)
t                  27 fs/f2fs/segment.h #define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
t                  28 fs/f2fs/segment.h #define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
t                  29 fs/f2fs/segment.h #define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
t                 289 fs/f2fs/super.c #define QTYPE2NAME(t) (quotatypes[t])
t                 208 fs/f2fs/sysfs.c 	unsigned long t;
t                 253 fs/f2fs/sysfs.c 	ret = kstrtoul(skip_spaces(buf), 0, &t);
t                 257 fs/f2fs/sysfs.c 	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
t                 259 fs/f2fs/sysfs.c 	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
t                 264 fs/f2fs/sysfs.c 		if (t > (unsigned long)(sbi->user_block_count -
t                 269 fs/f2fs/sysfs.c 		*ui = t;
t                 277 fs/f2fs/sysfs.c 		if (t == 0 || t > MAX_PLIST_NUM)
t                 279 fs/f2fs/sysfs.c 		if (t == *ui)
t                 281 fs/f2fs/sysfs.c 		*ui = t;
t                 286 fs/f2fs/sysfs.c 		if (t == 0 || t > sbi->segs_per_sec)
t                 294 fs/f2fs/sysfs.c 		if (t >= 1) {
t                 308 fs/f2fs/sysfs.c 		if (t == GC_IDLE_CB)
t                 310 fs/f2fs/sysfs.c 		else if (t == GC_IDLE_GREEDY)
t                 319 fs/f2fs/sysfs.c 		sbi->iostat_enable = !!t;
t                 325 fs/f2fs/sysfs.c 	*ui = (unsigned int)t;
t                 193 fs/fat/dir.c   fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
t                 197 fs/fat/dir.c   	charlen = t->char2uni(c, clen, uni);
t                 206 fs/fat/dir.c   fat_short2lower_uni(struct nls_table *t, unsigned char *c,
t                 212 fs/fat/dir.c   	charlen = t->char2uni(c, clen, &wc);
t                 217 fs/fat/dir.c   		unsigned char nc = t->charset2lower[*c];
t                 222 fs/fat/dir.c   		charlen = t->char2uni(&nc, 1, uni);
t                 136 fs/fat/namei_vfat.c 	struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
t                 146 fs/fat/namei_vfat.c 		hash = partial_name_hash(nls_tolower(t, *name++), hash);
t                 158 fs/fat/namei_vfat.c 	struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
t                 165 fs/fat/namei_vfat.c 		if (nls_strnicmp(t, name->name, str, alen) == 0)
t                 305 fs/file_table.c 	struct file *f, *t;
t                 307 fs/file_table.c 	llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
t                1205 fs/fs-writeback.c static bool inode_dirtied_after(struct inode *inode, unsigned long t)
t                1207 fs/fs-writeback.c 	bool ret = time_after(inode->dirtied_when, t);
t                 378 fs/fs_parser.c 			enum fs_parameter_type t = param->type;
t                 381 fs/fs_parser.c 			if (t == __fs_param_wasnt_defined ||
t                 382 fs/fs_parser.c 			    t >= nr__fs_parameter_type) {
t                 384 fs/fs_parser.c 				       name, param->name, t);
t                 386 fs/fs_parser.c 			} else if (t == fs_param_is_enum) {
t                  26 fs/fscache/histogram.c 	unsigned n[5], t;
t                  45 fs/fscache/histogram.c 		t = (index * 1000) / HZ;
t                  48 fs/fscache/histogram.c 			   index, t, n[0], n[1], n[2], n[3], n[4]);
t                 169 fs/fscache/object.c 	const struct fscache_transition *t;
t                 192 fs/fscache/object.c 		for (t = object->oob_table; t->events; t++) {
t                 193 fs/fscache/object.c 			if (events & t->events) {
t                 194 fs/fscache/object.c 				state = t->transit_to;
t                 196 fs/fscache/object.c 				event = fls(events & t->events) - 1;
t                 208 fs/fscache/object.c 			for (t = state->transitions; t->events; t++) {
t                 209 fs/fscache/object.c 				if (events & t->events) {
t                 210 fs/fscache/object.c 					new_state = t->transit_to;
t                 211 fs/fscache/object.c 					event = fls(events & t->events) - 1;
t                 261 fs/fscache/object.c 	for (t = state->transitions; t->events; t++)
t                 262 fs/fscache/object.c 		event_mask |= t->events;
t                 305 fs/fscache/object.c 	const struct fscache_transition *t;
t                 333 fs/fscache/object.c 	for (t = object->oob_table; t->events; t++)
t                 334 fs/fscache/object.c 		object->oob_event_mask |= t->events;
t                 336 fs/fscache/object.c 	for (t = object->state->transitions; t->events; t++)
t                 337 fs/fscache/object.c 		object->event_mask |= t->events;
t                  80 fs/fuse/control.c 	unsigned long t;
t                  87 fs/fuse/control.c 	err = kstrtoul_from_user(buf, count, 0, &t);
t                  94 fs/fuse/control.c 	if (t > limit)
t                  97 fs/fuse/control.c 	*val = t;
t                 301 fs/gfs2/bmap.c 	const __be64 *t;
t                 303 fs/gfs2/bmap.c 	for (t = start; t < end; t++) {
t                 306 fs/gfs2/bmap.c 		if (!*t)
t                 309 fs/gfs2/bmap.c 		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
t                1005 fs/gfs2/log.c  	unsigned long t = 1;
t                1021 fs/gfs2/log.c  		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
t                1040 fs/gfs2/log.c  		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
t                1050 fs/gfs2/log.c  				t = schedule_timeout(t);
t                1051 fs/gfs2/log.c  		} while(t && !gfs2_ail_flush_reqd(sdp) &&
t                1486 fs/gfs2/quota.c 			       unsigned long t, unsigned long *timeo,
t                1489 fs/gfs2/quota.c 	if (t >= *timeo) {
t                1494 fs/gfs2/quota.c 		*timeo -= t;
t                1537 fs/gfs2/quota.c 	unsigned long t = 0;
t                1550 fs/gfs2/quota.c 			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
t                1555 fs/gfs2/quota.c 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
t                1563 fs/gfs2/quota.c 		t = min(quotad_timeo, statfs_timeo);
t                1570 fs/gfs2/quota.c 			t -= schedule_timeout(t);
t                1572 fs/gfs2/quota.c 			t = 0;
t                 222 fs/gfs2/util.c 			   u16 type, u16 t, const char *function,
t                 230 fs/gfs2/util.c 			      (unsigned long long)bh->b_blocknr, type, t,
t                  94 fs/gfs2/util.h 			   u16 type, u16 t,
t                 106 fs/gfs2/util.h 	u16 t = be32_to_cpu(mh->mh_type);
t                 110 fs/gfs2/util.h         if (unlikely(t != type))
t                 111 fs/gfs2/util.h 		return gfs2_metatype_check_ii(sdp, bh, type, t, function,
t                 537 fs/hfsplus/hfsplus_fs.h #define __hfsp_mt2ut(t)		(be32_to_cpu(t) - 2082844800U)
t                 538 fs/hfsplus/hfsplus_fs.h #define __hfsp_ut2mt(t)		(cpu_to_be32(t + 2082844800U))
t                 541 fs/hfsplus/hfsplus_fs.h #define hfsp_mt2ut(t)		(struct timespec){ .tv_sec = __hfsp_mt2ut(t) }
t                 542 fs/hfsplus/hfsplus_fs.h #define hfsp_ut2mt(t)		__hfsp_ut2mt((t).tv_sec)
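
The fs/hfsplus macros above shift between the HFS+ epoch (1904-01-01) and the Unix epoch by 2082844800 seconds: 66 years plus 17 leap days is (66 * 365 + 17) * 86400 = 2082844800. A tiny standalone restatement of the Mac-to-Unix direction; unlike the kernel macro, the helper below assumes the 32-bit Mac timestamp is already in host byte order:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define HFSP_EPOCH_OFFSET 2082844800U	/* seconds from 1904-01-01 to 1970-01-01 */

	/* Mac time (seconds since 1904, host order) -> Unix time_t. */
	static time_t hfsp_mt2ut(uint32_t mac_secs)
	{
		return (time_t)mac_secs - HFSP_EPOCH_OFFSET;
	}

	int main(void)
	{
		/* 2082844800 in Mac time is exactly the Unix epoch. */
		time_t t = hfsp_mt2ut(2082844800U);

		printf("%s", asctime(gmtime(&t)));	/* Thu Jan  1 00:00:00 1970 */
		return 0;
	}
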
t                 312 fs/hfsplus/unicode.c 	int l, v, t;
t                 320 fs/hfsplus/unicode.c 	t = Hangul_TBase + index % Hangul_TCount;
t                 324 fs/hfsplus/unicode.c 	if (t != Hangul_TBase) {
t                 325 fs/hfsplus/unicode.c 		result[2] = t;
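
The fs/hfsplus/unicode.c lines above decompose a precomposed Hangul syllable into leading/vowel/trailing jamo; a trailing index equal to Hangul_TBase means there is no final consonant, which is why result[2] is only written when t != Hangul_TBase. A standalone sketch of the standard Unicode arithmetic; the constants are the published Hangul bases and counts, the function name is mine:

	#include <stdio.h>

	enum {
		Hangul_SBase = 0xAC00, Hangul_LBase = 0x1100,
		Hangul_VBase = 0x1161, Hangul_TBase = 0x11A7,
		Hangul_VCount = 21, Hangul_TCount = 28,
		Hangul_NCount = Hangul_VCount * Hangul_TCount,	/* 588 */
	};

	/* Decompose one precomposed syllable into 2 or 3 jamo; returns the count. */
	static int hangul_decompose(unsigned int s, unsigned int out[3])
	{
		unsigned int index = s - Hangul_SBase;
		unsigned int l = Hangul_LBase + index / Hangul_NCount;
		unsigned int v = Hangul_VBase + (index % Hangul_NCount) / Hangul_TCount;
		unsigned int t = Hangul_TBase + index % Hangul_TCount;

		out[0] = l;
		out[1] = v;
		if (t == Hangul_TBase)		/* no trailing consonant */
			return 2;
		out[2] = t;
		return 3;
	}

	int main(void)
	{
		unsigned int jamo[3];
		int n = hangul_decompose(0xD55C, jamo);	/* U+D55C HAN */

		for (int i = 0; i < n; i++)
			printf("U+%04X ", jamo[i]);
		printf("\n");				/* U+1112 U+1161 U+11AB */
		return 0;
	}
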
t                  86 fs/hpfs/dnode.c static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t)
t                  88 fs/hpfs/dnode.c 	if (*p == f) *p = t;
t                 277 fs/hpfs/dnode.c 		loff_t t;
t                 279 fs/hpfs/dnode.c 		t = get_pos(d, de);
t                 280 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_ins, t, 1);
t                 281 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, 4, t);
t                 282 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, 5, t + 1);
t                 441 fs/hpfs/dnode.c 	loff_t t;
t                 493 fs/hpfs/dnode.c 	t = get_pos(dnode, de);
t                 494 fs/hpfs/dnode.c 	for_all_poss(i, hpfs_pos_subst, t, 4);
t                 495 fs/hpfs/dnode.c 	for_all_poss(i, hpfs_pos_subst, t + 1, 5);
t                 716 fs/hpfs/dnode.c 	loff_t t;
t                 729 fs/hpfs/dnode.c 	for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
t                 735 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, 5, t);
t                 926 fs/hpfs/dnode.c 		int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last);
t                 927 fs/hpfs/dnode.c 		if (!t) {
t                 931 fs/hpfs/dnode.c 		if (t < 0) {
t                 337 fs/hpfs/hpfs_fn.h static inline time64_t local_to_gmt(struct super_block *s, time64_t t)
t                 340 fs/hpfs/hpfs_fn.h 	return t + sys_tz.tz_minuteswest * 60 + hpfs_sb(s)->sb_timeshift;
t                 343 fs/hpfs/hpfs_fn.h static inline time32_t gmt_to_local(struct super_block *s, time64_t t)
t                 346 fs/hpfs/hpfs_fn.h 	return t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
t                  14 fs/hpfs/namei.c 	time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
t                  15 fs/hpfs/namei.c 	if (t == dir->i_mtime.tv_sec &&
t                  16 fs/hpfs/namei.c 	    t == dir->i_ctime.tv_sec)
t                  18 fs/hpfs/namei.c 	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
t                2164 fs/inode.c     struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
t                2170 fs/inode.c     		t.tv_nsec = 0;
t                2172 fs/inode.c     		t.tv_nsec -= t.tv_nsec % gran;
t                2176 fs/inode.c     	return t;
t                2189 fs/inode.c     struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
t                2194 fs/inode.c     	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
t                2195 fs/inode.c     	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
t                2196 fs/inode.c     		t.tv_nsec = 0;
t                2202 fs/inode.c     		t.tv_nsec = 0;
t                2204 fs/inode.c     		t.tv_nsec -= t.tv_nsec % gran;
t                2207 fs/inode.c     	return t;
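
The fs/inode.c lines above clamp a timestamp into the superblock's representable range and then round tv_nsec down to the filesystem's timestamp granularity: a 1 ns granularity keeps the value, a whole-second granularity zeroes tv_nsec, and anything in between truncates to a multiple of gran. A userspace restatement of just the rounding step; the clamping against s_time_min/s_time_max is left out and the function name is mine:

	#include <stdio.h>
	#include <time.h>

	#define NSEC_PER_SEC 1000000000L

	/* Round a timestamp down to the filesystem's timestamp granularity (in ns). */
	static struct timespec timespec_trunc(struct timespec t, long gran)
	{
		if (gran == 1)
			;				/* 1 ns granularity: keep as-is */
		else if (gran == NSEC_PER_SEC)
			t.tv_nsec = 0;			/* whole-second timestamps */
		else if (gran > 1 && gran < NSEC_PER_SEC)
			t.tv_nsec -= t.tv_nsec % gran;	/* truncate to a multiple of gran */
		return t;
	}

	int main(void)
	{
		struct timespec t = { .tv_sec = 100, .tv_nsec = 123456789 };

		t = timespec_trunc(t, 1000000);		/* 1 ms granularity */
		printf("%ld.%09ld\n", (long)t.tv_sec, t.tv_nsec);	/* 100.123000000 */
		return 0;
	}
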
t                 262 fs/jbd2/checkpoint.c 			transaction_t *t = jh->b_transaction;
t                 263 fs/jbd2/checkpoint.c 			tid_t tid = t->t_tid;
t                 146 fs/jbd2/journal.c static void commit_timeout(struct timer_list *t)
t                 148 fs/jbd2/journal.c 	journal_t *journal = from_timer(journal, t, j_commit_timer);
t                 275 fs/jbd2/journal.c 	struct task_struct *t;
t                 277 fs/jbd2/journal.c 	t = kthread_run(kjournald2, journal, "jbd2/%s",
t                 279 fs/jbd2/journal.c 	if (IS_ERR(t))
t                 280 fs/jbd2/journal.c 		return PTR_ERR(t);
t                 204 fs/jbd2/transaction.c 	transaction_t *t = journal->j_running_transaction;
t                 212 fs/jbd2/transaction.c 	if (t->t_state != T_RUNNING) {
t                 213 fs/jbd2/transaction.c 		WARN_ON_ONCE(t->t_state >= T_FLUSH);
t                 223 fs/jbd2/transaction.c 	needed = atomic_add_return(total, &t->t_outstanding_credits);
t                 230 fs/jbd2/transaction.c 		atomic_sub(total, &t->t_outstanding_credits);
t                 262 fs/jbd2/transaction.c 		atomic_sub(total, &t->t_outstanding_credits);
t                 280 fs/jbd2/transaction.c 		atomic_sub(total, &t->t_outstanding_credits);
t                  34 fs/jffs2/os-linux.h #define JFFS2_CLAMP_TIME(t) ((uint32_t)clamp_t(time64_t, (t), 0, U32_MAX))
t                3012 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                3245 fs/jfs/jfs_dtree.c 				t = (struct dtslot *) & p->slot[next];
t                3258 fs/jfs/jfs_dtree.c 				outlen = jfs_strfromUCS_le(name_ptr, t->name,
t                3262 fs/jfs/jfs_dtree.c 				next = t->next;
t                3586 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                3625 fs/jfs/jfs_dtree.c 		t = (struct dtslot *) & p->slot[si];
t                3628 fs/jfs/jfs_dtree.c 		name = t->name;
t                3635 fs/jfs/jfs_dtree.c 		si = t->next;
t                3664 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                3728 fs/jfs/jfs_dtree.c 		t = (struct dtslot *) & p->slot[si];
t                3731 fs/jfs/jfs_dtree.c 		name = t->name;
t                3745 fs/jfs/jfs_dtree.c 		si = t->next;
t                3836 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                3874 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
t                3878 fs/jfs/jfs_dtree.c 		UniStrncpy_from_le(kname, t->name, len);
t                3880 fs/jfs/jfs_dtree.c 		si = t->next;
t                3896 fs/jfs/jfs_dtree.c 	struct dtslot *h, *t;
t                3958 fs/jfs/jfs_dtree.c 	t = h;
t                3963 fs/jfs/jfs_dtree.c 		t = &p->slot[fsi];
t                3964 fs/jfs/jfs_dtree.c 		p->header.freelist = t->next;
t                3987 fs/jfs/jfs_dtree.c 		UniStrncpy_to_le(t->name, kname, len);
t                4001 fs/jfs/jfs_dtree.c 	if (h == t) {
t                4009 fs/jfs/jfs_dtree.c 		t->next = -1;
t                4256 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                4274 fs/jfs/jfs_dtree.c 	t = &p->slot[fsi];
t                4276 fs/jfs/jfs_dtree.c 		si = ((struct ldtentry *) t)->next;
t                4278 fs/jfs/jfs_dtree.c 		si = ((struct idtentry *) t)->next;
t                4279 fs/jfs/jfs_dtree.c 	t->next = si;
t                4280 fs/jfs/jfs_dtree.c 	t->cnt = 1;
t                4309 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
t                4310 fs/jfs/jfs_dtree.c 		t->cnt = 1;
t                4311 fs/jfs/jfs_dtree.c 		si = t->next;
t                4321 fs/jfs/jfs_dtree.c 	t->next = p->header.freelist;
t                4351 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                4369 fs/jfs/jfs_dtree.c 	t = &p->slot[tsi];
t                4371 fs/jfs/jfs_dtree.c 	((struct idtentry *) t)->namlen = 0;
t                4372 fs/jfs/jfs_dtree.c 	si = ((struct idtentry *) t)->next;
t                4373 fs/jfs/jfs_dtree.c 	((struct idtentry *) t)->next = -1;
t                4404 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
t                4405 fs/jfs/jfs_dtree.c 		t->cnt = 1;
t                4406 fs/jfs/jfs_dtree.c 		si = t->next;
t                4418 fs/jfs/jfs_dtree.c 	t->next = p->header.freelist;
t                4432 fs/jfs/jfs_dtree.c 	struct dtslot *t;
t                4451 fs/jfs/jfs_dtree.c 	t = &p->slot[fsi];
t                4452 fs/jfs/jfs_dtree.c 	si = t->next;
t                4477 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
t                4478 fs/jfs/jfs_dtree.c 		si = t->next;
t                 623 fs/jfs/jfs_logmgr.c 		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
t                 644 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
t                 645 fs/jfs/jfs_logmgr.c 	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
t                 784 fs/jfs/jfs_logmgr.c 		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
t                 792 fs/jfs/jfs_logmgr.c 		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
t                 876 fs/jfs/jfs_logmgr.c 			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
t                1378 fs/jfs/jfs_logmgr.c 		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
t                1665 fs/jfs/jfs_logmgr.c 	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
t                2441 fs/jfs/jfs_logmgr.c 	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
t                2442 fs/jfs/jfs_logmgr.c 	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);
t                2461 fs/jfs/jfs_logmgr.c 		lp->h.page = lp->t.page = cpu_to_le32(lspn);
t                2462 fs/jfs/jfs_logmgr.c 		lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
t                 122 fs/jfs/jfs_logmgr.h 	} t;
t                 350 fs/jfs/jfs_txnmgr.c 	tid_t t;
t                 389 fs/jfs/jfs_txnmgr.c 	if ((t = TxAnchor.freetid) == 0) {
t                 396 fs/jfs/jfs_txnmgr.c 	tblk = tid_to_tblock(t);
t                 426 fs/jfs/jfs_txnmgr.c 	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
t                 431 fs/jfs/jfs_txnmgr.c 	jfs_info("txBegin: returning tid = %d", t);
t                 433 fs/jfs/jfs_txnmgr.c 	return t;
t                1116 fs/namespace.c 	struct mount *m, *t;
t                1118 fs/namespace.c 	llist_for_each_entry_safe(m, t, node, mnt_llist)
t                2991 fs/namespace.c 	char *t = to;
t                3000 fs/namespace.c 			memset(t, 0, n);
t                3003 fs/namespace.c 		*t++ = c;
t                1091 fs/nfs/filelayout/filelayout.c 	struct nfs_page *freq, *t;
t                1098 fs/nfs/filelayout/filelayout.c 		list_for_each_entry_safe(freq, t, &b->written, wb_list) {
t                1102 fs/nfs/filelayout/filelayout.c 		list_for_each_entry_safe(freq, t, &b->committing, wb_list) {
t                2328 fs/nfs/flexfilelayout/flexfilelayout.c 			 ktime_t t)
t                2334 fs/nfs/flexfilelayout/flexfilelayout.c 	ts = ktime_to_timespec64(t);
t                 162 fs/nfs/internal.h int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
t                1062 fs/nfs/nfs4xdr.c xdr_encode_nfstime4(__be32 *p, const struct timespec *t)
t                1064 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, (__s64)t->tv_sec);
t                1065 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(t->tv_nsec);
t                4068 fs/nfs/nfs4xdr.c xdr_decode_nfstime4(__be32 *p, struct timespec *t)
t                4073 fs/nfs/nfs4xdr.c 	t->tv_sec = (time_t)sec;
t                4074 fs/nfs/nfs4xdr.c 	t->tv_nsec = be32_to_cpup(p++);
t                1774 fs/nfs/pnfs.c  	struct nfs4_threshold *t = ctx->mdsthreshold;
t                1779 fs/nfs/pnfs.c  	if (t == NULL)
t                1783 fs/nfs/pnfs.c  		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
t                1787 fs/nfs/pnfs.c  		if (t->bm & THRESHOLD_RD) {
t                1790 fs/nfs/pnfs.c  			if (fsize < t->rd_sz)
t                1793 fs/nfs/pnfs.c  		if (t->bm & THRESHOLD_RD_IO) {
t                1797 fs/nfs/pnfs.c  			if (nfsi->read_io < t->rd_io_sz)
t                1802 fs/nfs/pnfs.c  		if (t->bm & THRESHOLD_WR) {
t                1805 fs/nfs/pnfs.c  			if (fsize < t->wr_sz)
t                1808 fs/nfs/pnfs.c  		if (t->bm & THRESHOLD_WR_IO) {
t                1812 fs/nfs/pnfs.c  			if (nfsi->write_io < t->wr_io_sz)
t                 835 fs/nfs/write.c 	struct nfs_page *freq, *t;
t                 847 fs/nfs/write.c 	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
t                 538 fs/nfsd/nfs4layouts.c 	struct nfs4_layout *lp, *t;
t                 554 fs/nfsd/nfs4layouts.c 		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
t                2233 fs/nfsd/nfs4state.c find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
t                2237 fs/nfsd/nfs4state.c 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
t                2244 fs/nfsd/nfs4state.c find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
t                2249 fs/nfsd/nfs4state.c 	s = find_stateid_locked(cl, t);
t                5219 fs/nfsd/nfs4state.c 	time_t t, new_timeo = nn->nfsd4_lease;
t                5234 fs/nfsd/nfs4state.c 			t = clp->cl_time - cutoff;
t                5235 fs/nfsd/nfs4state.c 			new_timeo = min(new_timeo, t);
t                5257 fs/nfsd/nfs4state.c 			t = dp->dl_time - cutoff;
t                5258 fs/nfsd/nfs4state.c 			new_timeo = min(new_timeo, t);
t                5278 fs/nfsd/nfs4state.c 			t = oo->oo_time - cutoff;
t                5279 fs/nfsd/nfs4state.c 			new_timeo = min(new_timeo, t);
t                5309 fs/nfsd/nfs4state.c 			t = nbl->nbl_time - cutoff;
t                5310 fs/nfsd/nfs4state.c 			new_timeo = min(new_timeo, t);
t                5335 fs/nfsd/nfs4state.c 	time_t t;
t                5340 fs/nfsd/nfs4state.c 	t = nfs4_laundromat(nn);
t                5341 fs/nfsd/nfs4state.c 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
t                5342 fs/nfsd/nfs4state.c 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
t                 296 fs/nilfs2/dir.c 				unsigned char t;
t                 299 fs/nilfs2/dir.c 					t = nilfs_filetype_table[de->file_type];
t                 301 fs/nilfs2/dir.c 					t = DT_UNKNOWN;
t                 304 fs/nilfs2/dir.c 						le64_to_cpu(de->inode), t)) {
t                  93 fs/nilfs2/segbuf.h #define nilfs_for_each_segbuf_before(s, t, h) \
t                  94 fs/nilfs2/segbuf.h 	for ((s) = NILFS_FIRST_SEGBUF(h); (s) != (t); \
t                2396 fs/nilfs2/segment.c static void nilfs_construction_timeout(struct timer_list *t)
t                2398 fs/nilfs2/segment.c 	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
t                2613 fs/nilfs2/segment.c 	struct task_struct *t;
t                2615 fs/nilfs2/segment.c 	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
t                2616 fs/nilfs2/segment.c 	if (IS_ERR(t)) {
t                2617 fs/nilfs2/segment.c 		int err = PTR_ERR(t);
t                  49 fs/nilfs2/sufile.c 	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
t                  51 fs/nilfs2/sufile.c 	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
t                  52 fs/nilfs2/sufile.c 	return (unsigned long)t;
t                  58 fs/nilfs2/sufile.c 	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
t                  60 fs/nilfs2/sufile.c 	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
t                 270 fs/nilfs2/super.c 	time64_t t;
t                 273 fs/nilfs2/super.c 	t = ktime_get_real_seconds();
t                 274 fs/nilfs2/super.c 	nilfs->ns_sbwtime = t;
t                 275 fs/nilfs2/super.c 	sbp[0]->s_wtime = cpu_to_le64(t);
t                 261 fs/nilfs2/the_nilfs.h 	u64 t = ktime_get_real_seconds();
t                 263 fs/nilfs2/the_nilfs.h 	return t < nilfs->ns_sbwtime ||
t                 264 fs/nilfs2/the_nilfs.h 		t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq;
t                  59 fs/nls/nls_base.c 	const struct utf8_table *t;
t                  64 fs/nls/nls_base.c 	for (t = utf8_table; t->cmask; t++) {
t                  66 fs/nls/nls_base.c 		if ((c0 & t->cmask) == t->cval) {
t                  67 fs/nls/nls_base.c 			l &= t->lmask;
t                  68 fs/nls/nls_base.c 			if (l < t->lval || l > UNICODE_MAX ||
t                  90 fs/nls/nls_base.c 	const struct utf8_table *t;
t                 100 fs/nls/nls_base.c 	for (t = utf8_table; t->cmask && maxout; t++, maxout--) {
t                 102 fs/nls/nls_base.c 		if (l <= t->lmask) {
t                 103 fs/nls/nls_base.c 			c = t->shift;
t                 104 fs/nls/nls_base.c 			*s = (u8) (t->cval | (l >> c));
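
The fs/nls/nls_base.c lines above drive UTF-8 conversion from a table of cmask/cval/shift/lmask entries, one per sequence length. As a cross-check on what those masks encode, here is an equivalent branch-based encoder for the same ranges in plain C; it is a simplified stand-in, not the nls table walker, and it does not reject surrogates:

	#include <stdio.h>

	/* Encode one Unicode code point (<= 0x10FFFF) as UTF-8; returns byte count. */
	static int utf8_encode(unsigned long c, unsigned char *s)
	{
		if (c < 0x80) {
			s[0] = c;
			return 1;
		} else if (c < 0x800) {
			s[0] = 0xC0 | (c >> 6);
			s[1] = 0x80 | (c & 0x3F);
			return 2;
		} else if (c < 0x10000) {
			s[0] = 0xE0 | (c >> 12);
			s[1] = 0x80 | ((c >> 6) & 0x3F);
			s[2] = 0x80 | (c & 0x3F);
			return 3;
		}
		s[0] = 0xF0 | (c >> 18);
		s[1] = 0x80 | ((c >> 12) & 0x3F);
		s[2] = 0x80 | ((c >> 6) & 0x3F);
		s[3] = 0x80 | (c & 0x3F);
		return 4;
	}

	int main(void)
	{
		unsigned char buf[4];
		int n = utf8_encode(0x20AC, buf);	/* U+20AC EURO SIGN */

		for (int i = 0; i < n; i++)
			printf("%02X ", buf[i]);
		printf("\n");				/* E2 82 AC */
		return 0;
	}
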
t                  79 fs/ntfs/time.h 	u64 t = (u64)(sle64_to_cpu(time) - NTFS_TIME_OFFSET);
t                  84 fs/ntfs/time.h 	ts.tv_nsec = do_div(t, 10000000) * 100;
t                  85 fs/ntfs/time.h 	ts.tv_sec = t;
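
The fs/ntfs/time.h lines above convert an NTFS timestamp, a count of 100 ns intervals since 1601-01-01, into a Unix timespec by subtracting the 1601-to-1970 offset and splitting quotient and remainder with do_div. A userspace sketch of the same arithmetic; the helper name is mine and the input is assumed to be in host byte order:

	#include <inttypes.h>
	#include <stdio.h>
	#include <time.h>

	/* 100 ns intervals between 1601-01-01 and 1970-01-01: (369*365 + 89) days. */
	#define NTFS_TIME_OFFSET ((int64_t)(369 * 365 + 89) * 24 * 3600 * 10000000)

	static struct timespec ntfs2utc(int64_t ntfs_time)
	{
		uint64_t t = (uint64_t)(ntfs_time - NTFS_TIME_OFFSET);
		struct timespec ts;

		ts.tv_nsec = (t % 10000000) * 100;	/* remainder: 100 ns units -> ns */
		ts.tv_sec = t / 10000000;		/* quotient: whole seconds */
		return ts;
	}

	int main(void)
	{
		/* The NTFS representation of the Unix epoch maps back to 0.000000000. */
		struct timespec ts = ntfs2utc(NTFS_TIME_OFFSET);

		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
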
t                 129 fs/ocfs2/cluster/tcp.c static void o2net_idle_timer(struct timer_list *t);
t                1504 fs/ocfs2/cluster/tcp.c static void o2net_idle_timer(struct timer_list *t)
t                1506 fs/ocfs2/cluster/tcp.c 	struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
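
The fs/ocfs2/cluster/tcp.c lines above, like several other groups in this listing (fs/ext4/super.c, fs/jbd2/journal.c, fs/nilfs2/segment.c), use the timer_list callback convention in which the callback receives the timer pointer and recovers its containing object with from_timer(). A minimal kernel-style sketch of that pairing, assuming a kernel build environment; struct my_conn and its fields are invented:

	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/printk.h>

	struct my_conn {
		struct timer_list idle_timer;
		/* ... other connection state ... */
	};

	/* Callback gets the timer pointer; from_timer() recovers the container. */
	static void my_conn_idle_timer(struct timer_list *t)
	{
		struct my_conn *conn = from_timer(conn, t, idle_timer);

		pr_info("connection %p went idle\n", conn);
	}

	static void my_conn_arm(struct my_conn *conn)
	{
		timer_setup(&conn->idle_timer, my_conn_idle_timer, 0);
		mod_timer(&conn->idle_timer, jiffies + 30 * HZ);
	}
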
t                  87 fs/orangefs/orangefs-bufmap.c 		long n = left, t;
t                 102 fs/orangefs/orangefs-bufmap.c 		t = schedule_timeout(n);
t                 104 fs/orangefs/orangefs-bufmap.c 		if (unlikely(!t) && n != left && m->c < 0)
t                 105 fs/orangefs/orangefs-bufmap.c 			left = t;
t                 107 fs/orangefs/orangefs-bufmap.c 			left = t + (left - n);
t                 501 fs/proc/array.c 			struct task_struct *t = task;
t                 503 fs/proc/array.c 				min_flt += t->min_flt;
t                 504 fs/proc/array.c 				maj_flt += t->maj_flt;
t                 505 fs/proc/array.c 				gtime += task_gtime(t);
t                 506 fs/proc/array.c 			} while_each_thread(task, t);
t                2783 fs/proc/base.c 		struct task_struct *t = task;
t                2786 fs/proc/base.c 		while_each_thread(task, t)
t                2787 fs/proc/base.c 			task_io_accounting_add(&acct, &t->ioac);
t                 887 fs/reiserfs/bitmap.c 			int t = get_block_num(item, pos_in_item);
t                 888 fs/reiserfs/bitmap.c 			if (t) {
t                 889 fs/reiserfs/bitmap.c 				hint->search_start = t;
t                2812 fs/reiserfs/reiserfs.h #define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
t                 832 fs/reiserfs/super.c #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
t                  34 fs/timerfd.c   	} t;
t                  76 fs/timerfd.c   					       t.tmr);
t                  85 fs/timerfd.c   					       t.alarm);
t                 165 fs/timerfd.c   		remaining = alarm_expires_remaining(&ctx->t.alarm);
t                 167 fs/timerfd.c   		remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
t                 188 fs/timerfd.c   		alarm_init(&ctx->t.alarm,
t                 193 fs/timerfd.c   		hrtimer_init(&ctx->t.tmr, clockid, htmode);
t                 194 fs/timerfd.c   		hrtimer_set_expires(&ctx->t.tmr, texp);
t                 195 fs/timerfd.c   		ctx->t.tmr.function = timerfd_tmrproc;
t                 201 fs/timerfd.c   				alarm_start(&ctx->t.alarm, texp);
t                 203 fs/timerfd.c   				alarm_start_relative(&ctx->t.alarm, texp);
t                 205 fs/timerfd.c   			hrtimer_start(&ctx->t.tmr, texp, htmode);
t                 223 fs/timerfd.c   		alarm_cancel(&ctx->t.alarm);
t                 225 fs/timerfd.c   		hrtimer_cancel(&ctx->t.tmr);
t                 284 fs/timerfd.c   					&ctx->t.alarm, ctx->tintv) - 1;
t                 285 fs/timerfd.c   				alarm_restart(&ctx->t.alarm);
t                 287 fs/timerfd.c   				ticks += hrtimer_forward_now(&ctx->t.tmr,
t                 289 fs/timerfd.c   				hrtimer_restart(&ctx->t.tmr);
t                 305 fs/timerfd.c   	struct itimerspec t;
t                 308 fs/timerfd.c   	t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
t                 309 fs/timerfd.c   	t.it_interval = ktime_to_timespec(ctx->tintv);
t                 321 fs/timerfd.c   		   (unsigned long long)t.it_value.tv_sec,
t                 322 fs/timerfd.c   		   (unsigned long long)t.it_value.tv_nsec,
t                 323 fs/timerfd.c   		   (unsigned long long)t.it_interval.tv_sec,
t                 324 fs/timerfd.c   		   (unsigned long long)t.it_interval.tv_nsec);
t                 418 fs/timerfd.c   		alarm_init(&ctx->t.alarm,
t                 423 fs/timerfd.c   		hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
t                 467 fs/timerfd.c   			if (alarm_try_to_cancel(&ctx->t.alarm) >= 0)
t                 470 fs/timerfd.c   			if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
t                 476 fs/timerfd.c   			hrtimer_cancel_wait_running(&ctx->t.alarm.timer);
t                 478 fs/timerfd.c   			hrtimer_cancel_wait_running(&ctx->t.tmr);
t                 489 fs/timerfd.c   			alarm_forward_now(&ctx->t.alarm, ctx->tintv);
t                 491 fs/timerfd.c   			hrtimer_forward_now(&ctx->t.tmr, ctx->tintv);
t                 507 fs/timerfd.c   static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
t                 523 fs/timerfd.c   					&ctx->t.alarm, ctx->tintv) - 1;
t                 524 fs/timerfd.c   			alarm_restart(&ctx->t.alarm);
t                 527 fs/timerfd.c   				hrtimer_forward_now(&ctx->t.tmr, ctx->tintv)
t                 529 fs/timerfd.c   			hrtimer_restart(&ctx->t.tmr);
t                 532 fs/timerfd.c   	t->it_value = ktime_to_timespec64(timerfd_get_remaining(ctx));
t                 533 fs/timerfd.c   	t->it_interval = ktime_to_timespec64(ctx->tintv);
t                 436 fs/ubifs/key.h 	union ubifs_key *t = to;
t                 438 fs/ubifs/key.h 	t->j32[0] = cpu_to_le32(from->u32[0]);
t                 439 fs/ubifs/key.h 	t->j32[1] = cpu_to_le32(from->u32[1]);
t                 452 fs/ubifs/key.h 	union ubifs_key *t = to;
t                 454 fs/ubifs/key.h 	t->j32[0] = cpu_to_le32(from->u32[0]);
t                 455 fs/ubifs/key.h 	t->j32[1] = cpu_to_le32(from->u32[1]);
t                  92 fs/ubifs/log.c 	long long h, t;
t                  95 fs/ubifs/log.c 	t = (long long)c->ltail_lnum * c->leb_size;
t                  97 fs/ubifs/log.c 	if (h > t)
t                  98 fs/ubifs/log.c 		return c->log_bytes - h + t;
t                  99 fs/ubifs/log.c 	else if (h != t)
t                 100 fs/ubifs/log.c 		return t - h;
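
The fs/ubifs/log.c lines above compute the free space in the circular on-flash log from the head and tail byte offsets, choosing the subtraction by whether the used region wraps. A small userspace restatement of that circular-buffer arithmetic; the kernel additionally disambiguates the full-versus-empty case when head equals tail, which is simplified away here:

	#include <stdio.h>

	/*
	 * Free space in a circular log of 'total' bytes whose used region runs
	 * from 'tail' (oldest data) up to 'head' (next write position).
	 */
	static long long log_free_bytes(long long total, long long head, long long tail)
	{
		if (head > tail)		/* used region does not wrap */
			return total - head + tail;
		else if (head != tail)		/* used region wraps around the end */
			return tail - head;
		else				/* head == tail: treated as empty here */
			return total;
	}

	int main(void)
	{
		printf("%lld\n", log_free_bytes(1024, 700, 200));	/* 524 */
		printf("%lld\n", log_free_bytes(1024, 100, 600));	/* 500 */
		return 0;
	}
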
t                 287 fs/udf/misc.c  u8 udf_tag_checksum(const struct tag *t)
t                 289 fs/udf/misc.c  	u8 *data = (u8 *)t;
t                  69 fs/udf/udfdecl.h u8 udf_tag_checksum(const struct tag *t);
t                3177 fs/unicode/mkutf8data.c 	char *t;
t                3183 fs/unicode/mkutf8data.c 	t = buf3;
t                3187 fs/unicode/mkutf8data.c 		if (c != (unsigned char)*t++)
t                3191 fs/unicode/mkutf8data.c 	if (*t != 0)
t                3198 fs/unicode/mkutf8data.c 	t = buf3;
t                3202 fs/unicode/mkutf8data.c 		if (c != (unsigned char)*t++)
t                3206 fs/unicode/mkutf8data.c 	if (*t != 0)
t                3218 fs/unicode/mkutf8data.c 	char *t;
t                3237 fs/unicode/mkutf8data.c 		t = buf2;
t                3240 fs/unicode/mkutf8data.c 			t += utf8encode(t, unichar);
t                3242 fs/unicode/mkutf8data.c 		*t = '\0';
t                3246 fs/unicode/mkutf8data.c 		t = buf3;
t                3253 fs/unicode/mkutf8data.c 				t += utf8encode(t, unichar);
t                3255 fs/unicode/mkutf8data.c 		*t = '\0';
t                3280 fs/unicode/mkutf8data.c 	int t;
t                3304 fs/unicode/mkutf8data.c 	t = 0;
t                3307 fs/unicode/mkutf8data.c 			ages[gen], trees[t].index,
t                3309 fs/unicode/mkutf8data.c 		if (trees[t].maxage == ages[gen])
t                3310 fs/unicode/mkutf8data.c 			t += 2;
t                3315 fs/unicode/mkutf8data.c 	t = 1;
t                3318 fs/unicode/mkutf8data.c 			ages[gen], trees[t].index,
t                3320 fs/unicode/mkutf8data.c 		if (trees[t].maxage == ages[gen])
t                3321 fs/unicode/mkutf8data.c 			t += 2;
t                3327 fs/unicode/mkutf8data.c 	t = 0;
t                3329 fs/unicode/mkutf8data.c 		if (i == trees[t].index) {
t                3331 fs/unicode/mkutf8data.c 				trees[t].type, trees[t].maxage);
t                3332 fs/unicode/mkutf8data.c 			if (t < trees_count-1)
t                3333 fs/unicode/mkutf8data.c 				t++;
t                 226 fs/utimes.c    		struct old_utimbuf32 __user *, t)
t                 230 fs/utimes.c    	if (t) {
t                 231 fs/utimes.c    		if (get_user(tv[0].tv_sec, &t->actime) ||
t                 232 fs/utimes.c    		    get_user(tv[1].tv_sec, &t->modtime))
t                 237 fs/utimes.c    	return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
t                 241 fs/utimes.c    SYSCALL_DEFINE4(utimensat_time32, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
t                 245 fs/utimes.c    	if  (t) {
t                 246 fs/utimes.c    		if (get_old_timespec32(&tv[0], &t[0]) ||
t                 247 fs/utimes.c    		    get_old_timespec32(&tv[1], &t[1]))
t                 253 fs/utimes.c    	return do_utimes(dfd, filename, t ? tv : NULL, flags);
t                 258 fs/utimes.c    				struct old_timeval32 __user *t)
t                 262 fs/utimes.c    	if (t) {
t                 263 fs/utimes.c    		if (get_user(tv[0].tv_sec, &t[0].tv_sec) ||
t                 264 fs/utimes.c    		    get_user(tv[0].tv_nsec, &t[0].tv_usec) ||
t                 265 fs/utimes.c    		    get_user(tv[1].tv_sec, &t[1].tv_sec) ||
t                 266 fs/utimes.c    		    get_user(tv[1].tv_nsec, &t[1].tv_usec))
t                 274 fs/utimes.c    	return do_utimes(dfd, filename, t ? tv : NULL, 0);
t                 279 fs/utimes.c    		       struct old_timeval32 __user *, t)
t                 281 fs/utimes.c    	return do_compat_futimesat(dfd, filename, t);
t                 284 fs/utimes.c    SYSCALL_DEFINE2(utimes_time32, const char __user *, filename, struct old_timeval32 __user *, t)
t                 286 fs/utimes.c    	return do_compat_futimesat(AT_FDCWD, filename, t);
t                 647 fs/xfs/libxfs/xfs_dir2_data.c 	int                     t;              /* temp */
t                 691 fs/xfs/libxfs/xfs_dir2_data.c 	t = args->geo->blksize - (uint)dp->d_ops->data_entry_offset;
t                 692 fs/xfs/libxfs/xfs_dir2_data.c 	bf[0].length = cpu_to_be16(t);
t                 693 fs/xfs/libxfs/xfs_dir2_data.c 	dup->length = cpu_to_be16(t);
t                 957 fs/xfs/libxfs/xfs_rtbitmap.c #define xfs_rtcheck_alloc_range(m,t,b,l)	(0)
t                  26 fs/xfs/mrlock.h #define mrlock_init(mrp, t,n,s)	mrinit(mrp, n)
t                 139 fs/xfs/xfs_rtalloc.h # define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb)    (ENOSYS)
t                 140 fs/xfs/xfs_rtalloc.h # define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
t                 141 fs/xfs/xfs_rtalloc.h # define xfs_rtpick_extent(m,t,l,rb)                    (ENOSYS)
t                 143 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_range(t,l,h,f,p)             (ENOSYS)
t                 144 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_all(t,f,p)                   (ENOSYS)
t                 145 fs/xfs/xfs_rtalloc.h # define xfs_rtbuf_get(m,t,b,i,p)                       (ENOSYS)
t                 147 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_extent_is_free(m,t,s,l,i)          (ENOSYS)
t                 501 include/acpi/actypes.h #define ACPI_CAST_PTR(t, p)             ((t *) (acpi_uintptr_t) (p))
t                 502 include/acpi/actypes.h #define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **) (acpi_uintptr_t) (p))
t                 503 include/acpi/actypes.h #define ACPI_ADD_PTR(t, a, b)           ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
t                 504 include/acpi/actypes.h #define ACPI_SUB_PTR(t, a, b)           ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
t                  78 include/acpi/platform/aclinux.h #define ACPI_GLOBAL(t,a)
t                  79 include/acpi/platform/aclinux.h #define ACPI_INIT_GLOBAL(t,a,b)
t                   8 include/asm-generic/ioctl.h #define _IOC_TYPECHECK(t) (sizeof(t))
t                  12 include/asm-generic/ioctl.h #define _IOC_TYPECHECK(t) \
t                  13 include/asm-generic/ioctl.h 	((sizeof(t) == sizeof(t[1]) && \
t                  14 include/asm-generic/ioctl.h 	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
t                  15 include/asm-generic/ioctl.h 	  sizeof(t) : __invalid_size_argument_for_IOC)
t                 223 include/crypto/gf128mul.h 	be128 t[256];
t                 228 include/crypto/gf128mul.h void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
t                 229 include/crypto/gf128mul.h void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
t                 231 include/crypto/gf128mul.h static inline void gf128mul_free_4k(struct gf128mul_4k *t)
t                 233 include/crypto/gf128mul.h 	kzfree(t);
t                 240 include/crypto/gf128mul.h 	struct gf128mul_4k *t[16];
t                 249 include/crypto/gf128mul.h void gf128mul_free_64k(struct gf128mul_64k *t);
t                 250 include/crypto/gf128mul.h void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
t                 134 include/drm/drm_modes.h #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
t                 135 include/drm/drm_modes.h 	.name = nm, .status = 0, .type = (t), .clock = (c), \
t                  14 include/dt-bindings/usb/pd.h #define PDO_TYPE(t)	((t) << PDO_TYPE_SHIFT)
t                  67 include/dt-bindings/usb/pd.h #define PDO_APDO_TYPE(t)	((t) << PDO_APDO_TYPE_SHIFT)
t                  98 include/kvm/arm_arch_timer.h #define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
t                1116 include/linux/acpi.h #define acpi_probe_device_table(t)					\
t                1118 include/linux/acpi.h 		extern struct acpi_probe_entry ACPI_PROBE_TABLE(t),	\
t                1119 include/linux/acpi.h 			                       ACPI_PROBE_TABLE_END(t);	\
t                1120 include/linux/acpi.h 		__acpi_probe_device_table(&ACPI_PROBE_TABLE(t),		\
t                1121 include/linux/acpi.h 					  (&ACPI_PROBE_TABLE_END(t) -	\
t                1122 include/linux/acpi.h 					   &ACPI_PROBE_TABLE(t)));	\
t                1224 include/linux/acpi.h #define acpi_probe_device_table(t)	({ int __r = 0; __r;})
t                 155 include/linux/assoc_array_priv.h struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
t                 157 include/linux/assoc_array_priv.h 	return (struct assoc_array_ptr *)((unsigned long)p | t);
t                 185 include/linux/audit.h extern int audit_signal_info(int sig, struct task_struct *t);
t                 241 include/linux/audit.h static inline int audit_signal_info(int sig, struct task_struct *t)
t                 280 include/linux/audit.h extern void __audit_ptrace(struct task_struct *t);
t                 355 include/linux/audit.h static inline void audit_ptrace(struct task_struct *t)
t                 358 include/linux/audit.h 		__audit_ptrace(t);
t                 646 include/linux/audit.h static inline void audit_ptrace(struct task_struct *t)
t                  40 include/linux/bch.h 	unsigned int    t;
t                  56 include/linux/bch.h struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
t                1094 include/linux/blkdev.h extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t                1096 include/linux/blkdev.h extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
t                1100 include/linux/blkdev.h extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
t                  53 include/linux/btf.h int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
t                  54 include/linux/btf.h bool btf_type_is_void(const struct btf_type *t);
t                  62 include/linux/can/dev/peak_canfd.h #define PUCAN_TSLOW_SJW_T(s, t)		(((s) & PUCAN_TSLOW_SJW_MASK) | \
t                  63 include/linux/can/dev/peak_canfd.h 								((!!(t)) << 7))
t                  64 include/linux/can/dev/peak_canfd.h #define PUCAN_TSLOW_TSEG2(t)		((t) & PUCAN_TSLOW_TSEG2_MASK)
t                  65 include/linux/can/dev/peak_canfd.h #define PUCAN_TSLOW_TSEG1(t)		((t) & PUCAN_TSLOW_TSEG1_MASK)
t                  91 include/linux/can/dev/peak_canfd.h #define PUCAN_TFAST_TSEG2(t)		((t) & PUCAN_TFAST_TSEG2_MASK)
t                  92 include/linux/can/dev/peak_canfd.h #define PUCAN_TFAST_TSEG1(t)		((t) & PUCAN_TFAST_TSEG1_MASK)
t                 204 include/linux/capability.h extern bool has_capability(struct task_struct *t, int cap);
t                 205 include/linux/capability.h extern bool has_ns_capability(struct task_struct *t,
t                 207 include/linux/capability.h extern bool has_capability_noaudit(struct task_struct *t, int cap);
t                 208 include/linux/capability.h extern bool has_ns_capability_noaudit(struct task_struct *t,
t                 215 include/linux/capability.h static inline bool has_capability(struct task_struct *t, int cap)
t                 219 include/linux/capability.h static inline bool has_ns_capability(struct task_struct *t,
t                 224 include/linux/capability.h static inline bool has_capability_noaudit(struct task_struct *t, int cap)
t                 228 include/linux/capability.h static inline bool has_ns_capability_noaudit(struct task_struct *t,
t                  51 include/linux/cb710.h #define CB710_PORT_ACCESSORS(t) \
t                  52 include/linux/cb710.h static inline void cb710_write_port_##t(struct cb710_slot *slot,	\
t                  53 include/linux/cb710.h 	unsigned port, u##t value)					\
t                  55 include/linux/cb710.h 	iowrite##t(value, slot->iobase + port);				\
t                  58 include/linux/cb710.h static inline u##t cb710_read_port_##t(struct cb710_slot *slot,		\
t                  61 include/linux/cb710.h 	return ioread##t(slot->iobase + port);				\
t                  64 include/linux/cb710.h static inline void cb710_modify_port_##t(struct cb710_slot *slot,	\
t                  65 include/linux/cb710.h 	unsigned port, u##t set, u##t clear)				\
t                  67 include/linux/cb710.h 	iowrite##t(							\
t                  68 include/linux/cb710.h 		(ioread##t(slot->iobase + port) & ~clear)|set,		\
t                 191 include/linux/ceph/libceph.h static void insert_##name(struct rb_root *root, type *t)		\
t                 196 include/linux/ceph/libceph.h 	BUG_ON(!RB_EMPTY_NODE(&t->nodefld));				\
t                 203 include/linux/ceph/libceph.h 		cmp = cmpexp(keyexp(t->keyfld), keyexp(cur->keyfld));	\
t                 212 include/linux/ceph/libceph.h 	rb_link_node(&t->nodefld, parent, n);				\
t                 213 include/linux/ceph/libceph.h 	rb_insert_color(&t->nodefld, root);				\
t                 215 include/linux/ceph/libceph.h static void erase_##name(struct rb_root *root, type *t)			\
t                 217 include/linux/ceph/libceph.h 	BUG_ON(RB_EMPTY_NODE(&t->nodefld));				\
t                 218 include/linux/ceph/libceph.h 	rb_erase(&t->nodefld, root);					\
t                 219 include/linux/ceph/libceph.h 	RB_CLEAR_NODE(&t->nodefld);					\
t                 260 include/linux/ceph/osd_client.h 	struct ceph_osd_request_target t;
t                 707 include/linux/cgroup.h 					 struct task_struct *t) { return 0; }
t                  45 include/linux/compat.h #define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
t                 495 include/linux/compat.h 	struct task_struct *t = current; \
t                 496 include/linux/compat.h 	put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
t                 497 include/linux/compat.h 	put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
t                 498 include/linux/compat.h 	put_user_ex(t->sas_ss_size, &__uss->ss_size); \
t                 499 include/linux/compat.h 	if (t->sas_ss_flags & SS_AUTODISARM) \
t                 500 include/linux/compat.h 		sas_ss_reset(t); \
t                 352 include/linux/compiler.h #define compiletime_assert_atomic_type(t)				\
t                 353 include/linux/compiler.h 	compiletime_assert(__native_word(t),				\
t                 223 include/linux/compiler_types.h #define __native_word(t) \
t                 224 include/linux/compiler_types.h 	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
t                 225 include/linux/compiler_types.h 	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
t                 331 include/linux/device-mapper.h int dm_register_target(struct target_type *t);
t                 332 include/linux/device-mapper.h void dm_unregister_target(struct target_type *t);
t                 458 include/linux/device-mapper.h int dm_table_add_target(struct dm_table *t, const char *type,
t                 464 include/linux/device-mapper.h void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
t                 472 include/linux/device-mapper.h void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
t                 477 include/linux/device-mapper.h int dm_table_complete(struct dm_table *t);
t                 482 include/linux/device-mapper.h void dm_table_destroy(struct dm_table *t);
t                 499 include/linux/device-mapper.h sector_t dm_table_get_size(struct dm_table *t);
t                 500 include/linux/device-mapper.h unsigned int dm_table_get_num_targets(struct dm_table *t);
t                 501 include/linux/device-mapper.h fmode_t dm_table_get_mode(struct dm_table *t);
t                 502 include/linux/device-mapper.h struct mapped_device *dm_table_get_md(struct dm_table *t);
t                 503 include/linux/device-mapper.h const char *dm_table_device_name(struct dm_table *t);
t                 508 include/linux/device-mapper.h void dm_table_event(struct dm_table *t);
t                 513 include/linux/device-mapper.h void dm_table_run_md_queue_async(struct dm_table *t);
t                 520 include/linux/device-mapper.h 			       struct dm_table *t);
t                 621 include/linux/dma-mapping.h #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
t                  33 include/linux/elfcore.h static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
t                  36 include/linux/elfcore.h 	return ELF_CORE_COPY_TASK_REGS(t, elfregs);
t                  38 include/linux/elfcore.h 	elf_core_copy_regs(elfregs, task_pt_regs(t));
t                  45 include/linux/elfcore.h static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
t                  48 include/linux/elfcore.h 	return ELF_CORE_COPY_FPREGS(t, fpu);
t                  55 include/linux/elfcore.h static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
t                  57 include/linux/elfcore.h 	return ELF_CORE_COPY_XFPREGS(t, xfpu);
t                 433 include/linux/filter.h #define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
t                 434 include/linux/filter.h #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
t                 435 include/linux/filter.h #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
t                 436 include/linux/filter.h #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
t                 437 include/linux/filter.h #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
t                 449 include/linux/filter.h #define __BPF_CAST(t, a)						       \
t                 450 include/linux/filter.h 	(__force t)							       \
t                 452 include/linux/filter.h 	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
t                 453 include/linux/filter.h 				      (unsigned long)0, (t)0))) a
t                 457 include/linux/filter.h #define __BPF_DECL_ARGS(t, a) t   a
t                 458 include/linux/filter.h #define __BPF_DECL_REGS(t, a) u64 a
t                 355 include/linux/firewire.h void fw_send_request(struct fw_card *card, struct fw_transaction *t,
t                  43 include/linux/freezer.h extern void __thaw_task(struct task_struct *t);
t                 263 include/linux/freezer.h static inline void __thaw_task(struct task_struct *t) {}
t                 741 include/linux/fs.h struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
t                1579 include/linux/fs.h extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
t                  95 include/linux/fs_parser.h #define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf))
t                  35 include/linux/fsi.h #define FSI_DEVICE(t) \
t                  36 include/linux/fsi.h 	.engine_type = (t), .version = FSI_VERSION_ANY,
t                  38 include/linux/fsi.h #define FSI_DEVICE_VERSIONED(t, v) \
t                  39 include/linux/fsi.h 	.engine_type = (t), .version = (v),
t                 810 include/linux/ftrace.h extern void ftrace_graph_init_task(struct task_struct *t);
t                 811 include/linux/ftrace.h extern void ftrace_graph_exit_task(struct task_struct *t);
t                 812 include/linux/ftrace.h extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
t                 827 include/linux/ftrace.h static inline void ftrace_graph_init_task(struct task_struct *t) { }
t                 828 include/linux/ftrace.h static inline void ftrace_graph_exit_task(struct task_struct *t) { }
t                 829 include/linux/ftrace.h static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
t                 859 include/linux/i2c.h void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
t                1220 include/linux/ide.h extern void ide_timer_expiry(struct timer_list *t);
t                  58 include/linux/iio/sw_trigger.h void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
t                  63 include/linux/iio/sw_trigger.h 	config_group_init_type_name(&t->group, name, type);
t                 615 include/linux/interrupt.h static inline int tasklet_trylock(struct tasklet_struct *t)
t                 617 include/linux/interrupt.h 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
t                 620 include/linux/interrupt.h static inline void tasklet_unlock(struct tasklet_struct *t)
t                 623 include/linux/interrupt.h 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
t                 626 include/linux/interrupt.h static inline void tasklet_unlock_wait(struct tasklet_struct *t)
t                 628 include/linux/interrupt.h 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
t                 631 include/linux/interrupt.h #define tasklet_trylock(t) 1
t                 632 include/linux/interrupt.h #define tasklet_unlock_wait(t) do { } while (0)
t                 633 include/linux/interrupt.h #define tasklet_unlock(t) do { } while (0)
t                 636 include/linux/interrupt.h extern void __tasklet_schedule(struct tasklet_struct *t);
t                 638 include/linux/interrupt.h static inline void tasklet_schedule(struct tasklet_struct *t)
t                 640 include/linux/interrupt.h 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
t                 641 include/linux/interrupt.h 		__tasklet_schedule(t);
t                 644 include/linux/interrupt.h extern void __tasklet_hi_schedule(struct tasklet_struct *t);
t                 646 include/linux/interrupt.h static inline void tasklet_hi_schedule(struct tasklet_struct *t)
t                 648 include/linux/interrupt.h 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
t                 649 include/linux/interrupt.h 		__tasklet_hi_schedule(t);
t                 652 include/linux/interrupt.h static inline void tasklet_disable_nosync(struct tasklet_struct *t)
t                 654 include/linux/interrupt.h 	atomic_inc(&t->count);
t                 658 include/linux/interrupt.h static inline void tasklet_disable(struct tasklet_struct *t)
t                 660 include/linux/interrupt.h 	tasklet_disable_nosync(t);
t                 661 include/linux/interrupt.h 	tasklet_unlock_wait(t);
t                 665 include/linux/interrupt.h static inline void tasklet_enable(struct tasklet_struct *t)
t                 668 include/linux/interrupt.h 	atomic_dec(&t->count);
t                 671 include/linux/interrupt.h extern void tasklet_kill(struct tasklet_struct *t);
t                 672 include/linux/interrupt.h extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
t                 673 include/linux/interrupt.h extern void tasklet_init(struct tasklet_struct *t,
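
The include/linux/interrupt.h lines above are the tasklet interface: tasklet_schedule() only queues the tasklet when its SCHED bit was clear, and disable/enable nest by counting. A minimal kernel-style usage sketch, assuming a kernel build environment; the device structure, handler body and names are invented:

	#include <linux/interrupt.h>

	struct my_dev {
		struct tasklet_struct rx_tasklet;
		/* ... device state ... */
	};

	/* Bottom half: runs in softirq context after the hard IRQ handler returns. */
	static void my_dev_rx_tasklet(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *)data;

		/* drain the RX ring, hand packets up, etc. */
		(void)dev;
	}

	static void my_dev_setup(struct my_dev *dev)
	{
		tasklet_init(&dev->rx_tasklet, my_dev_rx_tasklet, (unsigned long)dev);
	}

	static irqreturn_t my_dev_irq(int irq, void *cookie)
	{
		struct my_dev *dev = cookie;

		/* Ack the hardware, then defer the heavy lifting. */
		tasklet_schedule(&dev->rx_tasklet);
		return IRQ_HANDLED;
	}
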
t                  27 include/linux/kcov.h void kcov_task_init(struct task_struct *t);
t                  28 include/linux/kcov.h void kcov_task_exit(struct task_struct *t);
t                  30 include/linux/kcov.h #define kcov_prepare_switch(t)			\
t                  32 include/linux/kcov.h 	(t)->kcov_mode |= KCOV_IN_CTXSW;	\
t                  35 include/linux/kcov.h #define kcov_finish_switch(t)			\
t                  37 include/linux/kcov.h 	(t)->kcov_mode &= ~KCOV_IN_CTXSW;	\
t                  42 include/linux/kcov.h static inline void kcov_task_init(struct task_struct *t) {}
t                  43 include/linux/kcov.h static inline void kcov_task_exit(struct task_struct *t) {}
t                  44 include/linux/kcov.h static inline void kcov_prepare_switch(struct task_struct *t) {}
t                  45 include/linux/kcov.h static inline void kcov_finish_switch(struct task_struct *t) {}
t                  89 include/linux/kernel.h #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
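
The include/linux/kernel.h line above defines FIELD_SIZEOF() by applying sizeof to a member reached through a null-typed pointer; sizeof never evaluates its operand, so this is purely a compile-time size query. A tiny standalone demonstration with an invented struct:

	#include <stdio.h>

	#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

	struct packet_header {
		unsigned short len;
		unsigned char  addr[6];
	};

	int main(void)
	{
		/* sizeof does not evaluate its operand, so the null deref is only notional. */
		printf("len:  %zu bytes\n", FIELD_SIZEOF(struct packet_header, len));
		printf("addr: %zu bytes\n", FIELD_SIZEOF(struct packet_header, addr));
		return 0;
	}
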
t                  80 include/linux/kthread.h void kthread_delayed_work_timer_fn(struct timer_list *t);
t                 430 include/linux/lockdep.h # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
t                 588 include/linux/lockdep.h #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
t                 589 include/linux/lockdep.h #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
t                 590 include/linux/lockdep.h #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
t                 592 include/linux/lockdep.h #define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
t                 593 include/linux/lockdep.h #define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
t                 596 include/linux/lockdep.h #define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
t                 597 include/linux/lockdep.h #define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
t                 600 include/linux/lockdep.h #define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
t                 601 include/linux/lockdep.h #define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
t                 604 include/linux/lockdep.h #define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
t                 605 include/linux/lockdep.h #define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
t                 608 include/linux/lockdep.h #define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
t                 609 include/linux/lockdep.h #define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
t                 610 include/linux/lockdep.h #define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
t                 273 include/linux/mroute_base.h 	       void (*expire_func)(struct timer_list *t),
t                 489 include/linux/netfilter/ipset/ip_set.h ip_set_timeout_expired(const unsigned long *t)
t                 491 include/linux/netfilter/ipset/ip_set.h 	return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
t                 497 include/linux/netfilter/ipset/ip_set.h 	unsigned long t;
t                 504 include/linux/netfilter/ipset/ip_set.h 	t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
t                 505 include/linux/netfilter/ipset/ip_set.h 	if (t == IPSET_ELEM_PERMANENT)
t                 507 include/linux/netfilter/ipset/ip_set.h 		t--;
t                 508 include/linux/netfilter/ipset/ip_set.h 	*timeout = t;
t                 514 include/linux/netfilter/ipset/ip_set.h 	u32 t;
t                 519 include/linux/netfilter/ipset/ip_set.h 	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
t                 521 include/linux/netfilter/ipset/ip_set.h 	return t == 0 ? 1 : t;
t                  26 include/linux/netfilter/nf_conntrack_proto_gre.h 			 struct nf_conntrack_tuple *t);
t                 299 include/linux/netfilter/x_tables.h int xt_target_to_user(const struct xt_entry_target *t,
t                 329 include/linux/netfilter/x_tables.h void xt_table_unlock(struct xt_table *t);
t                 524 include/linux/netfilter/x_tables.h void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
t                 526 include/linux/netfilter/x_tables.h int xt_compat_target_to_user(const struct xt_entry_target *t,
t                  62 include/linux/nls.h static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
t                  64 include/linux/nls.h 	unsigned char nc = t->charset2lower[c];
t                  69 include/linux/nls.h static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c)
t                  71 include/linux/nls.h 	unsigned char nc = t->charset2upper[c];
t                  76 include/linux/nls.h static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
t                  80 include/linux/nls.h 		if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++))
t                  75 include/linux/omap-gpmc.h extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
t                  27 include/linux/page_ref.h #define page_ref_tracepoint_active(t) static_key_false(&(t).key)
t                  39 include/linux/page_ref.h #define page_ref_tracepoint_active(t) false
t                1655 include/linux/pci.h #define _PCI_NOP(o, s, t) \
t                1657 include/linux/pci.h 						int where, t val) \
t                3115 include/linux/platform_data/cros_ec_commands.h 	int32_t t;  /* In 1/100 K */
t                 123 include/linux/profile.h static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
t                 128 include/linux/profile.h static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
t                  34 include/linux/psi.h void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
t                 133 include/linux/rcupdate.h #define rcu_tasks_qs(t) \
t                 135 include/linux/rcupdate.h 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
t                 136 include/linux/rcupdate.h 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
t                 138 include/linux/rcupdate.h #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
t                 144 include/linux/rcupdate.h #define rcu_tasks_qs(t)	do { } while (0)
t                 145 include/linux/rcupdate.h #define rcu_note_voluntary_context_switch(t) do { } while (0)
t                  75 include/linux/rcutiny.h static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
t                  79 include/linux/rcutiny.h static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
t                  35 include/linux/reciprocal_div.h 	u32 t = (u32)(((u64)a * R.m) >> 32);
t                  36 include/linux/reciprocal_div.h 	return (t + ((a - t) >> R.sh1)) >> R.sh2;
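reciprocal_divide() above is the fast path of the reciprocal-division trick: reciprocal_value() precomputes (m, sh1, sh2) for a divisor once, and each later division becomes a multiply plus two shifts. A hedged usage sketch (names are illustrative):

#include <linux/reciprocal_div.h>

static struct reciprocal_value div1000;

static void div1000_init(void)
{
	div1000 = reciprocal_value(1000);	/* precompute once for a reused divisor */
}

static u32 div_by_1000(u32 x)
{
	/* same result as x / 1000 for 32-bit x, without a hardware divide */
	return reciprocal_divide(x, div1000);
}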
t                  75 include/linux/rtmutex.h # define rt_mutex_debug_task_free(t)			do { } while (0)
t                1878 include/linux/sched.h static inline void rseq_set_notify_resume(struct task_struct *t)
t                1880 include/linux/sched.h 	if (t->rseq)
t                1881 include/linux/sched.h 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
t                1903 include/linux/sched.h static inline void rseq_preempt(struct task_struct *t)
t                1905 include/linux/sched.h 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
t                1906 include/linux/sched.h 	rseq_set_notify_resume(t);
t                1910 include/linux/sched.h static inline void rseq_migrate(struct task_struct *t)
t                1912 include/linux/sched.h 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
t                1913 include/linux/sched.h 	rseq_set_notify_resume(t);
t                1920 include/linux/sched.h static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t                1923 include/linux/sched.h 		t->rseq = NULL;
t                1924 include/linux/sched.h 		t->rseq_sig = 0;
t                1925 include/linux/sched.h 		t->rseq_event_mask = 0;
t                1927 include/linux/sched.h 		t->rseq = current->rseq;
t                1928 include/linux/sched.h 		t->rseq_sig = current->rseq_sig;
t                1929 include/linux/sched.h 		t->rseq_event_mask = current->rseq_event_mask;
t                1933 include/linux/sched.h static inline void rseq_execve(struct task_struct *t)
t                1935 include/linux/sched.h 	t->rseq = NULL;
t                1936 include/linux/sched.h 	t->rseq_sig = 0;
t                1937 include/linux/sched.h 	t->rseq_event_mask = 0;
t                1942 include/linux/sched.h static inline void rseq_set_notify_resume(struct task_struct *t)
t                1953 include/linux/sched.h static inline void rseq_preempt(struct task_struct *t)
t                1956 include/linux/sched.h static inline void rseq_migrate(struct task_struct *t)
t                1959 include/linux/sched.h static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t                1962 include/linux/sched.h static inline void rseq_execve(struct task_struct *t)
t                  21 include/linux/sched/cputime.h extern void task_cputime(struct task_struct *t,
t                  23 include/linux/sched/cputime.h extern u64 task_gtime(struct task_struct *t);
t                  25 include/linux/sched/cputime.h static inline void task_cputime(struct task_struct *t,
t                  28 include/linux/sched/cputime.h 	*utime = t->utime;
t                  29 include/linux/sched/cputime.h 	*stime = t->stime;
t                  32 include/linux/sched/cputime.h static inline u64 task_gtime(struct task_struct *t)
t                  34 include/linux/sched/cputime.h 	return t->gtime;
t                  39 include/linux/sched/cputime.h static inline void task_cputime_scaled(struct task_struct *t,
t                  43 include/linux/sched/cputime.h 	*utimescaled = t->utimescaled;
t                  44 include/linux/sched/cputime.h 	*stimescaled = t->stimescaled;
t                  47 include/linux/sched/cputime.h static inline void task_cputime_scaled(struct task_struct *t,
t                  51 include/linux/sched/cputime.h 	task_cputime(t, utimescaled, stimescaled);
t                  23 include/linux/sched/isolation.h extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
t                  44 include/linux/sched/isolation.h static inline void housekeeping_affine(struct task_struct *t,
t                 305 include/linux/sched/signal.h 	, struct task_struct *t);
t                 312 include/linux/sched/signal.h 	, struct task_struct *t);
t                 378 include/linux/sched/signal.h extern void recalc_sigpending_and_wake(struct task_struct *t);
t                 382 include/linux/sched/signal.h extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
t                 384 include/linux/sched/signal.h static inline void signal_wake_up(struct task_struct *t, bool resume)
t                 386 include/linux/sched/signal.h 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
t                 388 include/linux/sched/signal.h static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
t                 390 include/linux/sched/signal.h 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
t                 574 include/linux/sched/signal.h #define do_each_thread(g, t) \
t                 575 include/linux/sched/signal.h 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
t                 577 include/linux/sched/signal.h #define while_each_thread(g, t) \
t                 578 include/linux/sched/signal.h 	while ((t = next_thread(t)) != g)
t                 580 include/linux/sched/signal.h #define __for_each_thread(signal, t)	\
t                 581 include/linux/sched/signal.h 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
t                 583 include/linux/sched/signal.h #define for_each_thread(p, t)		\
t                 584 include/linux/sched/signal.h 	__for_each_thread((p)->signal, t)
t                 587 include/linux/sched/signal.h #define for_each_process_thread(p, t)	\
t                 588 include/linux/sched/signal.h 	for_each_process(p) for_each_thread(p, t)
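The thread-iteration macros above (for_each_thread(), for_each_process_thread(), and the older do_each_thread()/while_each_thread() pair) walk the thread lists; callers are expected to hold rcu_read_lock() or tasklist_lock. A small, purely illustrative sketch:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

static void dump_all_threads(void)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t)
		pr_info("%s: pid %d\n", t->comm, t->pid);
	rcu_read_unlock();
}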
t                 108 include/linux/sched/task.h static inline struct task_struct *get_task_struct(struct task_struct *t)
t                 110 include/linux/sched/task.h 	refcount_inc(&t->usage);
t                 111 include/linux/sched/task.h 	return t;
t                 114 include/linux/sched/task.h extern void __put_task_struct(struct task_struct *t);
t                 116 include/linux/sched/task.h static inline void put_task_struct(struct task_struct *t)
t                 118 include/linux/sched/task.h 	if (refcount_dec_and_test(&t->usage))
t                 119 include/linux/sched/task.h 		__put_task_struct(t);
t                 145 include/linux/sched/task.h static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
t                 147 include/linux/sched/task.h 	return t->stack_vm_area;
t                 150 include/linux/sched/task.h static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
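get_task_struct()/put_task_struct() above are the task_struct reference counters; the usual pattern is to pin a task before dropping the lock that made the pointer valid. A brief sketch (hypothetical helper):

#include <linux/sched/task.h>

static void use_task_later(struct task_struct *tsk)
{
	get_task_struct(tsk);	/* pin: tsk cannot be freed while we hold a reference */

	/* ... drop locks, possibly sleep, inspect the task ... */

	put_task_struct(tsk);	/* drop the reference; frees the task if it was the last one */
}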
t                 438 include/linux/signal.h #define sig_fatal(t, signr) \
t                 440 include/linux/signal.h 	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
t                 449 include/linux/signal.h 	struct task_struct *t = current; \
t                 450 include/linux/signal.h 	put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
t                 451 include/linux/signal.h 	put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
t                 452 include/linux/signal.h 	put_user_ex(t->sas_ss_size, &__uss->ss_size); \
t                 453 include/linux/signal.h 	if (t->sas_ss_flags & SS_AUTODISARM) \
t                 454 include/linux/signal.h 		sas_ss_reset(t); \
t                3683 include/linux/skbuff.h static inline ktime_t net_timedelta(ktime_t t)
t                3685 include/linux/skbuff.h 	return ktime_sub(ktime_get_real(), t);
t                 927 include/linux/spi/spi.h spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
t                 929 include/linux/spi/spi.h 	list_add_tail(&t->transfer_list, &m->transfers);
t                 933 include/linux/spi/spi.h spi_transfer_del(struct spi_transfer *t)
t                 935 include/linux/spi/spi.h 	list_del(&t->transfer_list);
t                 971 include/linux/spi/spi.h 		struct spi_transfer *t = (struct spi_transfer *)(m + 1);
t                 974 include/linux/spi/spi.h 		for (i = 0; i < ntrans; i++, t++)
t                 975 include/linux/spi/spi.h 			spi_message_add_tail(t, m);
t                1142 include/linux/spi/spi.h 	struct spi_transfer	t = {
t                1147 include/linux/spi/spi.h 	return spi_sync_transfer(spi, &t, 1);
t                1165 include/linux/spi/spi.h 	struct spi_transfer	t = {
t                1170 include/linux/spi/spi.h 	return spi_sync_transfer(spi, &t, 1);
t                  19 include/linux/spi/spi_bitbang.h 			struct spi_transfer *t);
t                  28 include/linux/spi/spi_bitbang.h 	int	(*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
t                  43 include/linux/spi/spi_bitbang.h 				      struct spi_transfer *t);
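The spi.h entries show spi_message_add_tail() and the spi_write()/spi_read() convenience wrappers, all of which hand a struct spi_transfer to spi_sync_transfer(). A hedged sketch of a one-shot full-duplex transfer (function name is illustrative):

#include <linux/spi/spi.h>

static int spi_xfer_once(struct spi_device *spi,
			 const void *tx, void *rx, size_t len)
{
	struct spi_transfer t = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};

	/* wraps the transfer in a spi_message and blocks until it completes */
	return spi_sync_transfer(spi, &t, 1);
}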
t                  18 include/linux/stackleak.h static inline void stackleak_task_init(struct task_struct *t)
t                  20 include/linux/stackleak.h 	t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
t                  22 include/linux/stackleak.h 	t->prev_lowest_stack = t->lowest_stack;
t                  32 include/linux/stackleak.h static inline void stackleak_task_init(struct task_struct *t) { }
t                 136 include/linux/sunrpc/sched.h #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
t                 137 include/linux/sunrpc/sched.h #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
t                 138 include/linux/sunrpc/sched.h #define RPC_IS_SOFT(t)		((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
t                 139 include/linux/sunrpc/sched.h #define RPC_IS_SOFTCONN(t)	((t)->tk_flags & RPC_TASK_SOFTCONN)
t                 140 include/linux/sunrpc/sched.h #define RPC_WAS_SENT(t)		((t)->tk_flags & RPC_TASK_SENT)
t                 150 include/linux/sunrpc/sched.h #define RPC_IS_RUNNING(t)	test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
t                 151 include/linux/sunrpc/sched.h #define rpc_set_running(t)	set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
t                 152 include/linux/sunrpc/sched.h #define rpc_test_and_set_running(t) \
t                 153 include/linux/sunrpc/sched.h 				test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
t                 154 include/linux/sunrpc/sched.h #define rpc_clear_running(t)	\
t                 157 include/linux/sunrpc/sched.h 		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
t                 161 include/linux/sunrpc/sched.h #define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
t                 162 include/linux/sunrpc/sched.h #define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
t                 163 include/linux/sunrpc/sched.h #define rpc_clear_queued(t)	\
t                 166 include/linux/sunrpc/sched.h 		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
t                 170 include/linux/sunrpc/sched.h #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
t                 172 include/linux/sunrpc/sched.h #define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
t                  29 include/linux/sunrpc/timer.h 	int *t;
t                  32 include/linux/sunrpc/timer.h 	t = &rt->ntimeouts[timer-1];
t                  33 include/linux/sunrpc/timer.h 	if (ntimeo < *t) {
t                  34 include/linux/sunrpc/timer.h 		if (*t > 0)
t                  35 include/linux/sunrpc/timer.h 			(*t)--;
t                  39 include/linux/sunrpc/timer.h 		*t = ntimeo;
t                 108 include/linux/syscalls.h #define __MAP1(m,t,a,...) m(t,a)
t                 109 include/linux/syscalls.h #define __MAP2(m,t,a,...) m(t,a), __MAP1(m,__VA_ARGS__)
t                 110 include/linux/syscalls.h #define __MAP3(m,t,a,...) m(t,a), __MAP2(m,__VA_ARGS__)
t                 111 include/linux/syscalls.h #define __MAP4(m,t,a,...) m(t,a), __MAP3(m,__VA_ARGS__)
t                 112 include/linux/syscalls.h #define __MAP5(m,t,a,...) m(t,a), __MAP4(m,__VA_ARGS__)
t                 113 include/linux/syscalls.h #define __MAP6(m,t,a,...) m(t,a), __MAP5(m,__VA_ARGS__)
t                 116 include/linux/syscalls.h #define __SC_DECL(t, a)	t a
t                 117 include/linux/syscalls.h #define __TYPE_AS(t, v)	__same_type((__force t)0, v)
t                 118 include/linux/syscalls.h #define __TYPE_IS_L(t)	(__TYPE_AS(t, 0L))
t                 119 include/linux/syscalls.h #define __TYPE_IS_UL(t)	(__TYPE_AS(t, 0UL))
t                 120 include/linux/syscalls.h #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
t                 121 include/linux/syscalls.h #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
t                 122 include/linux/syscalls.h #define __SC_CAST(t, a)	(__force t) a
t                 123 include/linux/syscalls.h #define __SC_ARGS(t, a)	a
t                 124 include/linux/syscalls.h #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
t                 127 include/linux/syscalls.h #define __SC_STR_ADECL(t, a)	#a
t                 128 include/linux/syscalls.h #define __SC_STR_TDECL(t, a)	#t
t                 549 include/linux/syscalls.h 				struct old_timespec32 __user *t, int flags);
t                1091 include/linux/syscalls.h 				     struct old_timeval32 __user *t);
t                1093 include/linux/syscalls.h 				 struct old_utimbuf32 __user *t);
t                1095 include/linux/syscalls.h 				  struct old_timeval32 __user *t);
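The __MAPn/__SC_* helpers above are the SYSCALL_DEFINEx() plumbing: __MAPn applies one of the __SC_* macros to each (type, name) pair of a syscall's argument list. Assumed expansions for a hypothetical two-argument syscall, sketched as a comment:

/*
 * __MAP2(__SC_DECL, int, fd, size_t, count)  ->  int fd, size_t count
 * __MAP2(__SC_LONG, int, fd, size_t, count)  ->  long fd, long count
 * __MAP2(__SC_ARGS, int, fd, size_t, count)  ->  fd, count
 * __MAP2(__SC_CAST, int, fd, size_t, count)  ->  (__force int) fd, (__force size_t) count
 *
 * SYSCALL_DEFINEx() combines these: the outer wrapper takes all-long
 * arguments (__SC_LONG), casts them back with __SC_CAST, and calls the
 * strongly typed inner function declared with __SC_DECL.
 */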
t                 423 include/linux/sysfs.h static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
t                 430 include/linux/sysfs.h static inline void sysfs_delete_link(struct kobject *k, struct kobject *t,
t                  36 include/linux/thermal.h #define DECI_KELVIN_TO_CELSIUS(t)	({			\
t                  37 include/linux/thermal.h 	long _t = (t);						\
t                  40 include/linux/thermal.h #define CELSIUS_TO_DECI_KELVIN(t)	((t)*10+2732)
t                  41 include/linux/thermal.h #define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
t                  42 include/linux/thermal.h #define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
t                  43 include/linux/thermal.h #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
t                  44 include/linux/thermal.h #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
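Worked values for the thermal conversion macros above, using the 2732 deci-kelvin (= 0 degrees C) offset they hard-code:

/*
 * CELSIUS_TO_DECI_KELVIN(25)         == 25 * 10 + 2732       == 2982
 * DECI_KELVIN_TO_MILLICELSIUS(3002)  == (3002 - 2732) * 100  == 27000  (27 C)
 * MILLICELSIUS_TO_DECI_KELVIN(27000) == 27000 / 100 + 2732   == 3002
 */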
t                 111 include/linux/time.h #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
t                 189 include/linux/timer.h # define del_timer_sync(t)		del_timer(t)
t                 192 include/linux/timer.h #define del_singleshot_timer_sync(t) del_timer_sync(t)
t                 122 include/linux/uprobes.h extern void uprobe_free_utask(struct task_struct *t);
t                 123 include/linux/uprobes.h extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
t                 194 include/linux/uprobes.h static inline void uprobe_free_utask(struct task_struct *t)
t                 197 include/linux/uprobes.h static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
t                 215 include/linux/usb/pd.h #define PDO_TYPE(t)	((t) << PDO_TYPE_SHIFT)
t                 270 include/linux/usb/pd.h #define PDO_APDO_TYPE(t)	((t) << PDO_APDO_TYPE_SHIFT)
t                 160 include/linux/watchdog.h static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
t                 173 include/linux/watchdog.h 	return t > UINT_MAX / 1000 || t < wdd->min_timeout ||
t                 175 include/linux/watchdog.h 		 t > wdd->max_timeout);
t                 180 include/linux/watchdog.h 					       unsigned int t)
t                 182 include/linux/watchdog.h 	return t && wdd->timeout && t >= wdd->timeout;
t                  13 include/linux/win_minmax.h 	u32	t;	/* time measurement was taken */
t                  27 include/linux/win_minmax.h static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
t                  29 include/linux/win_minmax.h 	struct minmax_sample val = { .t = t, .v = meas };
t                  35 include/linux/win_minmax.h u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
t                  36 include/linux/win_minmax.h u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
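win_minmax.h implements windowed min/max tracking (used, for example, by TCP BBR's bandwidth and RTT filters). A small usage sketch with assumed time units; the variable names are illustrative:

#include <linux/win_minmax.h>

static struct minmax bw_filter;			/* windowed maximum */

static void record_sample(u32 now, u32 bw)
{
	/* keep the largest sample seen within the last 10 time units */
	minmax_running_max(&bw_filter, 10, now, bw);
}

static u32 best_bw(void)
{
	return minmax_get(&bw_filter);		/* current in-window maximum */
}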
t                  22 include/linux/workqueue.h void delayed_work_timer_fn(struct timer_list *t);
t                 341 include/linux/writeback.h void laptop_mode_timer_fn(struct timer_list *t);
t                 128 include/media/drv-intf/msp3400.h #define MSP_INPUT(sc, t, main_aux_src, sc_i2s_src) \
t                 130 include/media/drv-intf/msp3400.h 	 MSP_TUNER_TO_DSP(t) | \
t                 187 include/media/drv-intf/saa7146_vv.h void saa7146_buffer_timeout(struct timer_list *t);
t                  41 include/media/i2c/ov772x.h #define OV772X_MANUAL_EDGECTRL(s, t)			\
t                  45 include/media/i2c/ov772x.h 	.threshold = (t & OV772X_EDGE_THRESHOLD_MASK),	\
t                  22 include/media/v4l2-dv-timings.h struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t);
t                  37 include/media/v4l2-dv-timings.h typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
t                  51 include/media/v4l2-dv-timings.h bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
t                  72 include/media/v4l2-dv-timings.h int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
t                  95 include/media/v4l2-dv-timings.h bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
t                 109 include/media/v4l2-dv-timings.h bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic);
t                 135 include/media/v4l2-dv-timings.h 			   const struct v4l2_dv_timings *t, bool detailed);
t                 202 include/media/v4l2-dv-timings.h struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
t                 160 include/net/af_vsock.h int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
t                 161 include/net/af_vsock.h static inline int vsock_core_init(const struct vsock_transport *t)
t                 163 include/net/af_vsock.h 	return __vsock_core_init(t, THIS_MODULE);
t                 843 include/net/bluetooth/l2cap.h #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
t                  93 include/net/codel_impl.h static codel_time_t codel_control_law(codel_time_t t,
t                  97 include/net/codel_impl.h 	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
t                  82 include/net/dn_fib.h 	int (*insert)(struct dn_fib_table *t, struct rtmsg *r, 
t                  85 include/net/dn_fib.h 	int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
t                  88 include/net/dn_fib.h 	int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
t                  90 include/net/dn_fib.h 	int (*flush)(struct dn_fib_table *t);
t                  91 include/net/dn_fib.h 	int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
t                  89 include/net/erspan.h 		t:1,
t                  99 include/net/erspan.h 		t:1,
t                 193 include/net/erspan.h 	ershdr->t = truncate;
t                 287 include/net/erspan.h 	ershdr->t = truncate;
t                 299 include/net/ieee802154_netdev.h 			  struct ieee802154_llsec_table **t);
t                 104 include/net/inet_frag.h 	void			(*frag_expire)(struct timer_list *t);
t                  85 include/net/ip6_tunnel.h int ip6_tnl_encap_setup(struct ip6_tnl *t,
t                 108 include/net/ip6_tunnel.h static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
t                 114 include/net/ip6_tunnel.h 	if (t->encap.type == TUNNEL_ENCAP_NONE)
t                 117 include/net/ip6_tunnel.h 	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
t                 121 include/net/ip6_tunnel.h 	ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
t                 123 include/net/ip6_tunnel.h 		ret = ops->build_header(skb, &t->encap, protocol, fl6);
t                 137 include/net/ip6_tunnel.h int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
t                 142 include/net/ip6_tunnel.h int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
t                 147 include/net/ip6_tunnel.h __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
t                 309 include/net/ip_tunnels.h int ip_tunnel_encap_setup(struct ip_tunnel *t,
t                 352 include/net/ip_tunnels.h static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
t                 358 include/net/ip_tunnels.h 	if (t->encap.type == TUNNEL_ENCAP_NONE)
t                 361 include/net/ip_tunnels.h 	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
t                 365 include/net/ip_tunnels.h 	ops = rcu_dereference(iptun_encaps[t->encap.type]);
t                 367 include/net/ip_tunnels.h 		ret = ops->build_header(skb, &t->encap, protocol, fl4);
t                1167 include/net/ip_vs.h #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
t                 174 include/net/llc_c_ac.h void llc_conn_busy_tmr_cb(struct timer_list *t);
t                 175 include/net/llc_c_ac.h void llc_conn_pf_cycle_tmr_cb(struct timer_list *t);
t                 176 include/net/llc_c_ac.h void llc_conn_ack_tmr_cb(struct timer_list *t);
t                 177 include/net/llc_c_ac.h void llc_conn_rej_tmr_cb(struct timer_list *t);
t                  41 include/net/netfilter/nf_conntrack_l4proto.h 			       const struct nf_conntrack_tuple *t);
t                  45 include/net/netfilter/nf_conntrack_l4proto.h 			       struct nf_conntrack_tuple *t);
t                 155 include/net/netfilter/nf_conntrack_l4proto.h 			       struct nf_conntrack_tuple *t);
t                  33 include/net/netfilter/nf_conntrack_timeout.h nf_ct_timeout_data(const struct nf_conn_timeout *t)
t                  38 include/net/netfilter/nf_conntrack_timeout.h 	timeout = rcu_dereference(t->timeout);
t                  82 include/net/netfilter/nf_conntrack_tuple.h static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t)
t                  86 include/net/netfilter/nf_conntrack_tuple.h 	       t, t->dst.protonum,
t                  87 include/net/netfilter/nf_conntrack_tuple.h 	       &t->src.u3.ip, ntohs(t->src.u.all),
t                  88 include/net/netfilter/nf_conntrack_tuple.h 	       &t->dst.u3.ip, ntohs(t->dst.u.all));
t                  92 include/net/netfilter/nf_conntrack_tuple.h static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t)
t                  96 include/net/netfilter/nf_conntrack_tuple.h 	       t, t->dst.protonum,
t                  97 include/net/netfilter/nf_conntrack_tuple.h 	       t->src.u3.all, ntohs(t->src.u.all),
t                  98 include/net/netfilter/nf_conntrack_tuple.h 	       t->dst.u3.all, ntohs(t->dst.u.all));
t                 102 include/net/netfilter/nf_conntrack_tuple.h static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
t                 104 include/net/netfilter/nf_conntrack_tuple.h 	switch (t->src.l3num) {
t                 106 include/net/netfilter/nf_conntrack_tuple.h 		nf_ct_dump_tuple_ip(t);
t                 109 include/net/netfilter/nf_conntrack_tuple.h 		nf_ct_dump_tuple_ipv6(t);
t                 179 include/net/netfilter/nf_conntrack_tuple.h nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
t                 183 include/net/netfilter/nf_conntrack_tuple.h 	return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
t                 184 include/net/netfilter/nf_conntrack_tuple.h 	       __nf_ct_tuple_dst_equal(t, tuple);
t                 455 include/net/pkt_cls.h #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
t                 456 include/net/pkt_cls.h #define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
t                 457 include/net/pkt_cls.h #define tcf_em_tree_dump(skb, t, tlv) (0)
t                 458 include/net/pkt_cls.h #define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
t                 147 include/net/sctp/sctp.h 			   struct sctp_transport *t, __u32 pmtu);
t                 152 include/net/sctp/sctp.h 				 struct sctp_transport *t);
t                 157 include/net/sctp/sctp.h int sctp_hash_transport(struct sctp_transport *t);
t                 158 include/net/sctp/sctp.h void sctp_unhash_transport(struct sctp_transport *t);
t                 563 include/net/sctp/sctp.h static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
t                 565 include/net/sctp/sctp.h 	if (t->dst && !dst_check(t->dst, t->dst_cookie))
t                 566 include/net/sctp/sctp.h 		sctp_transport_dst_release(t);
t                 568 include/net/sctp/sctp.h 	return t->dst;
t                 596 include/net/sctp/sctp.h static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
t                 598 include/net/sctp/sctp.h 	__u32 pmtu = sctp_dst_mtu(t->dst);
t                 600 include/net/sctp/sctp.h 	if (t->pathmtu == pmtu)
t                 603 include/net/sctp/sctp.h 	t->pathmtu = pmtu;
t                 449 include/net/sctp/structs.h 	void		(*get_dst)	(struct sctp_transport *t,
t                 454 include/net/sctp/structs.h 					 struct sctp_transport *t,
t                 999 include/net/sctp/structs.h void sctp_transport_lower_cwnd(struct sctp_transport *t,
t                1004 include/net/sctp/structs.h void sctp_transport_reset(struct sctp_transport *t);
t                1005 include/net/sctp/structs.h bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
t                1007 include/net/sctp/structs.h void sctp_transport_dst_release(struct sctp_transport *t);
t                1008 include/net/sctp/structs.h void sctp_transport_dst_confirm(struct sctp_transport *t);
t                  30 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                  31 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
t                  42 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                  43 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
t                  54 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                  55 include/net/tc_act/tc_tunnel_key.h 	struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
t                 343 include/net/xfrm.h int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
t                1588 include/net/xfrm.h 		  struct ip6_tnl *t);
t                1590 include/net/xfrm.h int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
t                 890 include/rdma/rdmavt_qp.h enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
t                 374 include/scsi/libfcoe.h void fcoe_queue_timer(struct timer_list *t);
t                 746 include/scsi/scsi_host.h extern void scsi_host_put(struct Scsi_Host *t);
t                  57 include/scsi/scsi_transport.h scsi_transport_reserve_target(struct scsi_transport_template * t, int space)
t                  59 include/scsi/scsi_transport.h 	BUG_ON(t->target_private_offset != 0);
t                  60 include/scsi/scsi_transport.h 	t->target_private_offset = ALIGN(t->target_size, sizeof(void *));
t                  61 include/scsi/scsi_transport.h 	t->target_size = t->target_private_offset + space;
t                  64 include/scsi/scsi_transport.h scsi_transport_reserve_device(struct scsi_transport_template * t, int space)
t                  66 include/scsi/scsi_transport.h 	BUG_ON(t->device_private_offset != 0);
t                  67 include/scsi/scsi_transport.h 	t->device_private_offset = ALIGN(t->device_size, sizeof(void *));
t                  68 include/scsi/scsi_transport.h 	t->device_size = t->device_private_offset + space;
t                 419 include/scsi/scsi_transport_iscsi.h 						struct iscsi_transport *t,
t                 435 include/scsi/scsi_transport_iscsi.h 					      struct iscsi_transport *t,
t                  28 include/trace/bpf_probe.h #define __perf_task(t)	(t)
t                  74 include/trace/events/f2fs.h #define F2FS_BIO_FLAG_MASK(t)	(t & F2FS_OP_FLAGS)
t                  13 include/trace/events/rseq.h 	TP_PROTO(struct task_struct *t),
t                  15 include/trace/events/rseq.h 	TP_ARGS(t),
t                  17 include/trace/events/sched.h 	TP_PROTO(struct task_struct *t),
t                  19 include/trace/events/sched.h 	TP_ARGS(t),
t                  27 include/trace/events/sched.h 		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
t                  28 include/trace/events/sched.h 		__entry->pid	= t->pid;
t                  28 include/trace/perf.h #define __perf_task(t)	(__task = (t))
t                 693 include/trace/trace_events.h #define __perf_task(t)	(t)
t                  76 include/uapi/asm-generic/ioctl.h #define _IOC_TYPECHECK(t) (sizeof(t))
t                  58 include/uapi/linux/atm.h #define __SO_ENCODE(l,n,t)	((((l) & 0x1FF) << 22) | ((n) << 16) | \
t                  59 include/uapi/linux/atm.h 				sizeof(t))
t                  45 include/uapi/linux/keyboard.h #define K(t,v)		(((t)<<8)|(v))
t                  66 include/uapi/linux/mroute6.h #define IF_COPY(f, t)   bcopy(f, t, sizeof(*(f)))
t                 200 include/uapi/linux/ptp_clock.h 	struct ptp_clock_time t; /* Time event occurred. */
t                 144 include/uapi/linux/serial.h #define SER_ISO7816_T(t)		(((t) & 0x0f) << 4)
t                 116 include/uapi/linux/soundcard.h #define	_SIOR(x,y,t)	((int)(SIOC_OUT|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y))
t                 117 include/uapi/linux/soundcard.h #define	_SIOW(x,y,t)	((int)(SIOC_IN|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y))
t                 119 include/uapi/linux/soundcard.h #define	_SIOWR(x,y,t)	((int)(SIOC_INOUT|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y))
t                  99 include/uapi/scsi/scsi_netlink.h #define INIT_SCSI_NL_HDR(hdr, t, mtype, mlen)			\
t                 102 include/uapi/scsi/scsi_netlink.h 	(hdr)->transport = t;					\
t                 455 include/video/newport.h 	int t = BUSY_TIMEOUT;
t                 457 include/video/newport.h 	while (--t)
t                 460 include/video/newport.h 	return !t;
t                 465 include/video/newport.h 	int t = BUSY_TIMEOUT;
t                 467 include/video/newport.h 	while (--t)
t                 470 include/video/newport.h 	return !t;
t                 327 include/video/sstfb.h 	int (*set_pll) (struct fb_info *info, const struct pll_timing *t, const int clock);
t                 106 include/video/uvesafb.h 	struct uvesafb_task t;
t                 136 include/xen/xenbus.h char **xenbus_directory(struct xenbus_transaction t,
t                 138 include/xen/xenbus.h void *xenbus_read(struct xenbus_transaction t,
t                 140 include/xen/xenbus.h int xenbus_write(struct xenbus_transaction t,
t                 142 include/xen/xenbus.h int xenbus_mkdir(struct xenbus_transaction t,
t                 144 include/xen/xenbus.h int xenbus_exists(struct xenbus_transaction t,
t                 146 include/xen/xenbus.h int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
t                 147 include/xen/xenbus.h int xenbus_transaction_start(struct xenbus_transaction *t);
t                 148 include/xen/xenbus.h int xenbus_transaction_end(struct xenbus_transaction t, int abort);
t                 152 include/xen/xenbus.h int xenbus_scanf(struct xenbus_transaction t,
t                 161 include/xen/xenbus.h int xenbus_printf(struct xenbus_transaction t,
t                 166 include/xen/xenbus.h int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
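The xenbus_* prototypes above form the xenstore transaction API: operations are grouped between xenbus_transaction_start() and xenbus_transaction_end(), and the whole group is retried when _end() reports -EAGAIN. A hedged sketch (helper name is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <xen/xenbus.h>

static void *read_node(const char *dir, const char *node)
{
	struct xenbus_transaction t;
	void *val;
	int err;

again:
	err = xenbus_transaction_start(&t);
	if (err)
		return ERR_PTR(err);

	val = xenbus_read(t, dir, node, NULL);

	/* abort if the read failed, otherwise try to commit */
	err = xenbus_transaction_end(t, IS_ERR(val) ? 1 : 0);
	if (err == -EAGAIN) {
		if (!IS_ERR(val))
			kfree(val);	/* raced with another transaction: retry */
		goto again;
	}

	return val;
}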
t                 104 init/initramfs.c 	struct timespec64 t[2];
t                 106 init/initramfs.c 	t[0].tv_sec = mtime;
t                 107 init/initramfs.c 	t[0].tv_nsec = 0;
t                 108 init/initramfs.c 	t[1].tv_sec = mtime;
t                 109 init/initramfs.c 	t[1].tv_nsec = 0;
t                 111 init/initramfs.c 	return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW);
t                 200 ipc/msg.c      	struct msg_sender *mss, *t;
t                 204 ipc/msg.c      	list_for_each_entry_safe(mss, t, h, list) {
t                 237 ipc/msg.c      	struct msg_receiver *msr, *t;
t                 239 ipc/msg.c      	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
t                 255 ipc/msg.c      	struct msg_msg *msg, *t;
t                 266 ipc/msg.c      	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
t                 792 ipc/msg.c      	struct msg_receiver *msr, *t;
t                 794 ipc/msg.c      	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
t                 908 kernel/audit.c 	int		t     = done  ? NLMSG_DONE  : type;
t                 914 kernel/audit.c 	nlh	= nlmsg_put(skb, 0, seq, t, size, flags);
t                1447 kernel/audit.c 		unsigned int t;
t                1449 kernel/audit.c 		t = READ_ONCE(current->signal->audit_tty);
t                1450 kernel/audit.c 		s.enabled = t & AUDIT_TTY_ENABLE;
t                1451 kernel/audit.c 		s.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
t                1459 kernel/audit.c 		unsigned int t;
t                1470 kernel/audit.c 			t = READ_ONCE(current->signal->audit_tty);
t                1472 kernel/audit.c 			t = s.enabled | (-s.log_passwd & AUDIT_TTY_LOG_PASSWD);
t                1473 kernel/audit.c 			t = xchg(&current->signal->audit_tty, t);
t                1475 kernel/audit.c 		old.enabled = t & AUDIT_TTY_ENABLE;
t                1476 kernel/audit.c 		old.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
t                1726 kernel/audit.c 				   struct timespec64 *t, unsigned int *serial)
t                1728 kernel/audit.c 	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
t                1729 kernel/audit.c 		ktime_get_coarse_real_ts64(t);
t                1753 kernel/audit.c 	struct timespec64 t;
t                1804 kernel/audit.c 	audit_get_stamp(ab->ctx, &t, &serial);
t                1806 kernel/audit.c 			 (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
t                2276 kernel/audit.c int audit_signal_info(int sig, struct task_struct *t)
t                2280 kernel/audit.c 	if (auditd_test_task(t) &&
t                2292 kernel/audit.c 	return audit_signal_info_syscall(t);
t                 253 kernel/audit.h 			      struct timespec64 *t, unsigned int *serial);
t                 289 kernel/audit.h extern int audit_signal_info_syscall(struct task_struct *t);
t                 294 kernel/audit.h #define auditsc_get_stamp(c, t, s) 0
t                 308 kernel/audit.h #define audit_exe_compare(t, m) (-EINVAL)
t                 320 kernel/audit.h static inline int audit_signal_info_syscall(struct task_struct *t)
t                 325 kernel/audit.h #define audit_filter_inodes(t, c) AUDIT_DISABLED
t                2170 kernel/auditsc.c 		       struct timespec64 *t, unsigned int *serial)
t                2176 kernel/auditsc.c 	t->tv_sec  = ctx->ctime.tv_sec;
t                2177 kernel/auditsc.c 	t->tv_nsec = ctx->ctime.tv_nsec;
t                2367 kernel/auditsc.c void __audit_ptrace(struct task_struct *t)
t                2371 kernel/auditsc.c 	context->target_pid = task_tgid_nr(t);
t                2372 kernel/auditsc.c 	context->target_auid = audit_get_loginuid(t);
t                2373 kernel/auditsc.c 	context->target_uid = task_uid(t);
t                2374 kernel/auditsc.c 	context->target_sessionid = audit_get_sessionid(t);
t                2375 kernel/auditsc.c 	security_task_getsecid(t, &context->target_sid);
t                2376 kernel/auditsc.c 	memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
t                2386 kernel/auditsc.c int audit_signal_info_syscall(struct task_struct *t)
t                2390 kernel/auditsc.c 	kuid_t t_uid = task_uid(t);
t                2398 kernel/auditsc.c 		ctx->target_pid = task_tgid_nr(t);
t                2399 kernel/auditsc.c 		ctx->target_auid = audit_get_loginuid(t);
t                2401 kernel/auditsc.c 		ctx->target_sessionid = audit_get_sessionid(t);
t                2402 kernel/auditsc.c 		security_task_getsecid(t, &ctx->target_sid);
t                2403 kernel/auditsc.c 		memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
t                2419 kernel/auditsc.c 	axp->target_pid[axp->pid_count] = task_tgid_nr(t);
t                2420 kernel/auditsc.c 	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
t                2422 kernel/auditsc.c 	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
t                2423 kernel/auditsc.c 	security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
t                2424 kernel/auditsc.c 	memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
t                  17 kernel/bpf/bpf_lru_list.c #define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
t                  20 kernel/bpf/bpf_lru_list.c #define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)
t                 223 kernel/bpf/btf.c 	const struct btf_type *t;
t                 281 kernel/bpf/btf.c 			  const struct btf_type *t,
t                 294 kernel/bpf/btf.c 			    const struct btf_type *t);
t                 295 kernel/bpf/btf.c 	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
t                 304 kernel/bpf/btf.c 		       const struct btf_type *t, u32 type_id);
t                 306 kernel/bpf/btf.c static bool btf_type_is_modifier(const struct btf_type *t)
t                 318 kernel/bpf/btf.c 	switch (BTF_INFO_KIND(t->info)) {
t                 329 kernel/bpf/btf.c bool btf_type_is_void(const struct btf_type *t)
t                 331 kernel/bpf/btf.c 	return t == &btf_void;
t                 334 kernel/bpf/btf.c static bool btf_type_is_fwd(const struct btf_type *t)
t                 336 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
t                 339 kernel/bpf/btf.c static bool btf_type_is_func(const struct btf_type *t)
t                 341 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
t                 344 kernel/bpf/btf.c static bool btf_type_is_func_proto(const struct btf_type *t)
t                 346 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
t                 349 kernel/bpf/btf.c static bool btf_type_nosize(const struct btf_type *t)
t                 351 kernel/bpf/btf.c 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
t                 352 kernel/bpf/btf.c 	       btf_type_is_func(t) || btf_type_is_func_proto(t);
t                 355 kernel/bpf/btf.c static bool btf_type_nosize_or_null(const struct btf_type *t)
t                 357 kernel/bpf/btf.c 	return !t || btf_type_nosize(t);
t                 363 kernel/bpf/btf.c static bool btf_type_is_struct(const struct btf_type *t)
t                 365 kernel/bpf/btf.c 	u8 kind = BTF_INFO_KIND(t->info);
t                 370 kernel/bpf/btf.c static bool __btf_type_is_struct(const struct btf_type *t)
t                 372 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
t                 375 kernel/bpf/btf.c static bool btf_type_is_array(const struct btf_type *t)
t                 377 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
t                 380 kernel/bpf/btf.c static bool btf_type_is_ptr(const struct btf_type *t)
t                 382 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
t                 385 kernel/bpf/btf.c static bool btf_type_is_int(const struct btf_type *t)
t                 387 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
t                 390 kernel/bpf/btf.c static bool btf_type_is_var(const struct btf_type *t)
t                 392 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
t                 395 kernel/bpf/btf.c static bool btf_type_is_datasec(const struct btf_type *t)
t                 397 kernel/bpf/btf.c 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
t                 403 kernel/bpf/btf.c static bool btf_type_is_resolve_source_only(const struct btf_type *t)
t                 405 kernel/bpf/btf.c 	return btf_type_is_var(t) ||
t                 406 kernel/bpf/btf.c 	       btf_type_is_datasec(t);
t                 425 kernel/bpf/btf.c static bool btf_type_needs_resolve(const struct btf_type *t)
t                 427 kernel/bpf/btf.c 	return btf_type_is_modifier(t) ||
t                 428 kernel/bpf/btf.c 	       btf_type_is_ptr(t) ||
t                 429 kernel/bpf/btf.c 	       btf_type_is_struct(t) ||
t                 430 kernel/bpf/btf.c 	       btf_type_is_array(t) ||
t                 431 kernel/bpf/btf.c 	       btf_type_is_var(t) ||
t                 432 kernel/bpf/btf.c 	       btf_type_is_datasec(t);
t                 436 kernel/bpf/btf.c static bool btf_type_has_size(const struct btf_type *t)
t                 438 kernel/bpf/btf.c 	switch (BTF_INFO_KIND(t->info)) {
t                 464 kernel/bpf/btf.c static u16 btf_type_vlen(const struct btf_type *t)
t                 466 kernel/bpf/btf.c 	return BTF_INFO_VLEN(t->info);
t                 469 kernel/bpf/btf.c static bool btf_type_kflag(const struct btf_type *t)
t                 471 kernel/bpf/btf.c 	return BTF_INFO_KFLAG(t->info);
t                 488 kernel/bpf/btf.c static u32 btf_type_int(const struct btf_type *t)
t                 490 kernel/bpf/btf.c 	return *(u32 *)(t + 1);
t                 493 kernel/bpf/btf.c static const struct btf_array *btf_type_array(const struct btf_type *t)
t                 495 kernel/bpf/btf.c 	return (const struct btf_array *)(t + 1);
t                 498 kernel/bpf/btf.c static const struct btf_member *btf_type_member(const struct btf_type *t)
t                 500 kernel/bpf/btf.c 	return (const struct btf_member *)(t + 1);
t                 503 kernel/bpf/btf.c static const struct btf_enum *btf_type_enum(const struct btf_type *t)
t                 505 kernel/bpf/btf.c 	return (const struct btf_enum *)(t + 1);
t                 508 kernel/bpf/btf.c static const struct btf_var *btf_type_var(const struct btf_type *t)
t                 510 kernel/bpf/btf.c 	return (const struct btf_var *)(t + 1);
t                 513 kernel/bpf/btf.c static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
t                 515 kernel/bpf/btf.c 	return (const struct btf_var_secinfo *)(t + 1);
t                 518 kernel/bpf/btf.c static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
t                 520 kernel/bpf/btf.c 	return kind_ops[BTF_INFO_KIND(t->info)];
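The btf_type_*() accessors above all rely on the BTF encoding where kind-specific data immediately follows struct btf_type. Assumed layout, sketched as a comment (matching the accessors, not an authoritative spec):

/*
 * BTF_KIND_INT:
 *	struct btf_type		(name_off, info, size)
 *	u32			encoding/offset/bits		<- btf_type_int(t)
 *
 * BTF_KIND_ARRAY:
 *	struct btf_type
 *	struct btf_array	(type, index_type, nelems)	<- btf_type_array(t)
 *
 * BTF_KIND_STRUCT / BTF_KIND_UNION:
 *	struct btf_type
 *	struct btf_member[vlen]					<- btf_type_member(t)
 */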
t                 604 kernel/bpf/btf.c static bool btf_type_int_is_regular(const struct btf_type *t)
t                 609 kernel/bpf/btf.c 	int_data = btf_type_int(t);
t                 631 kernel/bpf/btf.c 	const struct btf_type *t;
t                 636 kernel/bpf/btf.c 	t = btf_type_id_size(btf, &id, NULL);
t                 637 kernel/bpf/btf.c 	if (!t || !btf_type_is_int(t))
t                 640 kernel/bpf/btf.c 	int_data = btf_type_int(t);
t                 689 kernel/bpf/btf.c 						   const struct btf_type *t,
t                 694 kernel/bpf/btf.c 	u8 kind = BTF_INFO_KIND(t->info);
t                 704 kernel/bpf/btf.c 			   __btf_name_by_offset(btf, t->name_off),
t                 708 kernel/bpf/btf.c 		btf_type_ops(t)->log_details(env, t);
t                 720 kernel/bpf/btf.c #define btf_verifier_log_type(env, t, ...) \
t                 721 kernel/bpf/btf.c 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
t                 722 kernel/bpf/btf.c #define btf_verifier_log_basic(env, t, ...) \
t                 723 kernel/bpf/btf.c 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
t                 817 kernel/bpf/btf.c static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
t                 856 kernel/bpf/btf.c 	btf->types[++(btf->nr_types)] = t;
t                 996 kernel/bpf/btf.c 			  const struct btf_type *t, u32 type_id)
t                1009 kernel/bpf/btf.c 	v->t = t;
t                1014 kernel/bpf/btf.c 		if (btf_type_is_ptr(t))
t                1016 kernel/bpf/btf.c 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
t                1142 kernel/bpf/btf.c 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
t                1146 kernel/bpf/btf.c static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
t                1150 kernel/bpf/btf.c 	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
t                1248 kernel/bpf/btf.c 			      const struct btf_type *t,
t                1255 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                1261 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                1262 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                1266 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                1267 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                1271 kernel/bpf/btf.c 	int_data = btf_type_int(t);
t                1273 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
t                1281 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
t                1286 kernel/bpf/btf.c 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
t                1287 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
t                1302 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Unsupported encoding");
t                1306 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                1312 kernel/bpf/btf.c 			const struct btf_type *t)
t                1314 kernel/bpf/btf.c 	int int_data = btf_type_int(t);
t                1318 kernel/bpf/btf.c 			 t->size, BTF_INT_OFFSET(int_data),
t                1414 kernel/bpf/btf.c 				  const struct btf_type *t,
t                1418 kernel/bpf/btf.c 	u32 int_data = btf_type_int(t);
t                1432 kernel/bpf/btf.c static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
t                1436 kernel/bpf/btf.c 	u32 int_data = btf_type_int(t);
t                1443 kernel/bpf/btf.c 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
t                1476 kernel/bpf/btf.c 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
t                1566 kernel/bpf/btf.c 				   const struct btf_type *t,
t                1569 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                1570 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                1574 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                1575 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                1579 kernel/bpf/btf.c 	if (!BTF_TYPE_ID_VALID(t->type)) {
t                1580 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid type_id");
t                1587 kernel/bpf/btf.c 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
t                1588 kernel/bpf/btf.c 		if (!t->name_off ||
t                1589 kernel/bpf/btf.c 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
t                1590 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid name");
t                1594 kernel/bpf/btf.c 		if (t->name_off) {
t                1595 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid name");
t                1600 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                1608 kernel/bpf/btf.c 	const struct btf_type *t = v->t;
t                1610 kernel/bpf/btf.c 	u32 next_type_id = t->type;
t                1615 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1637 kernel/bpf/btf.c 			btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1651 kernel/bpf/btf.c 	const struct btf_type *t = v->t;
t                1652 kernel/bpf/btf.c 	u32 next_type_id = t->type;
t                1657 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1684 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1697 kernel/bpf/btf.c 	const struct btf_type *t = v->t;
t                1698 kernel/bpf/btf.c 	u32 next_type_id = t->type;
t                1703 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1740 kernel/bpf/btf.c 			btf_verifier_log_type(env, v->t, "Invalid type_id");
t                1751 kernel/bpf/btf.c 				  const struct btf_type *t,
t                1755 kernel/bpf/btf.c 	t = btf_type_id_resolve(btf, &type_id);
t                1757 kernel/bpf/btf.c 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
t                1760 kernel/bpf/btf.c static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
t                1764 kernel/bpf/btf.c 	t = btf_type_id_resolve(btf, &type_id);
t                1766 kernel/bpf/btf.c 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
t                1769 kernel/bpf/btf.c static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
t                1778 kernel/bpf/btf.c 			     const struct btf_type *t)
t                1780 kernel/bpf/btf.c 	btf_verifier_log(env, "type_id=%u", t->type);
t                1802 kernel/bpf/btf.c 			      const struct btf_type *t,
t                1805 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                1806 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                1810 kernel/bpf/btf.c 	if (t->type) {
t                1811 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "type != 0");
t                1816 kernel/bpf/btf.c 	if (!t->name_off ||
t                1817 kernel/bpf/btf.c 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
t                1818 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                1822 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                1828 kernel/bpf/btf.c 			     const struct btf_type *t)
t                1830 kernel/bpf/btf.c 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
t                1872 kernel/bpf/btf.c 				const struct btf_type *t,
t                1875 kernel/bpf/btf.c 	const struct btf_array *array = btf_type_array(t);
t                1879 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                1886 kernel/bpf/btf.c 	if (t->name_off) {
t                1887 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                1891 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                1892 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                1896 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                1897 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                1901 kernel/bpf/btf.c 	if (t->size) {
t                1902 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "size != 0");
t                1910 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid elem");
t                1915 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid index");
t                1919 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                1927 kernel/bpf/btf.c 	const struct btf_array *array = btf_type_array(v->t);
t                1938 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid index");
t                1949 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid index");
t                1958 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t,
t                1969 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid elem");
t                1974 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t, "Invalid array of int");
t                1979 kernel/bpf/btf.c 		btf_verifier_log_type(env, v->t,
t                1990 kernel/bpf/btf.c 			  const struct btf_type *t)
t                1992 kernel/bpf/btf.c 	const struct btf_array *array = btf_type_array(t);
t                1998 kernel/bpf/btf.c static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
t                2002 kernel/bpf/btf.c 	const struct btf_array *array = btf_type_array(t);
t                2057 kernel/bpf/btf.c 				 const struct btf_type *t,
t                2060 kernel/bpf/btf.c 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
t                2064 kernel/bpf/btf.c 	u32 struct_size = t->size;
t                2068 kernel/bpf/btf.c 	meta_needed = btf_type_vlen(t) * sizeof(*member);
t                2070 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                2077 kernel/bpf/btf.c 	if (t->name_off &&
t                2078 kernel/bpf/btf.c 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
t                2079 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2083 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2086 kernel/bpf/btf.c 	for_each_member(i, t, member) {
t                2088 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member,
t                2097 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member, "Invalid name");
t                2102 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member,
t                2107 kernel/bpf/btf.c 		offset = btf_member_bit_offset(t, member);
t                2109 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member,
t                2119 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member,
t                2125 kernel/bpf/btf.c 			btf_verifier_log_member(env, t, member,
t                2130 kernel/bpf/btf.c 		btf_verifier_log_member(env, t, member, NULL);
t                2153 kernel/bpf/btf.c 		last_member = btf_type_member(v->t) + v->next_member - 1;
t                2161 kernel/bpf/btf.c 		if (btf_type_kflag(v->t))
t                2162 kernel/bpf/btf.c 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
t                2166 kernel/bpf/btf.c 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
t                2173 kernel/bpf/btf.c 	for_each_member_from(i, v->next_member, v->t, member) {
t                2180 kernel/bpf/btf.c 			btf_verifier_log_member(env, v->t, member,
t                2191 kernel/bpf/btf.c 		if (btf_type_kflag(v->t))
t                2192 kernel/bpf/btf.c 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
t                2196 kernel/bpf/btf.c 			err = btf_type_ops(member_type)->check_member(env, v->t,
t                2209 kernel/bpf/btf.c 			   const struct btf_type *t)
t                2211 kernel/bpf/btf.c 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
t                2218 kernel/bpf/btf.c int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
t                2223 kernel/bpf/btf.c 	if (!__btf_type_is_struct(t))
t                2226 kernel/bpf/btf.c 	for_each_member(i, t, member) {
t                2239 kernel/bpf/btf.c 		off = btf_member_bit_offset(t, member);
t                2251 kernel/bpf/btf.c static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
t                2255 kernel/bpf/btf.c 	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
t                2260 kernel/bpf/btf.c 	for_each_member(i, t, member) {
t                2271 kernel/bpf/btf.c 		member_offset = btf_member_bit_offset(t, member);
t                2272 kernel/bpf/btf.c 		bitfield_size = btf_member_bitfield_size(t, member);
t                2357 kernel/bpf/btf.c 			       const struct btf_type *t,
t                2360 kernel/bpf/btf.c 	const struct btf_enum *enums = btf_type_enum(t);
t                2365 kernel/bpf/btf.c 	nr_enums = btf_type_vlen(t);
t                2369 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                2375 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                2376 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                2380 kernel/bpf/btf.c 	if (t->size > 8 || !is_power_of_2(t->size)) {
t                2381 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Unexpected size");
t                2386 kernel/bpf/btf.c 	if (t->name_off &&
t                2387 kernel/bpf/btf.c 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
t                2388 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2392 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2404 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid name");
t                2418 kernel/bpf/btf.c 			 const struct btf_type *t)
t                2420 kernel/bpf/btf.c 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
t                2423 kernel/bpf/btf.c static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
t                2427 kernel/bpf/btf.c 	const struct btf_enum *enums = btf_type_enum(t);
t                2428 kernel/bpf/btf.c 	u32 i, nr_enums = btf_type_vlen(t);
t                2453 kernel/bpf/btf.c 				     const struct btf_type *t,
t                2456 kernel/bpf/btf.c 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
t                2459 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                2465 kernel/bpf/btf.c 	if (t->name_off) {
t                2466 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2470 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                2471 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                2475 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2481 kernel/bpf/btf.c 			       const struct btf_type *t)
t                2483 kernel/bpf/btf.c 	const struct btf_param *args = (const struct btf_param *)(t + 1);
t                2484 kernel/bpf/btf.c 	u16 nr_args = btf_type_vlen(t), i;
t                2486 kernel/bpf/btf.c 	btf_verifier_log(env, "return=%u args=(", t->type);
t                2540 kernel/bpf/btf.c 			       const struct btf_type *t,
t                2543 kernel/bpf/btf.c 	if (!t->name_off ||
t                2544 kernel/bpf/btf.c 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
t                2545 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2549 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                2550 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                2554 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                2555 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                2559 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2574 kernel/bpf/btf.c 			      const struct btf_type *t,
t                2581 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                2587 kernel/bpf/btf.c 	if (btf_type_vlen(t)) {
t                2588 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen != 0");
t                2592 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                2593 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                2597 kernel/bpf/btf.c 	if (!t->name_off ||
t                2598 kernel/bpf/btf.c 	    !__btf_name_valid(env->btf, t->name_off, true)) {
t                2599 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2604 kernel/bpf/btf.c 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
t                2605 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid type_id");
t                2609 kernel/bpf/btf.c 	var = btf_type_var(t);
t                2612 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Linkage not supported");
t                2616 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2621 kernel/bpf/btf.c static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
t                2623 kernel/bpf/btf.c 	const struct btf_var *var = btf_type_var(t);
t                2625 kernel/bpf/btf.c 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
t                2638 kernel/bpf/btf.c 				  const struct btf_type *t,
t                2645 kernel/bpf/btf.c 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
t                2647 kernel/bpf/btf.c 		btf_verifier_log_basic(env, t,
t                2653 kernel/bpf/btf.c 	if (!btf_type_vlen(t)) {
t                2654 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "vlen == 0");
t                2658 kernel/bpf/btf.c 	if (!t->size) {
t                2659 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "size == 0");
t                2663 kernel/bpf/btf.c 	if (btf_type_kflag(t)) {
t                2664 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
t                2668 kernel/bpf/btf.c 	if (!t->name_off ||
t                2669 kernel/bpf/btf.c 	    !btf_name_valid_section(env->btf, t->name_off)) {
t                2670 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid name");
t                2674 kernel/bpf/btf.c 	btf_verifier_log_type(env, t, NULL);
t                2676 kernel/bpf/btf.c 	for_each_vsi(i, t, vsi) {
t                2679 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, t, vsi,
t                2684 kernel/bpf/btf.c 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
t                2685 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, t, vsi,
t                2690 kernel/bpf/btf.c 		if (!vsi->size || vsi->size > t->size) {
t                2691 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, t, vsi,
t                2697 kernel/bpf/btf.c 		if (last_vsi_end_off > t->size) {
t                2698 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, t, vsi,
t                2703 kernel/bpf/btf.c 		btf_verifier_log_vsi(env, t, vsi, NULL);
t                2707 kernel/bpf/btf.c 	if (t->size < sum) {
t                2708 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid btf_info size");
t                2722 kernel/bpf/btf.c 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
t                2727 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, v->t, vsi,
t                2740 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
t                2745 kernel/bpf/btf.c 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
t                2755 kernel/bpf/btf.c 			    const struct btf_type *t)
t                2757 kernel/bpf/btf.c 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
t                2761 kernel/bpf/btf.c 				 const struct btf_type *t, u32 type_id,
t                2769 kernel/bpf/btf.c 	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
t                2770 kernel/bpf/btf.c 	for_each_vsi(i, t, vsi) {
t                2790 kernel/bpf/btf.c 				const struct btf_type *t)
t                2799 kernel/bpf/btf.c 	args = (const struct btf_param *)(t + 1);
t                2800 kernel/bpf/btf.c 	nr_args = btf_type_vlen(t);
t                2803 kernel/bpf/btf.c 	if (t->type) {
t                2804 kernel/bpf/btf.c 		u32 ret_type_id = t->type;
t                2808 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid return type");
t                2821 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid return type");
t                2832 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid arg#%u",
t                2847 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
t                2855 kernel/bpf/btf.c 			btf_verifier_log_type(env, t,
t                2869 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
t                2879 kernel/bpf/btf.c 			  const struct btf_type *t)
t                2887 kernel/bpf/btf.c 	proto_type = btf_type_by_id(btf, t->type);
t                2890 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid type_id");
t                2898 kernel/bpf/btf.c 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
t                2925 kernel/bpf/btf.c 			  const struct btf_type *t,
t                2931 kernel/bpf/btf.c 	if (meta_left < sizeof(*t)) {
t                2933 kernel/bpf/btf.c 				 env->log_type_id, meta_left, sizeof(*t));
t                2936 kernel/bpf/btf.c 	meta_left -= sizeof(*t);
t                2938 kernel/bpf/btf.c 	if (t->info & ~BTF_INFO_MASK) {
t                2940 kernel/bpf/btf.c 				 env->log_type_id, t->info);
t                2944 kernel/bpf/btf.c 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
t                2945 kernel/bpf/btf.c 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
t                2947 kernel/bpf/btf.c 				 env->log_type_id, BTF_INFO_KIND(t->info));
t                2951 kernel/bpf/btf.c 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
t                2953 kernel/bpf/btf.c 				 env->log_type_id, t->name_off);
t                2957 kernel/bpf/btf.c 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
t                2978 kernel/bpf/btf.c 		struct btf_type *t = cur;
t                2981 kernel/bpf/btf.c 		meta_size = btf_check_meta(env, t, end - cur);
t                2985 kernel/bpf/btf.c 		btf_add_type(env, t);
t                2994 kernel/bpf/btf.c 			      const struct btf_type *t,
t                3002 kernel/bpf/btf.c 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
t                3006 kernel/bpf/btf.c 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
t                3007 kernel/bpf/btf.c 	    btf_type_is_var(t)) {
t                3008 kernel/bpf/btf.c 		t = btf_type_id_resolve(btf, &type_id);
t                3009 kernel/bpf/btf.c 		return t &&
t                3010 kernel/bpf/btf.c 		       !btf_type_is_modifier(t) &&
t                3011 kernel/bpf/btf.c 		       !btf_type_is_var(t) &&
t                3012 kernel/bpf/btf.c 		       !btf_type_is_datasec(t);
t                3015 kernel/bpf/btf.c 	if (btf_type_is_array(t)) {
t                3016 kernel/bpf/btf.c 		const struct btf_array *array = btf_type_array(t);
t                3031 kernel/bpf/btf.c 		       const struct btf_type *t, u32 type_id)
t                3038 kernel/bpf/btf.c 	env_stack_push(env, t, type_id);
t                3041 kernel/bpf/btf.c 		err = btf_type_ops(v->t)->resolve(env, v);
t                3046 kernel/bpf/btf.c 		btf_verifier_log_type(env, t,
t                3050 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Loop detected");
t                3054 kernel/bpf/btf.c 	if (!err && !btf_resolve_valid(env, t, type_id)) {
t                3055 kernel/bpf/btf.c 		btf_verifier_log_type(env, t, "Invalid resolve state");
t                3075 kernel/bpf/btf.c 		const struct btf_type *t = btf_type_by_id(btf, type_id);
t                3078 kernel/bpf/btf.c 		if (btf_type_needs_resolve(t) &&
t                3080 kernel/bpf/btf.c 			err = btf_resolve(env, t, type_id);
t                3085 kernel/bpf/btf.c 		if (btf_type_is_func_proto(t)) {
t                3086 kernel/bpf/btf.c 			err = btf_func_proto_check(env, t);
t                3091 kernel/bpf/btf.c 		if (btf_type_is_func(t)) {
t                3092 kernel/bpf/btf.c 			err = btf_func_check(env, t);
t                3373 kernel/bpf/btf.c 	const struct btf_type *t = btf_type_by_id(btf, type_id);
t                3375 kernel/bpf/btf.c 	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
t                 436 kernel/bpf/verifier.c 	enum bpf_reg_type t;
t                 443 kernel/bpf/verifier.c 		t = reg->type;
t                 444 kernel/bpf/verifier.c 		if (t == NOT_INIT)
t                 448 kernel/bpf/verifier.c 		verbose(env, "=%s", reg_type_str[t]);
t                 449 kernel/bpf/verifier.c 		if (t == SCALAR_VALUE && reg->precise)
t                 451 kernel/bpf/verifier.c 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
t                 457 kernel/bpf/verifier.c 			if (reg_type_may_be_refcounted_or_null(t))
t                 459 kernel/bpf/verifier.c 			if (t != SCALAR_VALUE)
t                 461 kernel/bpf/verifier.c 			if (type_is_pkt_pointer(t))
t                 463 kernel/bpf/verifier.c 			else if (t == CONST_PTR_TO_MAP ||
t                 464 kernel/bpf/verifier.c 				 t == PTR_TO_MAP_VALUE ||
t                 465 kernel/bpf/verifier.c 				 t == PTR_TO_MAP_VALUE_OR_NULL)
t                 518 kernel/bpf/verifier.c 			t = reg->type;
t                 519 kernel/bpf/verifier.c 			verbose(env, "=%s", reg_type_str[t]);
t                 520 kernel/bpf/verifier.c 			if (t == SCALAR_VALUE && reg->precise)
t                 522 kernel/bpf/verifier.c 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
t                1248 kernel/bpf/verifier.c 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
t                1272 kernel/bpf/verifier.c 			if (t == SRC_OP)
t                1288 kernel/bpf/verifier.c 		if (t != SRC_OP)
t                1308 kernel/bpf/verifier.c 		if (t != SRC_OP)
t                1359 kernel/bpf/verifier.c 			 enum reg_arg_type t)
t                1373 kernel/bpf/verifier.c 	rw64 = is_reg64(env, insn, regno, reg, t);
t                1374 kernel/bpf/verifier.c 	if (t == SRC_OP) {
t                1397 kernel/bpf/verifier.c 		if (t == DST_OP)
t                2259 kernel/bpf/verifier.c 				       enum bpf_access_type t)
t                2269 kernel/bpf/verifier.c 		if (t == BPF_WRITE)
t                2287 kernel/bpf/verifier.c 		if (t == BPF_WRITE)
t                2353 kernel/bpf/verifier.c 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
t                2360 kernel/bpf/verifier.c 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
t                2395 kernel/bpf/verifier.c 			     enum bpf_access_type t)
t                2410 kernel/bpf/verifier.c 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
t                2413 kernel/bpf/verifier.c 		valid = bpf_sock_is_valid_access(off, size, t, &info);
t                2416 kernel/bpf/verifier.c 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
t                2419 kernel/bpf/verifier.c 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
t                2759 kernel/bpf/verifier.c 			    int off, int bpf_size, enum bpf_access_type t,
t                2780 kernel/bpf/verifier.c 		if (t == BPF_WRITE && value_regno >= 0 &&
t                2785 kernel/bpf/verifier.c 		err = check_map_access_type(env, regno, off, size, t);
t                2789 kernel/bpf/verifier.c 		if (!err && t == BPF_READ && value_regno >= 0)
t                2795 kernel/bpf/verifier.c 		if (t == BPF_WRITE && value_regno >= 0 &&
t                2805 kernel/bpf/verifier.c 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
t                2806 kernel/bpf/verifier.c 		if (!err && t == BPF_READ && value_regno >= 0) {
t                2839 kernel/bpf/verifier.c 		if (t == BPF_WRITE)
t                2846 kernel/bpf/verifier.c 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
t                2850 kernel/bpf/verifier.c 		if (t == BPF_WRITE && value_regno >= 0 &&
t                2857 kernel/bpf/verifier.c 		if (!err && t == BPF_READ && value_regno >= 0)
t                2860 kernel/bpf/verifier.c 		if (t == BPF_WRITE && value_regno >= 0 &&
t                2868 kernel/bpf/verifier.c 		if (!err && t == BPF_READ && value_regno >= 0)
t                2871 kernel/bpf/verifier.c 		if (t == BPF_WRITE) {
t                2876 kernel/bpf/verifier.c 		err = check_sock_access(env, insn_idx, regno, off, size, t);
t                2881 kernel/bpf/verifier.c 		if (!err && t == BPF_READ && value_regno >= 0)
t                2889 kernel/bpf/verifier.c 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
t                5382 kernel/bpf/verifier.c 		struct tnum t = tnum_range(0, bound);
t                5384 kernel/bpf/verifier.c 		t.mask |= ~0xffffffffULL; /* upper half is unknown */
t                5385 kernel/bpf/verifier.c 		reg->var_off = tnum_intersect(reg->var_off, t);
t                5414 kernel/bpf/verifier.c 		struct tnum t = tnum_range(bound, U32_MAX);
t                5416 kernel/bpf/verifier.c 		t.mask |= ~0xffffffffULL; /* upper half is unknown */
t                5417 kernel/bpf/verifier.c 		reg->var_off = tnum_intersect(reg->var_off, t);
t                6306 kernel/bpf/verifier.c static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
t                6312 kernel/bpf/verifier.c 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
t                6315 kernel/bpf/verifier.c 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
t                6319 kernel/bpf/verifier.c 		verbose_linfo(env, t, "%d: ", t);
t                6320 kernel/bpf/verifier.c 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
t                6330 kernel/bpf/verifier.c 		insn_state[t] = DISCOVERED | e;
t                6339 kernel/bpf/verifier.c 		verbose_linfo(env, t, "%d: ", t);
t                6341 kernel/bpf/verifier.c 		verbose(env, "back-edge from insn %d to %d\n", t, w);
t                6345 kernel/bpf/verifier.c 		insn_state[t] = DISCOVERED | e;
t                6362 kernel/bpf/verifier.c 	int i, t;
t                6381 kernel/bpf/verifier.c 	t = insn_stack[env->cfg.cur_stack - 1];
t                6383 kernel/bpf/verifier.c 	if (BPF_CLASS(insns[t].code) == BPF_JMP ||
t                6384 kernel/bpf/verifier.c 	    BPF_CLASS(insns[t].code) == BPF_JMP32) {
t                6385 kernel/bpf/verifier.c 		u8 opcode = BPF_OP(insns[t].code);
t                6390 kernel/bpf/verifier.c 			ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
t                6395 kernel/bpf/verifier.c 			if (t + 1 < insn_cnt)
t                6396 kernel/bpf/verifier.c 				init_explored_state(env, t + 1);
t                6397 kernel/bpf/verifier.c 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
t                6398 kernel/bpf/verifier.c 				init_explored_state(env, t);
t                6399 kernel/bpf/verifier.c 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
t                6407 kernel/bpf/verifier.c 			if (BPF_SRC(insns[t].code) != BPF_K) {
t                6412 kernel/bpf/verifier.c 			ret = push_insn(t, t + insns[t].off + 1,
t                6422 kernel/bpf/verifier.c 			init_explored_state(env, t + insns[t].off + 1);
t                6426 kernel/bpf/verifier.c 			if (t + 1 < insn_cnt)
t                6427 kernel/bpf/verifier.c 				init_explored_state(env, t + 1);
t                6430 kernel/bpf/verifier.c 			init_explored_state(env, t);
t                6431 kernel/bpf/verifier.c 			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
t                6437 kernel/bpf/verifier.c 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
t                6447 kernel/bpf/verifier.c 		ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
t                6455 kernel/bpf/verifier.c 	insn_state[t] = EXPLORED;
t                 294 kernel/capability.c bool has_ns_capability(struct task_struct *t,
t                 300 kernel/capability.c 	ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE);
t                 316 kernel/capability.c bool has_capability(struct task_struct *t, int cap)
t                 318 kernel/capability.c 	return has_ns_capability(t, &init_user_ns, cap);
t                 335 kernel/capability.c bool has_ns_capability_noaudit(struct task_struct *t,
t                 341 kernel/capability.c 	ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT);
t                 359 kernel/capability.c bool has_capability_noaudit(struct task_struct *t, int cap)
t                 361 kernel/capability.c 	return has_ns_capability_noaudit(t, &init_user_ns, cap);
t                 828 kernel/cpu.c   		struct task_struct *t;
t                 834 kernel/cpu.c   		t = find_lock_task_mm(p);
t                 835 kernel/cpu.c   		if (!t)
t                 837 kernel/cpu.c   		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
t                 838 kernel/cpu.c   		task_unlock(t);
t                 712 kernel/events/core.c 	struct perf_cgroup_info *t;
t                 714 kernel/events/core.c 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
t                 715 kernel/events/core.c 	return t->time;
t                 938 kernel/events/core.c 	struct perf_cgroup_info *t;
t                 939 kernel/events/core.c 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
t                 940 kernel/events/core.c 	event->shadow_ctx_time = now - t->timestamp;
t                1305 kernel/events/uprobes.c 	struct rb_node *n, *t;
t                1315 kernel/events/uprobes.c 		for (t = n; t; t = rb_prev(t)) {
t                1316 kernel/events/uprobes.c 			u = rb_entry(t, struct uprobe, rb_node);
t                1322 kernel/events/uprobes.c 		for (t = n; (t = rb_next(t)); ) {
t                1323 kernel/events/uprobes.c 			u = rb_entry(t, struct uprobe, rb_node);
t                1723 kernel/events/uprobes.c void uprobe_free_utask(struct task_struct *t)
t                1725 kernel/events/uprobes.c 	struct uprobe_task *utask = t->utask;
t                1738 kernel/events/uprobes.c 	xol_free_insn_slot(t);
t                1740 kernel/events/uprobes.c 	t->utask = NULL;
t                1758 kernel/events/uprobes.c static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
t                1766 kernel/events/uprobes.c 	t->utask = n_utask;
t                1786 kernel/events/uprobes.c static void uprobe_warn(struct task_struct *t, const char *msg)
t                1805 kernel/events/uprobes.c void uprobe_copy_process(struct task_struct *t, unsigned long flags)
t                1811 kernel/events/uprobes.c 	t->utask = NULL;
t                1816 kernel/events/uprobes.c 	if (mm == t->mm && !(flags & CLONE_VFORK))
t                1819 kernel/events/uprobes.c 	if (dup_utask(t, utask))
t                1820 kernel/events/uprobes.c 		return uprobe_warn(t, "dup ret instances");
t                1825 kernel/events/uprobes.c 		return uprobe_warn(t, "dup xol area");
t                1827 kernel/events/uprobes.c 	if (mm == t->mm)
t                1830 kernel/events/uprobes.c 	t->utask->dup_xol_addr = area->vaddr;
t                1831 kernel/events/uprobes.c 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
t                1832 kernel/events/uprobes.c 	task_work_add(t, &t->utask->dup_xol_work, true);
t                1974 kernel/events/uprobes.c 	struct task_struct *t = current;
t                1975 kernel/events/uprobes.c 	struct uprobe_task *utask = t->utask;
t                1982 kernel/events/uprobes.c 	if (signal_pending(t)) {
t                1983 kernel/events/uprobes.c 		spin_lock_irq(&t->sighand->siglock);
t                1984 kernel/events/uprobes.c 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
t                1985 kernel/events/uprobes.c 		spin_unlock_irq(&t->sighand->siglock);
t                1987 kernel/events/uprobes.c 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
t                1989 kernel/events/uprobes.c 			set_tsk_thread_flag(t, TIF_UPROBE);
t                 492 kernel/exit.c  	struct task_struct *t;
t                 494 kernel/exit.c  	for_each_thread(p, t) {
t                 495 kernel/exit.c  		if (!(t->flags & PF_EXITING))
t                 496 kernel/exit.c  			return t;
t                 609 kernel/exit.c  	struct task_struct *p, *t, *reaper;
t                 621 kernel/exit.c  		for_each_thread(p, t) {
t                 622 kernel/exit.c  			t->real_parent = reaper;
t                 623 kernel/exit.c  			BUG_ON((!t->ptrace) != (t->parent == father));
t                 624 kernel/exit.c  			if (likely(!t->ptrace))
t                 625 kernel/exit.c  				t->parent = t->real_parent;
t                 626 kernel/exit.c  			if (t->pdeath_signal)
t                 627 kernel/exit.c  				group_send_sig_info(t->pdeath_signal,
t                 628 kernel/exit.c  						    SEND_SIG_NOINFO, t,
t                2957 kernel/fork.c  	struct ctl_table t;
t                2963 kernel/fork.c  	t = *table;
t                2964 kernel/fork.c  	t.data = &threads;
t                2965 kernel/fork.c  	t.extra1 = &min;
t                2966 kernel/fork.c  	t.extra2 = &max;
t                2968 kernel/fork.c  	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
t                2871 kernel/futex.c 	ktime_t t, *tp = NULL;
t                2874 kernel/futex.c 		t = restart->futex.time;
t                2875 kernel/futex.c 		tp = &t;
t                3932 kernel/futex.c 	ktime_t t, *tp = NULL;
t                3946 kernel/futex.c 		t = timespec64_to_ktime(ts);
t                3948 kernel/futex.c 			t = ktime_add_safe(ktime_get(), t);
t                3949 kernel/futex.c 		tp = &t;
t                4126 kernel/futex.c 	ktime_t t, *tp = NULL;
t                4138 kernel/futex.c 		t = timespec64_to_ktime(ts);
t                4140 kernel/futex.c 			t = ktime_add_safe(ktime_get(), t);
t                4141 kernel/futex.c 		tp = &t;
t                  88 kernel/hung_task.c static void check_hung_task(struct task_struct *t, unsigned long timeout)
t                  90 kernel/hung_task.c 	unsigned long switch_count = t->nvcsw + t->nivcsw;
t                  96 kernel/hung_task.c 	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
t                 107 kernel/hung_task.c 	if (switch_count != t->last_switch_count) {
t                 108 kernel/hung_task.c 		t->last_switch_count = switch_count;
t                 109 kernel/hung_task.c 		t->last_switch_time = jiffies;
t                 112 kernel/hung_task.c 	if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
t                 115 kernel/hung_task.c 	trace_sched_process_hang(t);
t                 131 kernel/hung_task.c 		       t->comm, t->pid, (jiffies - t->last_switch_time) / HZ);
t                 138 kernel/hung_task.c 		sched_show_task(t);
t                 152 kernel/hung_task.c static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
t                 157 kernel/hung_task.c 	get_task_struct(t);
t                 161 kernel/hung_task.c 	can_cont = pid_alive(g) && pid_alive(t);
t                 162 kernel/hung_task.c 	put_task_struct(t);
t                 177 kernel/hung_task.c 	struct task_struct *g, *t;
t                 188 kernel/hung_task.c 	for_each_process_thread(g, t) {
t                 192 kernel/hung_task.c 			if (!rcu_lock_break(g, t))
t                 197 kernel/hung_task.c 		if (t->state == TASK_UNINTERRUPTIBLE)
t                 198 kernel/hung_task.c 			check_hung_task(t, timeout);
t                 280 kernel/hung_task.c 		long t;
t                 285 kernel/hung_task.c 		t = hung_timeout_jiffies(hung_last_checked, interval);
t                 286 kernel/hung_task.c 		if (t <= 0) {
t                 293 kernel/hung_task.c 		schedule_timeout_interruptible(t);
t                1227 kernel/irq/manage.c 	struct task_struct *t;
t                1233 kernel/irq/manage.c 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
t                1236 kernel/irq/manage.c 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
t                1241 kernel/irq/manage.c 	if (IS_ERR(t))
t                1242 kernel/irq/manage.c 		return PTR_ERR(t);
t                1244 kernel/irq/manage.c 	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
t                1251 kernel/irq/manage.c 	new->thread = get_task_struct(t);
t                1629 kernel/irq/manage.c 		struct task_struct *t = new->thread;
t                1632 kernel/irq/manage.c 		kthread_stop(t);
t                1633 kernel/irq/manage.c 		put_task_struct(t);
t                1636 kernel/irq/manage.c 		struct task_struct *t = new->secondary->thread;
t                1639 kernel/irq/manage.c 		kthread_stop(t);
t                1640 kernel/irq/manage.c 		put_task_struct(t);
t                  57 kernel/kcov.c  	struct task_struct	*t;
t                  60 kernel/kcov.c  static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
t                  70 kernel/kcov.c  	mode = READ_ONCE(t->kcov_mode);
t                  96 kernel/kcov.c  	struct task_struct *t;
t                 101 kernel/kcov.c  	t = current;
t                 102 kernel/kcov.c  	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
t                 105 kernel/kcov.c  	area = t->kcov_area;
t                 108 kernel/kcov.c  	if (likely(pos < t->kcov_size)) {
t                 118 kernel/kcov.c  	struct task_struct *t;
t                 122 kernel/kcov.c  	t = current;
t                 123 kernel/kcov.c  	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
t                 132 kernel/kcov.c  	area = (u64 *)t->kcov_area;
t                 133 kernel/kcov.c  	max_pos = t->kcov_size * sizeof(unsigned long);
t                 243 kernel/kcov.c  void kcov_task_init(struct task_struct *t)
t                 245 kernel/kcov.c  	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
t                 247 kernel/kcov.c  	t->kcov_size = 0;
t                 248 kernel/kcov.c  	t->kcov_area = NULL;
t                 249 kernel/kcov.c  	t->kcov = NULL;
t                 252 kernel/kcov.c  void kcov_task_exit(struct task_struct *t)
t                 256 kernel/kcov.c  	kcov = t->kcov;
t                 260 kernel/kcov.c  	if (WARN_ON(kcov->t != t)) {
t                 265 kernel/kcov.c  	kcov_task_init(t);
t                 266 kernel/kcov.c  	kcov->t = NULL;
t                 346 kernel/kcov.c  	struct task_struct *t;
t                 378 kernel/kcov.c  		t = current;
t                 379 kernel/kcov.c  		if (kcov->t != NULL || t->kcov != NULL)
t                 393 kernel/kcov.c  		t->kcov_size = kcov->size;
t                 394 kernel/kcov.c  		t->kcov_area = kcov->area;
t                 397 kernel/kcov.c  		WRITE_ONCE(t->kcov_mode, kcov->mode);
t                 398 kernel/kcov.c  		t->kcov = kcov;
t                 399 kernel/kcov.c  		kcov->t = t;
t                 408 kernel/kcov.c  		t = current;
t                 409 kernel/kcov.c  		if (WARN_ON(kcov->t != t))
t                 411 kernel/kcov.c  		kcov_task_init(t);
t                 412 kernel/kcov.c  		kcov->t = NULL;
t                 842 kernel/kthread.c void kthread_delayed_work_timer_fn(struct timer_list *t)
t                 844 kernel/kthread.c 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
t                 401 kernel/panic.c 			const struct taint_flag *t = &taint_flags[i];
t                 403 kernel/panic.c 					t->c_true : t->c_false;
t                 433 kernel/rcu/rcu.h static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
t                 440 kernel/rcu/rcu.h void rcu_request_urgent_qs_task(struct task_struct *t);
t                 365 kernel/rcu/rcuperf.c 	u64 t;
t                 386 kernel/rcu/rcuperf.c 	t = ktime_get_mono_fast_ns();
t                 388 kernel/rcu/rcuperf.c 		t_rcu_perf_writer_started = t;
t                 426 kernel/rcu/rcuperf.c 		t = ktime_get_mono_fast_ns();
t                 427 kernel/rcu/rcuperf.c 		*wdp = t - *wdp;
t                 444 kernel/rcu/rcuperf.c 				t_rcu_perf_writer_finished = t;
t                1360 kernel/rcu/rcutorture.c 	struct timer_list t;
t                1365 kernel/rcu/rcutorture.c 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
t                1369 kernel/rcu/rcutorture.c 			if (!timer_pending(&t))
t                1370 kernel/rcu/rcutorture.c 				mod_timer(&t, jiffies + 1);
t                1383 kernel/rcu/rcutorture.c 		del_timer_sync(&t);
t                1384 kernel/rcu/rcutorture.c 		destroy_timer_on_stack(&t);
t                1542 kernel/rcu/rcutorture.c 	struct task_struct *t;
t                1547 kernel/rcu/rcutorture.c 	t = boost_tasks[cpu];
t                1553 kernel/rcu/rcutorture.c 	torture_stop_kthread(rcu_torture_boost, t);
t                2454 kernel/rcu/rcutorture.c 		int t;
t                2456 kernel/rcu/rcutorture.c 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
t                2457 kernel/rcu/rcutorture.c 		firsterr = torture_stutter_init(stutter * HZ, t);
t                  48 kernel/rcu/srcutree.c static void srcu_delay_timer(struct timer_list *t);
t                 458 kernel/rcu/srcutree.c static void srcu_delay_timer(struct timer_list *t)
t                 460 kernel/rcu/srcutree.c 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
t                 764 kernel/rcu/srcutree.c 	unsigned long t;
t                 783 kernel/rcu/srcutree.c 	t = ktime_get_mono_fast_ns();
t                 786 kernel/rcu/srcutree.c 	    time_in_range_open(t, tlast, tlast + exp_holdoff))
t                 915 kernel/rcu/tree.c void rcu_request_urgent_qs_task(struct task_struct *t)
t                 920 kernel/rcu/tree.c 	cpu = task_cpu(t);
t                 921 kernel/rcu/tree.c 	if (!task_curr(t))
t                2390 kernel/rcu/tree.c static void rcu_wake_cond(struct task_struct *t, int status)
t                2396 kernel/rcu/tree.c 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
t                2397 kernel/rcu/tree.c 		wake_up_process(t);
t                2402 kernel/rcu/tree.c 	struct task_struct *t;
t                2407 kernel/rcu/tree.c 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
t                2408 kernel/rcu/tree.c 	if (t != NULL && t != current)
t                2409 kernel/rcu/tree.c 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
t                3280 kernel/rcu/tree.c 	struct task_struct *t;
t                3298 kernel/rcu/tree.c 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
t                3299 kernel/rcu/tree.c 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
t                3303 kernel/rcu/tree.c 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
t                3307 kernel/rcu/tree.c 	rcu_state.gp_kthread = t;
t                3309 kernel/rcu/tree.c 	wake_up_process(t);
t                 428 kernel/rcu/tree.h static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
t                 429 kernel/rcu/tree.h static void rcu_preempt_deferred_qs(struct task_struct *t);
t                 604 kernel/rcu/tree_exp.h 	struct task_struct *t = current;
t                 611 kernel/rcu/tree_exp.h 	if (!t->rcu_read_lock_nesting) {
t                 617 kernel/rcu/tree_exp.h 			set_tsk_need_resched(t);
t                 635 kernel/rcu/tree_exp.h 	if (t->rcu_read_lock_nesting > 0) {
t                 639 kernel/rcu/tree_exp.h 			t->rcu_read_unlock_special.b.exp_hint = true;
t                 664 kernel/rcu/tree_exp.h 		rcu_preempt_deferred_qs(t);
t                 666 kernel/rcu/tree_exp.h 		set_tsk_need_resched(t);
t                 683 kernel/rcu/tree_exp.h 	struct task_struct *t;
t                 688 kernel/rcu/tree_exp.h 	t = list_entry(rnp->exp_tasks->prev,
t                 690 kernel/rcu/tree_exp.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
t                 691 kernel/rcu/tree_exp.h 		pr_cont(" P%d", t->pid);
t                  85 kernel/rcu/tree_plugin.h static void rcu_read_unlock_special(struct task_struct *t);
t                 137 kernel/rcu/tree_plugin.h 	struct task_struct *t = current;
t                 164 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
t                 182 kernel/rcu/tree_plugin.h 		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
t                 195 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, rnp->exp_tasks);
t                 206 kernel/rcu/tree_plugin.h 		list_add(&t->rcu_node_entry, rnp->gp_tasks);
t                 223 kernel/rcu/tree_plugin.h 		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
t                 227 kernel/rcu/tree_plugin.h 		rnp->exp_tasks = &t->rcu_node_entry;
t                 287 kernel/rcu/tree_plugin.h 	struct task_struct *t = current;
t                 293 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
t                 294 kernel/rcu/tree_plugin.h 	if (t->rcu_read_lock_nesting > 0 &&
t                 295 kernel/rcu/tree_plugin.h 	    !t->rcu_read_unlock_special.b.blocked) {
t                 300 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.blocked = true;
t                 301 kernel/rcu/tree_plugin.h 		t->rcu_blocked_node = rnp;
t                 309 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
t                 311 kernel/rcu/tree_plugin.h 				       t->pid,
t                 317 kernel/rcu/tree_plugin.h 		rcu_preempt_deferred_qs(t);
t                 374 kernel/rcu/tree_plugin.h 	struct task_struct *t = current;
t                 376 kernel/rcu/tree_plugin.h 	if (t->rcu_read_lock_nesting != 1) {
t                 377 kernel/rcu/tree_plugin.h 		--t->rcu_read_lock_nesting;
t                 380 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
t                 382 kernel/rcu/tree_plugin.h 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
t                 383 kernel/rcu/tree_plugin.h 			rcu_read_unlock_special(t);
t                 385 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting = 0;
t                 388 kernel/rcu/tree_plugin.h 		int rrln = t->rcu_read_lock_nesting;
t                 399 kernel/rcu/tree_plugin.h static struct list_head *rcu_next_node_entry(struct task_struct *t,
t                 404 kernel/rcu/tree_plugin.h 	np = t->rcu_node_entry.next;
t                 425 kernel/rcu/tree_plugin.h rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
t                 441 kernel/rcu/tree_plugin.h 	special = t->rcu_read_unlock_special;
t                 447 kernel/rcu/tree_plugin.h 	t->rcu_read_unlock_special.b.deferred_qs = false;
t                 450 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.need_qs = false;
t                 451 kernel/rcu/tree_plugin.h 		if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
t                 465 kernel/rcu/tree_plugin.h 		if (!t->rcu_read_unlock_special.s) {
t                 473 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.blocked = false;
t                 481 kernel/rcu/tree_plugin.h 		rnp = t->rcu_blocked_node;
t                 483 kernel/rcu/tree_plugin.h 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
t                 490 kernel/rcu/tree_plugin.h 		np = rcu_next_node_entry(t, rnp);
t                 491 kernel/rcu/tree_plugin.h 		list_del_init(&t->rcu_node_entry);
t                 492 kernel/rcu/tree_plugin.h 		t->rcu_blocked_node = NULL;
t                 494 kernel/rcu/tree_plugin.h 						rnp->gp_seq, t->pid);
t                 495 kernel/rcu/tree_plugin.h 		if (&t->rcu_node_entry == rnp->gp_tasks)
t                 497 kernel/rcu/tree_plugin.h 		if (&t->rcu_node_entry == rnp->exp_tasks)
t                 501 kernel/rcu/tree_plugin.h 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
t                 502 kernel/rcu/tree_plugin.h 			if (&t->rcu_node_entry == rnp->boost_tasks)
t                 550 kernel/rcu/tree_plugin.h static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
t                 553 kernel/rcu/tree_plugin.h 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
t                 554 kernel/rcu/tree_plugin.h 	       t->rcu_read_lock_nesting <= 0;
t                 564 kernel/rcu/tree_plugin.h static void rcu_preempt_deferred_qs(struct task_struct *t)
t                 567 kernel/rcu/tree_plugin.h 	bool couldrecurse = t->rcu_read_lock_nesting >= 0;
t                 569 kernel/rcu/tree_plugin.h 	if (!rcu_preempt_need_deferred_qs(t))
t                 572 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
t                 574 kernel/rcu/tree_plugin.h 	rcu_preempt_deferred_qs_irqrestore(t, flags);
t                 576 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
t                 595 kernel/rcu/tree_plugin.h static void rcu_read_unlock_special(struct task_struct *t)
t                 613 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.exp_hint = false;
t                 614 kernel/rcu/tree_plugin.h 		exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
t                 620 kernel/rcu/tree_plugin.h 		     (exp && !t->rcu_read_unlock_special.b.deferred_qs))) {
t                 639 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.deferred_qs = true;
t                 643 kernel/rcu/tree_plugin.h 	WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
t                 644 kernel/rcu/tree_plugin.h 	rcu_preempt_deferred_qs_irqrestore(t, flags);
t                 659 kernel/rcu/tree_plugin.h 	struct task_struct *t;
t                 667 kernel/rcu/tree_plugin.h 		t = container_of(rnp->gp_tasks, struct task_struct,
t                 670 kernel/rcu/tree_plugin.h 						rnp->gp_seq, t->pid);
t                 684 kernel/rcu/tree_plugin.h 	struct task_struct *t = current;
t                 689 kernel/rcu/tree_plugin.h 	if (t->rcu_read_lock_nesting > 0 ||
t                 692 kernel/rcu/tree_plugin.h 		if (rcu_preempt_need_deferred_qs(t)) {
t                 693 kernel/rcu/tree_plugin.h 			set_tsk_need_resched(t);
t                 696 kernel/rcu/tree_plugin.h 	} else if (rcu_preempt_need_deferred_qs(t)) {
t                 697 kernel/rcu/tree_plugin.h 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
t                 699 kernel/rcu/tree_plugin.h 	} else if (!t->rcu_read_lock_nesting) {
t                 705 kernel/rcu/tree_plugin.h 	if (t->rcu_read_lock_nesting > 0 &&
t                 708 kernel/rcu/tree_plugin.h 	    !t->rcu_read_unlock_special.b.need_qs &&
t                 710 kernel/rcu/tree_plugin.h 		t->rcu_read_unlock_special.b.need_qs = true;
t                 723 kernel/rcu/tree_plugin.h 	struct task_struct *t = current;
t                 726 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting = 1;
t                 728 kernel/rcu/tree_plugin.h 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
t                 729 kernel/rcu/tree_plugin.h 	} else if (unlikely(t->rcu_read_lock_nesting)) {
t                 730 kernel/rcu/tree_plugin.h 		t->rcu_read_lock_nesting = 1;
t                 882 kernel/rcu/tree_plugin.h static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
t                 886 kernel/rcu/tree_plugin.h static void rcu_preempt_deferred_qs(struct task_struct *t) { }
t                 967 kernel/rcu/tree_plugin.h 	struct task_struct *t;
t                1012 kernel/rcu/tree_plugin.h 	t = container_of(tb, struct task_struct, rcu_node_entry);
t                1013 kernel/rcu/tree_plugin.h 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
t                1119 kernel/rcu/tree_plugin.h 	struct task_struct *t;
t                1132 kernel/rcu/tree_plugin.h 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
t                1134 kernel/rcu/tree_plugin.h 	if (WARN_ON_ONCE(IS_ERR(t)))
t                1138 kernel/rcu/tree_plugin.h 	rnp->boost_kthread_task = t;
t                1141 kernel/rcu/tree_plugin.h 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
t                1142 kernel/rcu/tree_plugin.h 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
t                1156 kernel/rcu/tree_plugin.h 	struct task_struct *t = rnp->boost_kthread_task;
t                1161 kernel/rcu/tree_plugin.h 	if (!t)
t                1171 kernel/rcu/tree_plugin.h 	set_cpus_allowed_ptr(t, cm);
t                1879 kernel/rcu/tree_plugin.h 	struct task_struct *t;
t                1882 kernel/rcu/tree_plugin.h 	t = READ_ONCE(rdp->nocb_gp_kthread);
t                1883 kernel/rcu/tree_plugin.h 	if (rcu_nocb_poll || !t) {
t                1928 kernel/rcu/tree_plugin.h static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
t                1931 kernel/rcu/tree_plugin.h 	struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
t                2175 kernel/rcu/tree_plugin.h static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
t                2177 kernel/rcu/tree_plugin.h 	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
t                2262 kernel/rcu/tree_plugin.h 	struct task_struct *t;
t                2274 kernel/rcu/tree_plugin.h 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
t                2276 kernel/rcu/tree_plugin.h 		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
t                2278 kernel/rcu/tree_plugin.h 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
t                2282 kernel/rcu/tree_plugin.h 	t = kthread_run(rcu_nocb_cb_kthread, rdp,
t                2284 kernel/rcu/tree_plugin.h 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
t                2286 kernel/rcu/tree_plugin.h 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
t                 175 kernel/rcu/tree_stall.h 	struct task_struct *t;
t                 182 kernel/rcu/tree_stall.h 	t = list_entry(rnp->gp_tasks->prev,
t                 184 kernel/rcu/tree_stall.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
t                 190 kernel/rcu/tree_stall.h 		sched_show_task(t);
t                 201 kernel/rcu/tree_stall.h 	struct task_struct *t;
t                 208 kernel/rcu/tree_stall.h 	t = list_entry(rnp->gp_tasks->prev,
t                 210 kernel/rcu/tree_stall.h 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
t                 211 kernel/rcu/tree_stall.h 		pr_cont(" P%d", t->pid);
t                 598 kernel/rcu/update.c static void check_holdout_task(struct task_struct *t,
t                 603 kernel/rcu/update.c 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
t                 604 kernel/rcu/update.c 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
t                 605 kernel/rcu/update.c 	    !READ_ONCE(t->on_rq) ||
t                 607 kernel/rcu/update.c 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
t                 608 kernel/rcu/update.c 		WRITE_ONCE(t->rcu_tasks_holdout, false);
t                 609 kernel/rcu/update.c 		list_del_init(&t->rcu_tasks_holdout_list);
t                 610 kernel/rcu/update.c 		put_task_struct(t);
t                 613 kernel/rcu/update.c 	rcu_request_urgent_qs_task(t);
t                 620 kernel/rcu/update.c 	cpu = task_cpu(t);
t                 622 kernel/rcu/update.c 		 t, ".I"[is_idle_task(t)],
t                 624 kernel/rcu/update.c 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
t                 625 kernel/rcu/update.c 		 t->rcu_tasks_idle_cpu, cpu);
t                 626 kernel/rcu/update.c 	sched_show_task(t);
t                 633 kernel/rcu/update.c 	struct task_struct *g, *t;
t                 693 kernel/rcu/update.c 		for_each_process_thread(g, t) {
t                 694 kernel/rcu/update.c 			if (t != current && READ_ONCE(t->on_rq) &&
t                 695 kernel/rcu/update.c 			    !is_idle_task(t)) {
t                 696 kernel/rcu/update.c 				get_task_struct(t);
t                 697 kernel/rcu/update.c 				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
t                 698 kernel/rcu/update.c 				WRITE_ONCE(t->rcu_tasks_holdout, true);
t                 699 kernel/rcu/update.c 				list_add(&t->rcu_tasks_holdout_list,
t                 746 kernel/rcu/update.c 			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
t                 748 kernel/rcu/update.c 				check_holdout_task(t, needreport, &firstreport);
t                 792 kernel/rcu/update.c 	struct task_struct *t;
t                 794 kernel/rcu/update.c 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
t                 795 kernel/rcu/update.c 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
t                 798 kernel/rcu/update.c 	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
t                  84 kernel/rseq.c  static int rseq_update_cpu_id(struct task_struct *t)
t                  88 kernel/rseq.c  	if (put_user(cpu_id, &t->rseq->cpu_id_start))
t                  90 kernel/rseq.c  	if (put_user(cpu_id, &t->rseq->cpu_id))
t                  92 kernel/rseq.c  	trace_rseq_update(t);
t                  96 kernel/rseq.c  static int rseq_reset_rseq_cpu_id(struct task_struct *t)
t                 103 kernel/rseq.c  	if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
t                 110 kernel/rseq.c  	if (put_user(cpu_id, &t->rseq->cpu_id))
t                 115 kernel/rseq.c  static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
t                 123 kernel/rseq.c  	if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
t                 161 kernel/rseq.c  static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
t                 167 kernel/rseq.c  	ret = get_user(flags, &t->rseq->flags);
t                 190 kernel/rseq.c  	event_mask = t->rseq_event_mask;
t                 191 kernel/rseq.c  	t->rseq_event_mask = 0;
t                 197 kernel/rseq.c  static int clear_rseq_cs(struct task_struct *t)
t                 207 kernel/rseq.c  	if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
t                 224 kernel/rseq.c  	struct task_struct *t = current;
t                 228 kernel/rseq.c  	ret = rseq_get_rseq_cs(t, &rseq_cs);
t                 238 kernel/rseq.c  		return clear_rseq_cs(t);
t                 239 kernel/rseq.c  	ret = rseq_need_restart(t, rseq_cs.flags);
t                 242 kernel/rseq.c  	ret = clear_rseq_cs(t);
t                 264 kernel/rseq.c  	struct task_struct *t = current;
t                 267 kernel/rseq.c  	if (unlikely(t->flags & PF_EXITING))
t                 269 kernel/rseq.c  	if (unlikely(!access_ok(t->rseq, sizeof(*t->rseq))))
t                 274 kernel/rseq.c  	if (unlikely(rseq_update_cpu_id(t)))
t                 292 kernel/rseq.c  	struct task_struct *t = current;
t                 295 kernel/rseq.c  	if (!t->rseq)
t                 297 kernel/rseq.c  	if (!access_ok(t->rseq, sizeof(*t->rseq)) ||
t                 298 kernel/rseq.c  	    rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
t                 139 kernel/sched/autogroup.c 	struct task_struct *t;
t                 162 kernel/sched/autogroup.c 	for_each_thread(p, t)
t                 163 kernel/sched/autogroup.c 		sched_move_task(t);
t                 204 kernel/sched/completion.c 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
t                 205 kernel/sched/completion.c 	if (t == -ERESTARTSYS)
t                 206 kernel/sched/completion.c 		return t;
t                 241 kernel/sched/completion.c 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
t                 242 kernel/sched/completion.c 	if (t == -ERESTARTSYS)
t                 243 kernel/sched/completion.c 		return t;
t                2293 kernel/sched/core.c 	struct task_struct *p, *t;
t                2302 kernel/sched/core.c 	llist_for_each_entry_safe(p, t, llist, wake_entry)
t                2736 kernel/sched/core.c 	struct ctl_table t;
t                2743 kernel/sched/core.c 	t = *table;
t                2744 kernel/sched/core.c 	t.data = &state;
t                2745 kernel/sched/core.c 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
t                2811 kernel/sched/core.c 	struct ctl_table t;
t                2818 kernel/sched/core.c 	t = *table;
t                2819 kernel/sched/core.c 	t.data = &state;
t                2820 kernel/sched/core.c 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
t                5846 kernel/sched/core.c static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
t                5874 kernel/sched/core.c 	jiffies_to_timespec64(time_slice, t);
t                5896 kernel/sched/core.c 	struct timespec64 t;
t                5897 kernel/sched/core.c 	int retval = sched_rr_get_interval(pid, &t);
t                5900 kernel/sched/core.c 		retval = put_timespec64(&t, interval);
t                5909 kernel/sched/core.c 	struct timespec64 t;
t                5910 kernel/sched/core.c 	int retval = sched_rr_get_interval(pid, &t);
t                5913 kernel/sched/core.c 		retval = put_old_timespec32(&t, interval);
t                 270 kernel/sched/cputime.c static inline u64 read_sum_exec_runtime(struct task_struct *t)
t                 272 kernel/sched/cputime.c 	return t->se.sum_exec_runtime;
t                 275 kernel/sched/cputime.c static u64 read_sum_exec_runtime(struct task_struct *t)
t                 281 kernel/sched/cputime.c 	rq = task_rq_lock(t, &rf);
t                 282 kernel/sched/cputime.c 	ns = t->se.sum_exec_runtime;
t                 283 kernel/sched/cputime.c 	task_rq_unlock(rq, t, &rf);
t                 297 kernel/sched/cputime.c 	struct task_struct *t;
t                 322 kernel/sched/cputime.c 		for_each_thread(tsk, t) {
t                 323 kernel/sched/cputime.c 			task_cputime(t, &utime, &stime);
t                 326 kernel/sched/cputime.c 			times->sum_exec_runtime += read_sum_exec_runtime(t);
t                 823 kernel/sched/cputime.c void vtime_init_idle(struct task_struct *t, int cpu)
t                 825 kernel/sched/cputime.c 	struct vtime *vtime = &t->vtime;
t                 836 kernel/sched/cputime.c u64 task_gtime(struct task_struct *t)
t                 838 kernel/sched/cputime.c 	struct vtime *vtime = &t->vtime;
t                 843 kernel/sched/cputime.c 		return t->gtime;
t                 848 kernel/sched/cputime.c 		gtime = t->gtime;
t                 849 kernel/sched/cputime.c 		if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
t                 862 kernel/sched/cputime.c void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
t                 864 kernel/sched/cputime.c 	struct vtime *vtime = &t->vtime;
t                 869 kernel/sched/cputime.c 		*utime = t->utime;
t                 870 kernel/sched/cputime.c 		*stime = t->stime;
t                 877 kernel/sched/cputime.c 		*utime = t->utime;
t                 878 kernel/sched/cputime.c 		*stime = t->stime;
t                 881 kernel/sched/cputime.c 		if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
t                 890 kernel/sched/cputime.c 		if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
t                 775 kernel/sched/deadline.c 			       struct sched_dl_entity *pi_se, u64 t)
t                 798 kernel/sched/deadline.c 	right = ((dl_se->deadline - t) >> DL_SCALE) *
t                  49 kernel/sched/isolation.c void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
t                  53 kernel/sched/isolation.c 			set_cpus_allowed_ptr(t, housekeeping_mask);
t                 493 kernel/sched/psi.c 	struct psi_trigger *t;
t                 495 kernel/sched/psi.c 	list_for_each_entry(t, &group->triggers, node)
t                 496 kernel/sched/psi.c 		window_reset(&t->win, now,
t                 497 kernel/sched/psi.c 				group->total[PSI_POLL][t->state], 0);
t                 505 kernel/sched/psi.c 	struct psi_trigger *t;
t                 513 kernel/sched/psi.c 	list_for_each_entry(t, &group->triggers, node) {
t                 517 kernel/sched/psi.c 		if (group->polling_total[t->state] == total[t->state])
t                 529 kernel/sched/psi.c 		growth = window_update(&t->win, now, total[t->state]);
t                 530 kernel/sched/psi.c 		if (growth < t->threshold)
t                 534 kernel/sched/psi.c 		if (now < t->last_event_time + t->win.size)
t                 538 kernel/sched/psi.c 		if (cmpxchg(&t->event, 0, 1) == 0)
t                 539 kernel/sched/psi.c 			wake_up_interruptible(&t->event_wait);
t                 540 kernel/sched/psi.c 		t->last_event_time = now;
t                 676 kernel/sched/psi.c 	unsigned int t, m;
t                 694 kernel/sched/psi.c 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
t                 695 kernel/sched/psi.c 		if (!(m & (1 << t)))
t                 697 kernel/sched/psi.c 		if (groupc->tasks[t] == 0 && !psi_bug) {
t                 699 kernel/sched/psi.c 					cpu, t, groupc->tasks[0],
t                 704 kernel/sched/psi.c 		groupc->tasks[t]--;
t                 707 kernel/sched/psi.c 	for (t = 0; set; set &= ~(1 << t), t++)
t                 708 kernel/sched/psi.c 		if (set & (1 << t))
t                 709 kernel/sched/psi.c 			groupc->tasks[t]++;
t                1010 kernel/sched/psi.c 	struct psi_trigger *t;
t                1036 kernel/sched/psi.c 	t = kmalloc(sizeof(*t), GFP_KERNEL);
t                1037 kernel/sched/psi.c 	if (!t)
t                1040 kernel/sched/psi.c 	t->group = group;
t                1041 kernel/sched/psi.c 	t->state = state;
t                1042 kernel/sched/psi.c 	t->threshold = threshold_us * NSEC_PER_USEC;
t                1043 kernel/sched/psi.c 	t->win.size = window_us * NSEC_PER_USEC;
t                1044 kernel/sched/psi.c 	window_reset(&t->win, 0, 0, 0);
t                1046 kernel/sched/psi.c 	t->event = 0;
t                1047 kernel/sched/psi.c 	t->last_event_time = 0;
t                1048 kernel/sched/psi.c 	init_waitqueue_head(&t->event_wait);
t                1049 kernel/sched/psi.c 	kref_init(&t->refcount);
t                1061 kernel/sched/psi.c 			kfree(t);
t                1071 kernel/sched/psi.c 	list_add(&t->node, &group->triggers);
t                1073 kernel/sched/psi.c 		div_u64(t->win.size, UPDATES_PER_WINDOW));
t                1074 kernel/sched/psi.c 	group->nr_triggers[t->state]++;
t                1075 kernel/sched/psi.c 	group->poll_states |= (1 << t->state);
t                1079 kernel/sched/psi.c 	return t;
t                1084 kernel/sched/psi.c 	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
t                1085 kernel/sched/psi.c 	struct psi_group *group = t->group;
t                1095 kernel/sched/psi.c 	wake_up_interruptible(&t->event_wait);
t                1099 kernel/sched/psi.c 	if (!list_empty(&t->node)) {
t                1103 kernel/sched/psi.c 		list_del(&t->node);
t                1104 kernel/sched/psi.c 		group->nr_triggers[t->state]--;
t                1105 kernel/sched/psi.c 		if (!group->nr_triggers[t->state])
t                1106 kernel/sched/psi.c 			group->poll_states &= ~(1 << t->state);
t                1146 kernel/sched/psi.c 	kfree(t);
t                1165 kernel/sched/psi.c 	struct psi_trigger *t;
t                1172 kernel/sched/psi.c 	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
t                1173 kernel/sched/psi.c 	if (!t) {
t                1177 kernel/sched/psi.c 	kref_get(&t->refcount);
t                1181 kernel/sched/psi.c 	poll_wait(file, &t->event_wait, wait);
t                1183 kernel/sched/psi.c 	if (cmpxchg(&t->event, 1, 0) == 1)
t                1186 kernel/sched/psi.c 	kref_put(&t->refcount, psi_trigger_destroy);
t                1302 kernel/sched/sched.h extern int migrate_swap(struct task_struct *p, struct task_struct *t,
t                 145 kernel/sched/stats.h static inline void sched_info_reset_dequeued(struct task_struct *t)
t                 147 kernel/sched/stats.h 	t->sched_info.last_queued = 0;
t                 156 kernel/sched/stats.h static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
t                 161 kernel/sched/stats.h 		if (t->sched_info.last_queued)
t                 162 kernel/sched/stats.h 			delta = now - t->sched_info.last_queued;
t                 164 kernel/sched/stats.h 	sched_info_reset_dequeued(t);
t                 165 kernel/sched/stats.h 	t->sched_info.run_delay += delta;
t                 175 kernel/sched/stats.h static void sched_info_arrive(struct rq *rq, struct task_struct *t)
t                 179 kernel/sched/stats.h 	if (t->sched_info.last_queued)
t                 180 kernel/sched/stats.h 		delta = now - t->sched_info.last_queued;
t                 181 kernel/sched/stats.h 	sched_info_reset_dequeued(t);
t                 182 kernel/sched/stats.h 	t->sched_info.run_delay += delta;
t                 183 kernel/sched/stats.h 	t->sched_info.last_arrival = now;
t                 184 kernel/sched/stats.h 	t->sched_info.pcount++;
t                 194 kernel/sched/stats.h static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
t                 197 kernel/sched/stats.h 		if (!t->sched_info.last_queued)
t                 198 kernel/sched/stats.h 			t->sched_info.last_queued = rq_clock(rq);
t                 210 kernel/sched/stats.h static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
t                 212 kernel/sched/stats.h 	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
t                 216 kernel/sched/stats.h 	if (t->state == TASK_RUNNING)
t                 217 kernel/sched/stats.h 		sched_info_queued(rq, t);
t                 248 kernel/sched/stats.h # define sched_info_queued(rq, t)	do { } while (0)
t                 249 kernel/sched/stats.h # define sched_info_reset_dequeued(t)	do { } while (0)
t                 250 kernel/sched/stats.h # define sched_info_dequeued(rq, t)	do { } while (0)
t                 251 kernel/sched/stats.h # define sched_info_depart(rq, t)	do { } while (0)
t                 253 kernel/sched/stats.h # define sched_info_switch(rq, t, next)	do { } while (0)
t                1381 kernel/sched/topology.c 		struct sched_domain *t = sd;
t                1389 kernel/sched/topology.c 		for_each_lower_domain(t)
t                1390 kernel/sched/topology.c 			t->flags |= SD_BALANCE_WAKE;
t                  67 kernel/signal.c static void __user *sig_handler(struct task_struct *t, int sig)
t                  69 kernel/signal.c 	return t->sighand->action[sig - 1].sa.sa_handler;
t                  79 kernel/signal.c static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
t                  83 kernel/signal.c 	handler = sig_handler(t, sig);
t                  86 kernel/signal.c 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
t                  89 kernel/signal.c 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
t                  94 kernel/signal.c 	if (unlikely((t->flags & PF_KTHREAD) &&
t                 101 kernel/signal.c static bool sig_ignored(struct task_struct *t, int sig, bool force)
t                 108 kernel/signal.c 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
t                 116 kernel/signal.c 	if (t->ptrace && sig != SIGKILL)
t                 119 kernel/signal.c 	return sig_task_ignored(t, sig, force);
t                 154 kernel/signal.c static bool recalc_sigpending_tsk(struct task_struct *t)
t                 156 kernel/signal.c 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
t                 157 kernel/signal.c 	    PENDING(&t->pending, &t->blocked) ||
t                 158 kernel/signal.c 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
t                 159 kernel/signal.c 	    cgroup_task_frozen(t)) {
t                 160 kernel/signal.c 		set_tsk_thread_flag(t, TIF_SIGPENDING);
t                 176 kernel/signal.c void recalc_sigpending_and_wake(struct task_struct *t)
t                 178 kernel/signal.c 	if (recalc_sigpending_tsk(t))
t                 179 kernel/signal.c 		signal_wake_up(t, 0);
t                 412 kernel/signal.c __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
t                 427 kernel/signal.c 	user = __task_cred(t)->user;
t                 433 kernel/signal.c 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
t                 475 kernel/signal.c void flush_signals(struct task_struct *t)
t                 479 kernel/signal.c 	spin_lock_irqsave(&t->sighand->siglock, flags);
t                 480 kernel/signal.c 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
t                 481 kernel/signal.c 	flush_sigqueue(&t->pending);
t                 482 kernel/signal.c 	flush_sigqueue(&t->signal->shared_pending);
t                 483 kernel/signal.c 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
t                 523 kernel/signal.c void ignore_signals(struct task_struct *t)
t                 528 kernel/signal.c 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
t                 530 kernel/signal.c 	flush_signals(t);
t                 538 kernel/signal.c flush_signal_handlers(struct task_struct *t, int force_default)
t                 541 kernel/signal.c 	struct k_sigaction *ka = &t->sighand->action[0];
t                 759 kernel/signal.c void signal_wake_up_state(struct task_struct *t, unsigned int state)
t                 761 kernel/signal.c 	set_tsk_thread_flag(t, TIF_SIGPENDING);
t                 769 kernel/signal.c 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
t                 770 kernel/signal.c 		kick_process(t);
t                 811 kernel/signal.c static bool kill_ok_by_cred(struct task_struct *t)
t                 814 kernel/signal.c 	const struct cred *tcred = __task_cred(t);
t                 828 kernel/signal.c 				 struct task_struct *t)
t                 839 kernel/signal.c 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
t                 843 kernel/signal.c 	if (!same_thread_group(current, t) &&
t                 844 kernel/signal.c 	    !kill_ok_by_cred(t)) {
t                 847 kernel/signal.c 			sid = task_session(t);
t                 860 kernel/signal.c 	return security_task_kill(t, info, sig, NULL);
t                 880 kernel/signal.c static void ptrace_trap_notify(struct task_struct *t)
t                 882 kernel/signal.c 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
t                 883 kernel/signal.c 	assert_spin_locked(&t->sighand->siglock);
t                 885 kernel/signal.c 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
t                 886 kernel/signal.c 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
t                 902 kernel/signal.c 	struct task_struct *t;
t                 917 kernel/signal.c 		for_each_thread(p, t)
t                 918 kernel/signal.c 			flush_sigqueue_mask(&flush, &t->pending);
t                 926 kernel/signal.c 		for_each_thread(p, t) {
t                 927 kernel/signal.c 			flush_sigqueue_mask(&flush, &t->pending);
t                 928 kernel/signal.c 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
t                 929 kernel/signal.c 			if (likely(!(t->ptrace & PT_SEIZED)))
t                 930 kernel/signal.c 				wake_up_state(t, __TASK_STOPPED);
t                 932 kernel/signal.c 				ptrace_trap_notify(t);
t                 992 kernel/signal.c 	struct task_struct *t;
t                1001 kernel/signal.c 		t = p;
t                1012 kernel/signal.c 		t = signal->curr_target;
t                1013 kernel/signal.c 		while (!wants_signal(sig, t)) {
t                1014 kernel/signal.c 			t = next_thread(t);
t                1015 kernel/signal.c 			if (t == signal->curr_target)
t                1023 kernel/signal.c 		signal->curr_target = t;
t                1032 kernel/signal.c 	    !sigismember(&t->real_blocked, sig) &&
t                1047 kernel/signal.c 			t = p;
t                1049 kernel/signal.c 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
t                1050 kernel/signal.c 				sigaddset(&t->pending.signal, SIGKILL);
t                1051 kernel/signal.c 				signal_wake_up(t, 1);
t                1052 kernel/signal.c 			} while_each_thread(p, t);
t                1061 kernel/signal.c 	signal_wake_up(t, sig == SIGKILL);
t                1070 kernel/signal.c static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
t                1078 kernel/signal.c 	assert_spin_locked(&t->sighand->siglock);
t                1081 kernel/signal.c 	if (!prepare_signal(sig, t, force))
t                1084 kernel/signal.c 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
t                1098 kernel/signal.c 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
t                1115 kernel/signal.c 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
t                1125 kernel/signal.c 							task_active_pid_ns(t));
t                1128 kernel/signal.c 				from_kuid_munged(task_cred_xxx(t, user_ns),
t                1163 kernel/signal.c 	signalfd_notify(t, sig);
t                1169 kernel/signal.c 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
t                1180 kernel/signal.c 	complete_signal(sig, t, type);
t                1182 kernel/signal.c 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
t                1208 kernel/signal.c static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
t                1216 kernel/signal.c 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
t                1225 kernel/signal.c 		t_user_ns = task_cred_xxx(t, user_ns);
t                1236 kernel/signal.c 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
t                1241 kernel/signal.c 	return __send_signal(sig, info, t, type, force);
t                1309 kernel/signal.c force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
t                1316 kernel/signal.c 	spin_lock_irqsave(&t->sighand->siglock, flags);
t                1317 kernel/signal.c 	action = &t->sighand->action[sig-1];
t                1319 kernel/signal.c 	blocked = sigismember(&t->blocked, sig);
t                1323 kernel/signal.c 			sigdelset(&t->blocked, sig);
t                1324 kernel/signal.c 			recalc_sigpending_and_wake(t);
t                1331 kernel/signal.c 	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
t                1332 kernel/signal.c 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
t                1333 kernel/signal.c 	ret = send_signal(sig, info, t, PIDTYPE_PID);
t                1334 kernel/signal.c 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
t                1349 kernel/signal.c 	struct task_struct *t = p;
t                1354 kernel/signal.c 	while_each_thread(p, t) {
t                1355 kernel/signal.c 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
t                1359 kernel/signal.c 		if (t->exit_state)
t                1361 kernel/signal.c 		sigaddset(&t->pending.signal, SIGKILL);
t                1362 kernel/signal.c 		signal_wake_up(t, 1);
t                1659 kernel/signal.c 	, struct task_struct *t)
t                1676 kernel/signal.c 	return force_sig_info_to_task(&info, t);
t                1691 kernel/signal.c 	, struct task_struct *t)
t                1708 kernel/signal.c 	return send_sig_info(info.si_signo, &info, t);
t                1725 kernel/signal.c int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
t                1736 kernel/signal.c 	return send_sig_info(info.si_signo, &info, t);
t                1850 kernel/signal.c 	struct task_struct *t;
t                1858 kernel/signal.c 	t = pid_task(pid, type);
t                1859 kernel/signal.c 	if (!t || !likely(lock_task_sighand(t, &flags)))
t                1864 kernel/signal.c 	if (!prepare_signal(sig, t, false))
t                1880 kernel/signal.c 	signalfd_notify(t, sig);
t                1881 kernel/signal.c 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
t                1884 kernel/signal.c 	complete_signal(sig, t, type);
t                1887 kernel/signal.c 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
t                1888 kernel/signal.c 	unlock_task_sighand(t, &flags);
t                2315 kernel/signal.c 		struct task_struct *t;
t                2350 kernel/signal.c 		t = current;
t                2351 kernel/signal.c 		while_each_thread(current, t) {
t                2357 kernel/signal.c 			if (!task_is_stopped(t) &&
t                2358 kernel/signal.c 			    task_set_jobctl_pending(t, signr | gstop)) {
t                2360 kernel/signal.c 				if (likely(!(t->ptrace & PT_SEIZED)))
t                2361 kernel/signal.c 					signal_wake_up(t, 0);
t                2363 kernel/signal.c 					ptrace_trap_notify(t);
t                2795 kernel/signal.c 	struct task_struct *t;
t                2801 kernel/signal.c 	t = tsk;
t                2802 kernel/signal.c 	while_each_thread(tsk, t) {
t                2803 kernel/signal.c 		if (t->flags & PF_EXITING)
t                2806 kernel/signal.c 		if (!has_pending_signals(&retarget, &t->blocked))
t                2809 kernel/signal.c 		sigandsets(&retarget, &retarget, &t->blocked);
t                2811 kernel/signal.c 		if (!signal_pending(t))
t                2812 kernel/signal.c 			signal_wake_up(t, 0);
t                3570 kernel/signal.c 	struct timespec64 t;
t                3581 kernel/signal.c 		if (get_timespec64(&t, uts))
t                3585 kernel/signal.c 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
t                3601 kernel/signal.c 	struct timespec64 t;
t                3612 kernel/signal.c 		if (get_old_timespec32(&t, uts))
t                3616 kernel/signal.c 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
t                3960 kernel/signal.c 	struct task_struct *p = current, *t;
t                3994 kernel/signal.c 			for_each_thread(p, t)
t                3995 kernel/signal.c 				flush_sigqueue_mask(&mask, &t->pending);
t                4007 kernel/signal.c 	struct task_struct *t = current;
t                4011 kernel/signal.c 		oss->ss_sp = (void __user *) t->sas_ss_sp;
t                4012 kernel/signal.c 		oss->ss_size = t->sas_ss_size;
t                4039 kernel/signal.c 		t->sas_ss_sp = (unsigned long) ss_sp;
t                4040 kernel/signal.c 		t->sas_ss_size = ss_size;
t                4041 kernel/signal.c 		t->sas_ss_flags = ss_flags;
t                4073 kernel/signal.c 	struct task_struct *t = current;
t                4074 kernel/signal.c 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
t                4075 kernel/signal.c 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
t                4076 kernel/signal.c 		__put_user(t->sas_ss_size, &uss->ss_size);
t                4079 kernel/signal.c 	if (t->sas_ss_flags & SS_AUTODISARM)
t                4080 kernel/signal.c 		sas_ss_reset(t);
t                4131 kernel/signal.c 	struct task_struct *t = current;
t                4132 kernel/signal.c 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
t                4134 kernel/signal.c 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
t                4135 kernel/signal.c 		__put_user(t->sas_ss_size, &uss->ss_size);
t                4138 kernel/signal.c 	if (t->sas_ss_flags & SS_AUTODISARM)
t                4139 kernel/signal.c 		sas_ss_reset(t);
t                4594 kernel/signal.c void kdb_send_sig(struct task_struct *t, int sig)
t                4598 kernel/signal.c 	if (!spin_trylock(&t->sighand->siglock)) {
t                4604 kernel/signal.c 	new_t = kdb_prev_t != t;
t                4605 kernel/signal.c 	kdb_prev_t = t;
t                4606 kernel/signal.c 	if (t->state != TASK_RUNNING && new_t) {
t                4607 kernel/signal.c 		spin_unlock(&t->sighand->siglock);
t                4616 kernel/signal.c 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
t                4617 kernel/signal.c 	spin_unlock(&t->sighand->siglock);
t                4620 kernel/signal.c 			   sig, t->pid);
t                4622 kernel/signal.c 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
t                 471 kernel/softirq.c static void __tasklet_schedule_common(struct tasklet_struct *t,
t                 480 kernel/softirq.c 	t->next = NULL;
t                 481 kernel/softirq.c 	*head->tail = t;
t                 482 kernel/softirq.c 	head->tail = &(t->next);
t                 487 kernel/softirq.c void __tasklet_schedule(struct tasklet_struct *t)
t                 489 kernel/softirq.c 	__tasklet_schedule_common(t, &tasklet_vec,
t                 494 kernel/softirq.c void __tasklet_hi_schedule(struct tasklet_struct *t)
t                 496 kernel/softirq.c 	__tasklet_schedule_common(t, &tasklet_hi_vec,
t                 514 kernel/softirq.c 		struct tasklet_struct *t = list;
t                 518 kernel/softirq.c 		if (tasklet_trylock(t)) {
t                 519 kernel/softirq.c 			if (!atomic_read(&t->count)) {
t                 521 kernel/softirq.c 							&t->state))
t                 523 kernel/softirq.c 				t->func(t->data);
t                 524 kernel/softirq.c 				tasklet_unlock(t);
t                 527 kernel/softirq.c 			tasklet_unlock(t);
t                 531 kernel/softirq.c 		t->next = NULL;
t                 532 kernel/softirq.c 		*tl_head->tail = t;
t                 533 kernel/softirq.c 		tl_head->tail = &t->next;
t                 549 kernel/softirq.c void tasklet_init(struct tasklet_struct *t,
t                 552 kernel/softirq.c 	t->next = NULL;
t                 553 kernel/softirq.c 	t->state = 0;
t                 554 kernel/softirq.c 	atomic_set(&t->count, 0);
t                 555 kernel/softirq.c 	t->func = func;
t                 556 kernel/softirq.c 	t->data = data;
t                 560 kernel/softirq.c void tasklet_kill(struct tasklet_struct *t)
t                 565 kernel/softirq.c 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
t                 568 kernel/softirq.c 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
t                 570 kernel/softirq.c 	tasklet_unlock_wait(t);
t                 571 kernel/softirq.c 	clear_bit(TASKLET_STATE_SCHED, &t->state);
t                 621 kernel/softirq.c void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
t                 626 kernel/softirq.c 	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
t                 628 kernel/softirq.c 	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
t                 633 kernel/softirq.c 		if (*i == t) {
t                 634 kernel/softirq.c 			*i = t->next;
t                1695 kernel/sys.c   static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
t                1697 kernel/sys.c   	r->ru_nvcsw += t->nvcsw;
t                1698 kernel/sys.c   	r->ru_nivcsw += t->nivcsw;
t                1699 kernel/sys.c   	r->ru_minflt += t->min_flt;
t                1700 kernel/sys.c   	r->ru_majflt += t->maj_flt;
t                1701 kernel/sys.c   	r->ru_inblock += task_io_get_inblock(t);
t                1702 kernel/sys.c   	r->ru_oublock += task_io_get_oublock(t);
t                1707 kernel/sys.c   	struct task_struct *t;
t                1754 kernel/sys.c   		t = p;
t                1756 kernel/sys.c   			accumulate_thread_rusage(t, r);
t                1757 kernel/sys.c   		} while_each_thread(p, t);
t                2251 kernel/sys.c   int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
t                2256 kernel/sys.c   int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
t                2611 kernel/sysctl.c 	struct ctl_table t;
t                2618 kernel/sysctl.c 	t = *table;
t                2619 kernel/sysctl.c 	t.data = &tmptaint;
t                2620 kernel/sysctl.c 	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
t                 287 kernel/time/hrtimer.c # define switch_hrtimer_base(t, b, p)	(b)
t                1763 kernel/time/hrtimer.c 	struct hrtimer_sleeper *t =
t                1765 kernel/time/hrtimer.c 	struct task_struct *task = t->task;
t                1767 kernel/time/hrtimer.c 	t->task = NULL;
t                1865 kernel/time/hrtimer.c static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
t                1871 kernel/time/hrtimer.c 		hrtimer_sleeper_start_expires(t, mode);
t                1873 kernel/time/hrtimer.c 		if (likely(t->task))
t                1876 kernel/time/hrtimer.c 		hrtimer_cancel(&t->timer);
t                1879 kernel/time/hrtimer.c 	} while (t->task && !signal_pending(current));
t                1883 kernel/time/hrtimer.c 	if (!t->task)
t                1888 kernel/time/hrtimer.c 		ktime_t rem = hrtimer_expires_remaining(&t->timer);
t                1902 kernel/time/hrtimer.c 	struct hrtimer_sleeper t;
t                1905 kernel/time/hrtimer.c 	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
t                1907 kernel/time/hrtimer.c 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
t                1908 kernel/time/hrtimer.c 	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
t                1909 kernel/time/hrtimer.c 	destroy_hrtimer_on_stack(&t.timer);
t                1917 kernel/time/hrtimer.c 	struct hrtimer_sleeper t;
t                1925 kernel/time/hrtimer.c 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
t                1926 kernel/time/hrtimer.c 	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
t                1927 kernel/time/hrtimer.c 	ret = do_nanosleep(&t, mode);
t                1939 kernel/time/hrtimer.c 	restart->nanosleep.clockid = t.timer.base->clockid;
t                1940 kernel/time/hrtimer.c 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
t                1942 kernel/time/hrtimer.c 	destroy_hrtimer_on_stack(&t.timer);
t                2106 kernel/time/hrtimer.c 	struct hrtimer_sleeper t;
t                2125 kernel/time/hrtimer.c 	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
t                2126 kernel/time/hrtimer.c 	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
t                2127 kernel/time/hrtimer.c 	hrtimer_sleeper_start_expires(&t, mode);
t                2129 kernel/time/hrtimer.c 	if (likely(t.task))
t                2132 kernel/time/hrtimer.c 	hrtimer_cancel(&t.timer);
t                2133 kernel/time/hrtimer.c 	destroy_hrtimer_on_stack(&t.timer);
t                2137 kernel/time/hrtimer.c 	return !t.task ? 0 : -EINTR;
t                  58 kernel/time/itimer.c 		u64 t, samples[CPUCLOCK_MAX];
t                  61 kernel/time/itimer.c 		t = samples[clock_id];
t                  63 kernel/time/itimer.c 		if (val < t)
t                  67 kernel/time/itimer.c 			val -= t;
t                 182 kernel/time/itimer.c #define timeval_valid(t) \
t                 183 kernel/time/itimer.c 	(((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
t                 366 kernel/time/posix-cpu-timers.c 	u64 t;
t                 373 kernel/time/posix-cpu-timers.c 		t = cpu_clock_sample(clkid, tsk);
t                 375 kernel/time/posix-cpu-timers.c 		t = cpu_clock_sample_group(clkid, tsk, false);
t                 378 kernel/time/posix-cpu-timers.c 	*tp = ns_to_timespec64(t);
t                1347 kernel/time/posix-cpu-timers.c 	struct timespec64 t;
t                1349 kernel/time/posix-cpu-timers.c 	t = ns_to_timespec64(restart_block->nanosleep.expires);
t                1351 kernel/time/posix-cpu-timers.c 	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
t                 128 kernel/time/posix-stubs.c 	struct timespec64 t;
t                 139 kernel/time/posix-stubs.c 	if (get_timespec64(&t, rqtp))
t                 141 kernel/time/posix-stubs.c 	if (!timespec64_valid(&t))
t                 147 kernel/time/posix-stubs.c 	return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
t                 214 kernel/time/posix-stubs.c 	struct timespec64 t;
t                 225 kernel/time/posix-stubs.c 	if (get_old_timespec32(&t, rqtp))
t                 227 kernel/time/posix-stubs.c 	if (!timespec64_valid(&t))
t                 233 kernel/time/posix-stubs.c 	return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
t                 182 kernel/time/posix-timers.c 				    struct __kernel_timex *t)
t                 184 kernel/time/posix-timers.c 	return do_adjtimex(t);
t                1213 kernel/time/posix-timers.c 	struct timespec64 t;
t                1220 kernel/time/posix-timers.c 	if (get_timespec64(&t, rqtp))
t                1223 kernel/time/posix-timers.c 	if (!timespec64_valid(&t))
t                1230 kernel/time/posix-timers.c 	return kc->nsleep(which_clock, flags, &t);
t                1240 kernel/time/posix-timers.c 	struct timespec64 t;
t                1247 kernel/time/posix-timers.c 	if (get_old_timespec32(&t, rqtp))
t                1250 kernel/time/posix-timers.c 	if (!timespec64_valid(&t))
t                1257 kernel/time/posix-timers.c 	return kc->nsleep(which_clock, flags, &t);
t                  99 kernel/time/tick-broadcast-hrtimer.c static enum hrtimer_restart bc_handler(struct hrtimer *t)
t                2154 kernel/time/timekeeping.c 	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
t                2156 kernel/time/timekeeping.c 	*ts = ktime_to_timespec64(t);
t                  46 kernel/time/timekeeping_debug.c void tk_debug_account_sleep_time(const struct timespec64 *t)
t                  49 kernel/time/timekeeping_debug.c 	int bin = min(fls(t->tv_sec), NUM_BINS-1);
t                  53 kernel/time/timekeeping_debug.c 			   (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
t                  11 kernel/time/timekeeping_internal.h extern void tk_debug_account_sleep_time(const struct timespec64 *t);
t                1820 kernel/time/timer.c static void process_timeout(struct timer_list *t)
t                1822 kernel/time/timer.c 	struct process_timer *timeout = from_timer(timeout, t, timer);
t                  70 kernel/trace/blktrace.c 	struct blk_io_trace *t;
t                  82 kernel/trace/blktrace.c 						  sizeof(*t) + len + cgid_len,
t                  86 kernel/trace/blktrace.c 		t = ring_buffer_event_data(event);
t                  93 kernel/trace/blktrace.c 	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
t                  94 kernel/trace/blktrace.c 	if (t) {
t                  95 kernel/trace/blktrace.c 		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t                  96 kernel/trace/blktrace.c 		t->time = ktime_to_ns(ktime_get());
t                  98 kernel/trace/blktrace.c 		t->device = bt->dev;
t                  99 kernel/trace/blktrace.c 		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
t                 100 kernel/trace/blktrace.c 		t->pid = pid;
t                 101 kernel/trace/blktrace.c 		t->cpu = cpu;
t                 102 kernel/trace/blktrace.c 		t->pdu_len = len + cgid_len;
t                 104 kernel/trace/blktrace.c 			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
t                 105 kernel/trace/blktrace.c 		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
t                 220 kernel/trace/blktrace.c 	struct blk_io_trace *t;
t                 255 kernel/trace/blktrace.c 						  sizeof(*t) + pdu_len + cgid_len,
t                 259 kernel/trace/blktrace.c 		t = ring_buffer_event_data(event);
t                 272 kernel/trace/blktrace.c 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
t                 273 kernel/trace/blktrace.c 	if (t) {
t                 276 kernel/trace/blktrace.c 		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t                 277 kernel/trace/blktrace.c 		t->sequence = ++(*sequence);
t                 278 kernel/trace/blktrace.c 		t->time = ktime_to_ns(ktime_get());
t                 286 kernel/trace/blktrace.c 		t->cpu = cpu;
t                 287 kernel/trace/blktrace.c 		t->pid = pid;
t                 289 kernel/trace/blktrace.c 		t->sector = sector;
t                 290 kernel/trace/blktrace.c 		t->bytes = bytes;
t                 291 kernel/trace/blktrace.c 		t->action = what;
t                 292 kernel/trace/blktrace.c 		t->device = bt->dev;
t                 293 kernel/trace/blktrace.c 		t->error = error;
t                 294 kernel/trace/blktrace.c 		t->pdu_len = pdu_len + cgid_len;
t                 297 kernel/trace/blktrace.c 			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
t                 299 kernel/trace/blktrace.c 			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
t                1178 kernel/trace/blktrace.c static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
t                1181 kernel/trace/blktrace.c 	int tc = t->action >> BLK_TC_SHIFT;
t                1183 kernel/trace/blktrace.c 	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
t                1195 kernel/trace/blktrace.c 	else if (t->bytes)
t                1287 kernel/trace/blktrace.c 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
t                1289 kernel/trace/blktrace.c 	fill_rwbs(rwbs, t);
t                1293 kernel/trace/blktrace.c 			 MAJOR(t->device), MINOR(t->device), iter->cpu,
t                1301 kernel/trace/blktrace.c 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
t                1303 kernel/trace/blktrace.c 	fill_rwbs(rwbs, t);
t                1313 kernel/trace/blktrace.c 				 MAJOR(t->device), MINOR(t->device),
t                1318 kernel/trace/blktrace.c 				 MAJOR(t->device), MINOR(t->device),
t                1322 kernel/trace/blktrace.c 				 MAJOR(t->device), MINOR(t->device), act, rwbs);
t                1509 kernel/trace/blktrace.c 	const struct blk_io_trace *t;
t                1515 kernel/trace/blktrace.c 	t	   = te_blk_io_trace(iter->ent);
t                1516 kernel/trace/blktrace.c 	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
t                1519 kernel/trace/blktrace.c 	has_cg	   = t->action & __BLK_TA_CGROUP;
t                1521 kernel/trace/blktrace.c 	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
t                1546 kernel/trace/blktrace.c 	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
t                1554 kernel/trace/blktrace.c 	trace_seq_putmem(s, &t->sector,
t                1555 kernel/trace/blktrace.c 			 sizeof(old) - offset + t->pdu_len);
t                 347 kernel/trace/fgraph.c 	struct task_struct *g, *t;
t                 363 kernel/trace/fgraph.c 	do_each_thread(g, t) {
t                 369 kernel/trace/fgraph.c 		if (t->ret_stack == NULL) {
t                 370 kernel/trace/fgraph.c 			atomic_set(&t->tracing_graph_pause, 0);
t                 371 kernel/trace/fgraph.c 			atomic_set(&t->trace_overrun, 0);
t                 372 kernel/trace/fgraph.c 			t->curr_ret_stack = -1;
t                 373 kernel/trace/fgraph.c 			t->curr_ret_depth = -1;
t                 376 kernel/trace/fgraph.c 			t->ret_stack = ret_stack_list[start++];
t                 378 kernel/trace/fgraph.c 	} while_each_thread(g, t);
t                 463 kernel/trace/fgraph.c graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
t                 465 kernel/trace/fgraph.c 	atomic_set(&t->tracing_graph_pause, 0);
t                 466 kernel/trace/fgraph.c 	atomic_set(&t->trace_overrun, 0);
t                 467 kernel/trace/fgraph.c 	t->ftrace_timestamp = 0;
t                 470 kernel/trace/fgraph.c 	t->ret_stack = ret_stack;
t                 477 kernel/trace/fgraph.c void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
t                 479 kernel/trace/fgraph.c 	t->curr_ret_stack = -1;
t                 480 kernel/trace/fgraph.c 	t->curr_ret_depth = -1;
t                 485 kernel/trace/fgraph.c 	if (t->ret_stack)
t                 486 kernel/trace/fgraph.c 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
t                 501 kernel/trace/fgraph.c 		graph_init_task(t, ret_stack);
t                 506 kernel/trace/fgraph.c void ftrace_graph_init_task(struct task_struct *t)
t                 509 kernel/trace/fgraph.c 	t->ret_stack = NULL;
t                 510 kernel/trace/fgraph.c 	t->curr_ret_stack = -1;
t                 511 kernel/trace/fgraph.c 	t->curr_ret_depth = -1;
t                 521 kernel/trace/fgraph.c 		graph_init_task(t, ret_stack);
t                 525 kernel/trace/fgraph.c void ftrace_graph_exit_task(struct task_struct *t)
t                 527 kernel/trace/fgraph.c 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
t                 529 kernel/trace/fgraph.c 	t->ret_stack = NULL;
t                1734 kernel/trace/trace.c 	struct tracer *t, **last;
t                1758 kernel/trace/trace.c 			for (t = trace_types; t; t = t->next) {
t                1759 kernel/trace/trace.c 				if (t == p->type) {
t                1760 kernel/trace/trace.c 					*last = t->next;
t                1763 kernel/trace/trace.c 				last = &t->next;
t                1784 kernel/trace/trace.c static void add_tracer_options(struct trace_array *tr, struct tracer *t);
t                1796 kernel/trace/trace.c 	struct tracer *t;
t                1819 kernel/trace/trace.c 	for (t = trace_types; t; t = t->next) {
t                1820 kernel/trace/trace.c 		if (strcmp(type->name, t->name) == 0) {
t                3545 kernel/trace/trace.c 	unsigned long t, e;
t                3552 kernel/trace/trace.c 		get_total_entries_cpu(buf, &t, &e, cpu);
t                3553 kernel/trace/trace.c 		*total += t;
t                4306 kernel/trace/trace.c trace_ok_for_array(struct tracer *t, struct trace_array *tr)
t                4308 kernel/trace/trace.c 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
t                4313 kernel/trace/trace.c get_tracer_for_array(struct trace_array *tr, struct tracer *t)
t                4315 kernel/trace/trace.c 	while (t && !trace_ok_for_array(t, tr))
t                4316 kernel/trace/trace.c 		t = t->next;
t                4318 kernel/trace/trace.c 	return t;
t                4325 kernel/trace/trace.c 	struct tracer *t = v;
t                4329 kernel/trace/trace.c 	if (t)
t                4330 kernel/trace/trace.c 		t = get_tracer_for_array(tr, t->next);
t                4332 kernel/trace/trace.c 	return t;
t                4338 kernel/trace/trace.c 	struct tracer *t;
t                4343 kernel/trace/trace.c 	t = get_tracer_for_array(tr, trace_types);
t                4344 kernel/trace/trace.c 	for (; t && l < *pos; t = t_next(m, t, &l))
t                4347 kernel/trace/trace.c 	return t;
t                4357 kernel/trace/trace.c 	struct tracer *t = v;
t                4359 kernel/trace/trace.c 	if (!t)
t                4362 kernel/trace/trace.c 	seq_puts(m, t->name);
t                4363 kernel/trace/trace.c 	if (t->next)
t                5448 kernel/trace/trace.c int tracer_init(struct tracer *t, struct trace_array *tr)
t                5451 kernel/trace/trace.c 	return t->init(tr);
t                5627 kernel/trace/trace.c static void add_tracer_options(struct trace_array *tr, struct tracer *t)
t                5633 kernel/trace/trace.c 	create_trace_option_files(tr, t);
t                5638 kernel/trace/trace.c 	struct tracer *t;
t                5654 kernel/trace/trace.c 	for (t = trace_types; t; t = t->next) {
t                5655 kernel/trace/trace.c 		if (strcmp(t->name, buf) == 0)
t                5658 kernel/trace/trace.c 	if (!t) {
t                5662 kernel/trace/trace.c 	if (t == tr->current_trace)
t                5666 kernel/trace/trace.c 	if (t->use_max_tr) {
t                5676 kernel/trace/trace.c 	if (system_state < SYSTEM_RUNNING && t->noboot) {
t                5678 kernel/trace/trace.c 			t->name);
t                5683 kernel/trace/trace.c 	if (!trace_ok_for_array(t, tr)) {
t                5707 kernel/trace/trace.c 	if (had_max_tr && !t->use_max_tr) {
t                5721 kernel/trace/trace.c 	if (t->use_max_tr && !had_max_tr) {
t                5728 kernel/trace/trace.c 	if (t->init) {
t                5729 kernel/trace/trace.c 		ret = tracer_init(t, tr);
t                5734 kernel/trace/trace.c 	tr->current_trace = t;
t                7533 kernel/trace/trace.c 	unsigned long long t;
t                7556 kernel/trace/trace.c 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
t                7557 kernel/trace/trace.c 		usec_rem = do_div(t, USEC_PER_SEC);
t                7559 kernel/trace/trace.c 								t, usec_rem);
t                7561 kernel/trace/trace.c 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
t                7562 kernel/trace/trace.c 		usec_rem = do_div(t, USEC_PER_SEC);
t                7563 kernel/trace/trace.c 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
t                8370 kernel/trace/trace.c 	struct tracer *t;
t                8372 kernel/trace/trace.c 	for (t = trace_types; t; t = t->next)
t                8373 kernel/trace/trace.c 		add_tracer_options(tr, t);
t                 679 kernel/trace/trace.h int tracer_init(struct tracer *t, struct trace_array *tr);
t                 119 kernel/trace/trace_events_filter.c 	int t, s;
t                 121 kernel/trace/trace_events_filter.c 	t = prog[N].target;
t                 122 kernel/trace/trace_events_filter.c 	s = prog[t].target;
t                 123 kernel/trace/trace_events_filter.c 	prog[t].when_to_branch = invert;
t                 124 kernel/trace/trace_events_filter.c 	prog[t].target = N;
t                 115 kernel/trace/trace_events_trigger.c static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
t                 119 kernel/trace/trace_events_trigger.c 	if (t == SHOW_AVAILABLE_TRIGGERS) {
t                 123 kernel/trace/trace_events_trigger.c 	return seq_list_next(t, &event_file->triggers, pos);
t                 142 kernel/trace/trace_events_trigger.c static void trigger_stop(struct seq_file *m, void *t)
t                 490 kernel/trace/trace_functions_graph.c static void print_graph_abs_time(u64 t, struct trace_seq *s)
t                 494 kernel/trace/trace_functions_graph.c 	usecs_rem = do_div(t, NSEC_PER_SEC);
t                 498 kernel/trace/trace_functions_graph.c 			 (unsigned long)t, usecs_rem);
t                 173 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
t                 174 kernel/trace/trace_mmiotrace.c 	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
t                 175 kernel/trace/trace_mmiotrace.c 	unsigned secs		= (unsigned long)t;
t                 218 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
t                 219 kernel/trace/trace_mmiotrace.c 	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
t                 220 kernel/trace/trace_mmiotrace.c 	unsigned secs		= (unsigned long)t;
t                 252 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
t                 253 kernel/trace/trace_mmiotrace.c 	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
t                 254 kernel/trace/trace_mmiotrace.c 	unsigned secs		= (unsigned long)t;
t                 570 kernel/trace/trace_output.c 	unsigned long long t;
t                 593 kernel/trace/trace_output.c 		t = ns2usecs(iter->ts);
t                 594 kernel/trace/trace_output.c 		usec_rem = do_div(t, USEC_PER_SEC);
t                 595 kernel/trace/trace_output.c 		secs = (unsigned long)t;
t                 272 kernel/trace/trace_probe.c static int parse_probe_vars(char *arg, const struct fetch_type *t,
t                 509 kernel/trace/trace_probe.c 				      const struct fetch_type *t,
t                 535 kernel/trace/trace_probe.c 	code->lshift = BYTES_TO_BITS(t->size) - (bw + bo);
t                 536 kernel/trace/trace_probe.c 	code->rshift = BYTES_TO_BITS(t->size) - bw;
t                 537 kernel/trace/trace_probe.c 	code->basesize = t->size;
t                 539 kernel/trace/trace_probe.c 	return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
t                 547 kernel/trace/trace_probe.c 	char *t, *t2, *t3;
t                 563 kernel/trace/trace_probe.c 	t = strchr(arg, ':');
t                 564 kernel/trace/trace_probe.c 	if (t) {
t                 565 kernel/trace/trace_probe.c 		*t = '\0';
t                 566 kernel/trace/trace_probe.c 		t2 = strchr(++t, '[');
t                 600 kernel/trace/trace_probe.c 		if (parg->count || (t && strcmp(t, "string")))
t                 604 kernel/trace/trace_probe.c 		parg->type = find_fetch_type(t);
t                 606 kernel/trace/trace_probe.c 		trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_TYPE);
t                 637 kernel/trace/trace_probe.c 			trace_probe_log_err(offset + (t ? (t - arg) : 0),
t                 683 kernel/trace/trace_probe.c 	if (t != NULL) {
t                 684 kernel/trace/trace_probe.c 		ret = __parse_bitfield_probe_arg(t, parg->type, &code);
t                 686 kernel/trace/trace_probe.c 			trace_probe_log_err(offset + t - arg, BAD_BITFIELD);
t                 695 kernel/trace/trace_probe.c 			trace_probe_log_err(offset + (t ? (t - arg) : 0),
t                 171 kernel/trace/trace_probe.h #define __DEFAULT_FETCH_TYPE(t) x##t
t                 172 kernel/trace/trace_probe.h #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
t                 176 kernel/trace/trace_probe.h #define __ADDR_FETCH_TYPE(t) u##t
t                 177 kernel/trace/trace_probe.h #define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t)
t                 565 kernel/tracepoint.c 	struct task_struct *p, *t;
t                 569 kernel/tracepoint.c 		for_each_process_thread(p, t) {
t                 570 kernel/tracepoint.c 			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
t                 581 kernel/tracepoint.c 	struct task_struct *p, *t;
t                 586 kernel/tracepoint.c 		for_each_process_thread(p, t) {
t                 587 kernel/tracepoint.c 			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
t                 646 kernel/umh.c   	struct ctl_table t;
t                 670 kernel/umh.c   	t = *table;
t                 671 kernel/umh.c   	t.data = &cap_array;
t                 677 kernel/umh.c   	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
t                1614 kernel/workqueue.c void delayed_work_timer_fn(struct timer_list *t)
t                1616 kernel/workqueue.c 	struct delayed_work *dwork = from_timer(dwork, t, timer);
t                1989 kernel/workqueue.c static void idle_worker_timeout(struct timer_list *t)
t                1991 kernel/workqueue.c 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
t                2037 kernel/workqueue.c static void pool_mayday_timeout(struct timer_list *t)
t                2039 kernel/workqueue.c 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
t                 225 lib/842/842_compress.c 	u8 *t = comp_ops[c];
t                 231 lib/842/842_compress.c 	pr_debug("template %x\n", t[4]);
t                 233 lib/842/842_compress.c 	ret = add_bits(p, t[4], OP_BITS);
t                 238 lib/842/842_compress.c 		pr_debug("op %x\n", t[i]);
t                 240 lib/842/842_compress.c 		switch (t[i] & OP_AMOUNT) {
t                 244 lib/842/842_compress.c 			else if (t[i] & OP_ACTION_INDEX)
t                 246 lib/842/842_compress.c 			else if (t[i] & OP_ACTION_DATA)
t                 252 lib/842/842_compress.c 			if (b == 2 && t[i] & OP_ACTION_DATA)
t                 256 lib/842/842_compress.c 			else if (t[i] & OP_ACTION_INDEX)
t                 258 lib/842/842_compress.c 			else if (t[i] & OP_ACTION_DATA)
t                 266 lib/842/842_compress.c 			if (t[i] & OP_ACTION_INDEX)
t                 268 lib/842/842_compress.c 			else if (t[i] & OP_ACTION_DATA)
t                 274 lib/842/842_compress.c 			inv = (b != 8) || !(t[i] & OP_ACTION_NOOP);
t                 286 lib/842/842_compress.c 			       c, i, t[0], t[1], t[2], t[3]);
t                 290 lib/842/842_compress.c 		b += t[i] & OP_AMOUNT;
t                 295 lib/842/842_compress.c 		       c, b, t[0], t[1], t[2], t[3]);
t                 300 lib/842/842_compress.c 		atomic_inc(&template_count[t[4]]);
t                 382 lib/842/842_compress.c 	u8 *t = comp_ops[c];
t                 389 lib/842/842_compress.c 		if (t[i] & OP_ACTION_INDEX) {
t                 390 lib/842/842_compress.c 			if (t[i] & OP_AMOUNT_2)
t                 392 lib/842/842_compress.c 			else if (t[i] & OP_AMOUNT_4)
t                 394 lib/842/842_compress.c 			else if (t[i] & OP_AMOUNT_8)
t                 402 lib/842/842_compress.c 		b += t[i] & OP_AMOUNT;
t                  85 lib/bch.c      #define GF_T(_p)               ((_p)->t)
t                 353 lib/bch.c      	const int t = GF_T(bch);
t                 361 lib/bch.c      	memset(syn, 0, 2*t*sizeof(*syn));
t                 369 lib/bch.c      			for (j = 0; j < 2*t; j += 2)
t                 377 lib/bch.c      	for (j = 0; j < t; j++)
t                 389 lib/bch.c      	const unsigned int t = GF_T(bch);
t                 397 lib/bch.c      	memset(pelp, 0, GF_POLY_SZ(2*t));
t                 398 lib/bch.c      	memset(elp, 0, GF_POLY_SZ(2*t));
t                 406 lib/bch.c      	for (i = 0; (i < t) && (elp->deg <= t); i++) {
t                 428 lib/bch.c      		if (i < t-1) {
t                 435 lib/bch.c      	return (elp->deg > t) ? -1 : (int)elp->deg;
t                 524 lib/bch.c      	unsigned int mask = 0xff, t, rows[16] = {0,};
t                 544 lib/bch.c      			t = ((rows[k] >> j)^rows[k+j]) & mask;
t                 545 lib/bch.c      			rows[k] ^= (t << j);
t                 546 lib/bch.c      			rows[k+j] ^= t;
t                1177 lib/bch.c      	const unsigned int t = GF_T(bch);
t                1183 lib/bch.c      	g = bch_alloc(GF_POLY_SZ(m*t), &err);
t                1185 lib/bch.c      	genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err);
t                1195 lib/bch.c      	for (i = 0; i < t; i++) {
t                1259 lib/bch.c      struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
t                1275 lib/bch.c      	if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) {
t                1290 lib/bch.c      	if (t > BCH_MAX_T)
t                1298 lib/bch.c      	if ((t < 1) || (m*t >= ((1 << m)-1)))
t                1311 lib/bch.c      	bch->t = t;
t                1313 lib/bch.c      	words  = DIV_ROUND_UP(m*t, 32);
t                1314 lib/bch.c      	bch->ecc_bytes = DIV_ROUND_UP(m*t, 8);
t                1321 lib/bch.c      	bch->syn       = bch_alloc(2*t*sizeof(*bch->syn), &err);
t                1322 lib/bch.c      	bch->cache     = bch_alloc(2*t*sizeof(*bch->cache), &err);
t                1323 lib/bch.c      	bch->elp       = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
t                1326 lib/bch.c      		bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
t                  45 lib/crc64.c    	size_t i, t;
t                  50 lib/crc64.c    		t = ((crc >> 56) ^ (*_p++)) & 0xFF;
t                  51 lib/crc64.c    		crc = crc64table[t] ^ (crc << 8);
t                  33 lib/crc8.c     	u8 t = msbit;
t                  38 lib/crc8.c     		t = (t << 1) ^ (t & msbit ? polynomial : 0);
t                  40 lib/crc8.c     			table[i+j] = table[j] ^ t;
t                  54 lib/crc8.c     	u8 t = 1;
t                  59 lib/crc8.c     		t = (t >> 1) ^ (t & 1 ? polynomial : 0);
t                  61 lib/crc8.c     			table[i+j] = table[j] ^ t;
t                 162 lib/decompress_bunzip2.c 		i, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount;
t                 195 lib/decompress_bunzip2.c 	t = get_bits(bd, 16);
t                 198 lib/decompress_bunzip2.c 		if (t&(1 << (15-i))) {
t                 246 lib/decompress_bunzip2.c 		t = get_bits(bd, 5)-1;
t                 249 lib/decompress_bunzip2.c 				if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
t                 265 lib/decompress_bunzip2.c 				t += (((k+1)&2)-1);
t                 269 lib/decompress_bunzip2.c 			length[i] = t+1;
t                 310 lib/decompress_bunzip2.c 			for (t = 0; t < symCount; t++)
t                 311 lib/decompress_bunzip2.c 				if (length[t] == i)
t                 312 lib/decompress_bunzip2.c 					hufGroup->permute[pp++] = t;
t                 323 lib/decompress_bunzip2.c 		pp = t = 0;
t                 338 lib/decompress_bunzip2.c 			base[i+1] = pp-(t += temp[i]);
t                 415 lib/decompress_bunzip2.c 				t = 0;
t                 427 lib/decompress_bunzip2.c 			t += (runPos << nextSym);
t                 441 lib/decompress_bunzip2.c 			if (dbufCount+t >= dbufSize)
t                 445 lib/decompress_bunzip2.c 			byteCount[uc] += t;
t                 446 lib/decompress_bunzip2.c 			while (t--)
t                 156 lib/decompress_unlzma.c 	uint32_t t = rc_is_bit_0_helper(rc, p);
t                 157 lib/decompress_unlzma.c 	return rc->code < t;
t                 144 lib/inflate.c      struct huft *t;     /* pointer to next level of table */
t                 328 lib/inflate.c  	struct huft **t,        /* result: starting table */
t                 384 lib/inflate.c      *t = (struct huft *)NULL;
t                 498 lib/inflate.c          *t = q + 1;             /* link to list for huft_free() */
t                 499 lib/inflate.c          *(t = &(q->v.t)) = (struct huft *)NULL;
t                 509 lib/inflate.c            r.v.t = q;            /* pointer to this table */
t                 568 lib/inflate.c  	struct huft *t         /* table to free */
t                 578 lib/inflate.c    p = t;
t                 581 lib/inflate.c      q = (--p)->v.t;
t                 601 lib/inflate.c    struct huft *t;       /* pointer to table entry */
t                 618 lib/inflate.c      if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
t                 622 lib/inflate.c          DUMPBITS(t->b)
t                 625 lib/inflate.c        } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
t                 626 lib/inflate.c      DUMPBITS(t->b)
t                 629 lib/inflate.c        slide[w++] = (uch)t->v.n;
t                 645 lib/inflate.c        n = t->v.n + ((unsigned)b & mask_bits[e]);
t                 650 lib/inflate.c        if ((e = (t = td + ((unsigned)b & md))->e) > 16)
t                 654 lib/inflate.c            DUMPBITS(t->b)
t                 657 lib/inflate.c          } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
t                 658 lib/inflate.c        DUMPBITS(t->b)
t                 660 lib/inflate.c        d = w - t->v.n - ((unsigned)b & mask_bits[e]);
t                1039 lib/inflate.c    unsigned t;           /* block type */
t                1058 lib/inflate.c    t = (unsigned)b & 3;
t                1067 lib/inflate.c    if (t == 2)
t                1069 lib/inflate.c    if (t == 0)
t                1071 lib/inflate.c    if (t == 1)
t                  19 lib/is_single_threaded.c 	struct task_struct *p, *t;
t                  36 lib/is_single_threaded.c 		for_each_thread(p, t) {
t                  37 lib/is_single_threaded.c 			if (unlikely(t->mm == mm))
t                  39 lib/is_single_threaded.c 			if (likely(t->mm))
t                  68 lib/kobject.c  	struct kobj_type *t = get_ktype(kobj);
t                  73 lib/kobject.c  	if (t && t->default_attrs) {
t                  74 lib/kobject.c  		for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
t                 300 lib/kobject.c  		char *t;
t                 302 lib/kobject.c  		t = kstrdup(s, GFP_KERNEL);
t                 304 lib/kobject.c  		if (!t)
t                 306 lib/kobject.c  		strreplace(t, '/', '!');
t                 307 lib/kobject.c  		s = t;
t                 666 lib/kobject.c  	struct kobj_type *t = get_ktype(kobj);
t                 672 lib/kobject.c  	if (t && !t->release)
t                 690 lib/kobject.c  	if (t && t->release) {
t                 693 lib/kobject.c  		t->release(kobj);
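
Among the lib/kobject.c lines above is the name-sanitisation step from the name-setting path: the caller's string is duplicated and every '/' is rewritten to '!' before the name can become a sysfs entry. A userspace re-expression with kstrdup() and strreplace() open-coded; the function name here is invented for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Take a private copy of the caller's string and rewrite every '/' (not
 * allowed in a sysfs entry name) to '!', as in the lines above. */
static char *sanitize_name(const char *s)
{
	size_t len = strlen(s) + 1;
	char *t = malloc(len);
	char *p;

	if (!t)
		return NULL;
	memcpy(t, s, len);
	for (p = t; *p; p++)
		if (*p == '/')
			*p = '!';
	return t;
}

int main(void)
{
	char *name = sanitize_name("queue/scheduler");

	if (name) {
		puts(name);	/* prints "queue!scheduler" */
		free(name);
	}
	return 0;
}
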
t                  53 lib/locking-selftest.c static struct ww_acquire_ctx t, t2;
t                1118 lib/locking-selftest.c 	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
t                1124 lib/locking-selftest.c 	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
t                1312 lib/locking-selftest.c 	WWAI(&t);
t                1313 lib/locking-selftest.c 	t.stamp++;
t                1315 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1322 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1328 lib/locking-selftest.c 	t2 = t;
t                1346 lib/locking-selftest.c 	WWAI(&t);
t                1398 lib/locking-selftest.c 	mutex_lock_nest_lock(&o.base, &t);
t                1405 lib/locking-selftest.c 	WWAI(&t);
t                1411 lib/locking-selftest.c 	WWAI(&t);
t                1413 lib/locking-selftest.c 	t.ww_class = NULL;
t                1415 lib/locking-selftest.c 	WWL(&o, &t);
t                1420 lib/locking-selftest.c 	WWAI(&t);
t                1421 lib/locking-selftest.c 	WWAD(&t);
t                1422 lib/locking-selftest.c 	WWAD(&t);
t                1423 lib/locking-selftest.c 	WWAF(&t);
t                1428 lib/locking-selftest.c 	WWAI(&t);
t                1429 lib/locking-selftest.c 	WWAD(&t);
t                1430 lib/locking-selftest.c 	WWAF(&t);
t                1431 lib/locking-selftest.c 	WWAF(&t);
t                1436 lib/locking-selftest.c 	WWAI(&t);
t                1437 lib/locking-selftest.c 	WWL(&o, &t);
t                1438 lib/locking-selftest.c 	WWAD(&t);
t                1439 lib/locking-selftest.c 	WWAF(&t);
t                1444 lib/locking-selftest.c 	WWAI(&t);
t                1445 lib/locking-selftest.c 	WWAD(&t);
t                1446 lib/locking-selftest.c 	WWL(&o, &t);
t                1458 lib/locking-selftest.c 	WWAI(&t);
t                1459 lib/locking-selftest.c 	WWL(&o, &t);
t                1460 lib/locking-selftest.c 	t.acquired = 0;
t                1462 lib/locking-selftest.c 	WWAF(&t);
t                1467 lib/locking-selftest.c 	WWAI(&t);
t                1469 lib/locking-selftest.c 	WWL(&o, &t);
t                1480 lib/locking-selftest.c 	WWAI(&t);
t                1481 lib/locking-selftest.c 	t2 = t;
t                1484 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1487 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1495 lib/locking-selftest.c 	WWL(&o2, &t);
t                1506 lib/locking-selftest.c 	WWAI(&t);
t                1507 lib/locking-selftest.c 	t2 = t;
t                1510 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1513 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1521 lib/locking-selftest.c 	ww_mutex_lock_slow(&o2, &t);
t                1532 lib/locking-selftest.c 	WWAI(&t);
t                1533 lib/locking-selftest.c 	t2 = t;
t                1536 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1539 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1546 lib/locking-selftest.c 	WWL(&o2, &t);
t                1557 lib/locking-selftest.c 	WWAI(&t);
t                1558 lib/locking-selftest.c 	t2 = t;
t                1561 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1564 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1571 lib/locking-selftest.c 	ww_mutex_lock_slow(&o2, &t);
t                1582 lib/locking-selftest.c 	WWAI(&t);
t                1583 lib/locking-selftest.c 	t2 = t;
t                1586 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1589 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1592 lib/locking-selftest.c 	ret = WWL(&o3, &t);
t                1603 lib/locking-selftest.c 	WWAI(&t);
t                1604 lib/locking-selftest.c 	t2 = t;
t                1607 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1610 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1613 lib/locking-selftest.c 	ww_mutex_lock_slow(&o3, &t);
t                1628 lib/locking-selftest.c 	WWAI(&t);
t                1629 lib/locking-selftest.c 	t2 = t;
t                1632 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1635 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1638 lib/locking-selftest.c 	ret = WWL(&o3, &t);
t                1654 lib/locking-selftest.c 	WWAI(&t);
t                1655 lib/locking-selftest.c 	t2 = t;
t                1658 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1661 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1664 lib/locking-selftest.c 	ww_mutex_lock_slow(&o3, &t);
t                1675 lib/locking-selftest.c 	WWAI(&t);
t                1676 lib/locking-selftest.c 	t2 = t;
t                1679 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1682 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1689 lib/locking-selftest.c 	ret = WWL(&o3, &t);
t                1700 lib/locking-selftest.c 	WWAI(&t);
t                1701 lib/locking-selftest.c 	t2 = t;
t                1704 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1707 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1714 lib/locking-selftest.c 	ww_mutex_lock_slow(&o3, &t);
t                1725 lib/locking-selftest.c 	WWAI(&t);
t                1727 lib/locking-selftest.c 	ww_mutex_lock_slow(&o, &t);
t                1734 lib/locking-selftest.c 	WWAI(&t);
t                1736 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1745 lib/locking-selftest.c 	WWAI(&t);
t                1747 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1760 lib/locking-selftest.c 	WWAI(&t);
t                1762 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1765 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1803 lib/locking-selftest.c 	WWAI(&t);
t                1805 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1829 lib/locking-selftest.c 	WWAI(&t);
t                1831 lib/locking-selftest.c 	ret = WWL(&o2, &t);
t                1878 lib/locking-selftest.c 	WWAI(&t);
t                1880 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                1887 lib/locking-selftest.c 	ret = WWL(&o, &t);
t                  41 lib/lzo/lzo1x_compress.c 		size_t t, m_len, m_off;
t                 108 lib/lzo/lzo1x_compress.c 			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
t                 109 lib/lzo/lzo1x_compress.c 			m_pos = in + dict[t];
t                 110 lib/lzo/lzo1x_compress.c 			dict[t] = (lzo_dict_t) (ip - in);
t                 117 lib/lzo/lzo1x_compress.c 		t = ip - ii;
t                 118 lib/lzo/lzo1x_compress.c 		if (t != 0) {
t                 119 lib/lzo/lzo1x_compress.c 			if (t <= 3) {
t                 120 lib/lzo/lzo1x_compress.c 				op[*state_offset] |= t;
t                 122 lib/lzo/lzo1x_compress.c 				op += t;
t                 123 lib/lzo/lzo1x_compress.c 			} else if (t <= 16) {
t                 124 lib/lzo/lzo1x_compress.c 				*op++ = (t - 3);
t                 127 lib/lzo/lzo1x_compress.c 				op += t;
t                 129 lib/lzo/lzo1x_compress.c 				if (t <= 18) {
t                 130 lib/lzo/lzo1x_compress.c 					*op++ = (t - 3);
t                 132 lib/lzo/lzo1x_compress.c 					size_t tt = t - 18;
t                 145 lib/lzo/lzo1x_compress.c 					t -= 16;
t                 146 lib/lzo/lzo1x_compress.c 				} while (t >= 16);
t                 147 lib/lzo/lzo1x_compress.c 				if (t > 0) do {
t                 149 lib/lzo/lzo1x_compress.c 				} while (--t > 0);
t                 312 lib/lzo/lzo1x_compress.c 	size_t t = 0;
t                 331 lib/lzo/lzo1x_compress.c 		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
t                 335 lib/lzo/lzo1x_compress.c 		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
t                 341 lib/lzo/lzo1x_compress.c 	t += l;
t                 343 lib/lzo/lzo1x_compress.c 	if (t > 0) {
t                 344 lib/lzo/lzo1x_compress.c 		const unsigned char *ii = in + in_len - t;
t                 346 lib/lzo/lzo1x_compress.c 		if (op == data_start && t <= 238) {
t                 347 lib/lzo/lzo1x_compress.c 			*op++ = (17 + t);
t                 348 lib/lzo/lzo1x_compress.c 		} else if (t <= 3) {
t                 349 lib/lzo/lzo1x_compress.c 			op[state_offset] |= t;
t                 350 lib/lzo/lzo1x_compress.c 		} else if (t <= 18) {
t                 351 lib/lzo/lzo1x_compress.c 			*op++ = (t - 3);
t                 353 lib/lzo/lzo1x_compress.c 			size_t tt = t - 18;
t                 361 lib/lzo/lzo1x_compress.c 		if (t >= 16) do {
t                 366 lib/lzo/lzo1x_compress.c 			t -= 16;
t                 367 lib/lzo/lzo1x_compress.c 		} while (t >= 16);
t                 368 lib/lzo/lzo1x_compress.c 		if (t > 0) do {
t                 370 lib/lzo/lzo1x_compress.c 		} while (--t > 0);
t                  44 lib/lzo/lzo1x_decompress_safe.c 	size_t t, next;
t                  66 lib/lzo/lzo1x_decompress_safe.c 		t = *ip++ - 17;
t                  67 lib/lzo/lzo1x_decompress_safe.c 		if (t < 4) {
t                  68 lib/lzo/lzo1x_decompress_safe.c 			next = t;
t                  75 lib/lzo/lzo1x_decompress_safe.c 		t = *ip++;
t                  76 lib/lzo/lzo1x_decompress_safe.c 		if (t < 16) {
t                  78 lib/lzo/lzo1x_decompress_safe.c 				if (unlikely(t == 0)) {
t                  91 lib/lzo/lzo1x_decompress_safe.c 					t += offset + 15 + *ip++;
t                  93 lib/lzo/lzo1x_decompress_safe.c 				t += 3;
t                  96 lib/lzo/lzo1x_decompress_safe.c 				if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
t                  97 lib/lzo/lzo1x_decompress_safe.c 					const unsigned char *ie = ip + t;
t                  98 lib/lzo/lzo1x_decompress_safe.c 					unsigned char *oe = op + t;
t                 112 lib/lzo/lzo1x_decompress_safe.c 					NEED_OP(t);
t                 113 lib/lzo/lzo1x_decompress_safe.c 					NEED_IP(t + 3);
t                 116 lib/lzo/lzo1x_decompress_safe.c 					} while (--t > 0);
t                 121 lib/lzo/lzo1x_decompress_safe.c 				next = t & 3;
t                 123 lib/lzo/lzo1x_decompress_safe.c 				m_pos -= t >> 2;
t                 132 lib/lzo/lzo1x_decompress_safe.c 				next = t & 3;
t                 134 lib/lzo/lzo1x_decompress_safe.c 				m_pos -= t >> 2;
t                 136 lib/lzo/lzo1x_decompress_safe.c 				t = 3;
t                 138 lib/lzo/lzo1x_decompress_safe.c 		} else if (t >= 64) {
t                 139 lib/lzo/lzo1x_decompress_safe.c 			next = t & 3;
t                 141 lib/lzo/lzo1x_decompress_safe.c 			m_pos -= (t >> 2) & 7;
t                 143 lib/lzo/lzo1x_decompress_safe.c 			t = (t >> 5) - 1 + (3 - 1);
t                 144 lib/lzo/lzo1x_decompress_safe.c 		} else if (t >= 32) {
t                 145 lib/lzo/lzo1x_decompress_safe.c 			t = (t & 31) + (3 - 1);
t                 146 lib/lzo/lzo1x_decompress_safe.c 			if (unlikely(t == 2)) {
t                 159 lib/lzo/lzo1x_decompress_safe.c 				t += offset + 31 + *ip++;
t                 171 lib/lzo/lzo1x_decompress_safe.c 			    ((t & 0xf8) == 0x18) &&
t                 174 lib/lzo/lzo1x_decompress_safe.c 				t &= 7;
t                 175 lib/lzo/lzo1x_decompress_safe.c 				t |= ip[2] << 3;
t                 176 lib/lzo/lzo1x_decompress_safe.c 				t += MIN_ZERO_RUN_LENGTH;
t                 177 lib/lzo/lzo1x_decompress_safe.c 				NEED_OP(t);
t                 178 lib/lzo/lzo1x_decompress_safe.c 				memset(op, 0, t);
t                 179 lib/lzo/lzo1x_decompress_safe.c 				op += t;
t                 185 lib/lzo/lzo1x_decompress_safe.c 				m_pos -= (t & 8) << 11;
t                 186 lib/lzo/lzo1x_decompress_safe.c 				t = (t & 7) + (3 - 1);
t                 187 lib/lzo/lzo1x_decompress_safe.c 				if (unlikely(t == 2)) {
t                 200 lib/lzo/lzo1x_decompress_safe.c 					t += offset + 7 + *ip++;
t                 215 lib/lzo/lzo1x_decompress_safe.c 			unsigned char *oe = op + t;
t                 216 lib/lzo/lzo1x_decompress_safe.c 			if (likely(HAVE_OP(t + 15))) {
t                 234 lib/lzo/lzo1x_decompress_safe.c 				NEED_OP(t);
t                 242 lib/lzo/lzo1x_decompress_safe.c 			unsigned char *oe = op + t;
t                 243 lib/lzo/lzo1x_decompress_safe.c 			NEED_OP(t);
t                 254 lib/lzo/lzo1x_decompress_safe.c 		t = next;
t                 258 lib/lzo/lzo1x_decompress_safe.c 			op += t;
t                 259 lib/lzo/lzo1x_decompress_safe.c 			ip += t;
t                 263 lib/lzo/lzo1x_decompress_safe.c 			NEED_IP(t + 3);
t                 264 lib/lzo/lzo1x_decompress_safe.c 			NEED_OP(t);
t                 265 lib/lzo/lzo1x_decompress_safe.c 			while (t > 0) {
t                 267 lib/lzo/lzo1x_decompress_safe.c 				t--;
t                 274 lib/lzo/lzo1x_decompress_safe.c 	return (t != 3       ? LZO_E_ERROR :
t                 172 lib/math/div64.c 	s64 quot, t;
t                 175 lib/math/div64.c 	t = (dividend ^ divisor) >> 63;
t                 177 lib/math/div64.c 	return (quot ^ t) - t;
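
The lib/math/div64.c lines above derive a signed 64-bit quotient from an unsigned division plus a sign-restoring trick: t is 0 when the operand signs agree and all-ones when they differ, so (q ^ t) - t yields either q or -q. A tiny userspace sketch; it relies, as the kernel does, on arithmetic right shift of negative values, and does not handle INT64_MIN.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Sign-restoring division: compute |a| / |b| unsigned, then flip the sign
 * with the xor/subtract trick when exactly one operand was negative.
 * (Arithmetic >> on negative values is assumed; INT64_MIN is not handled.) */
static int64_t sdiv_via_udiv(int64_t dividend, int64_t divisor)
{
	int64_t t = (dividend ^ divisor) >> 63;   /* 0 or -1 (all ones) */
	uint64_t q = (uint64_t)llabs(dividend) / (uint64_t)llabs(divisor);

	return ((int64_t)q ^ t) - t;              /* q if t == 0, else -q */
}

int main(void)
{
	printf("%lld %lld %lld\n",
	       (long long)sdiv_via_udiv(-7, 2),    /* -3 */
	       (long long)sdiv_via_udiv(7, -2),    /* -3 */
	       (long long)sdiv_via_udiv(-7, -2));  /*  3 */
	return 0;
}
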
t                  42 lib/math/rational.c 		unsigned long t, a;
t                  50 lib/math/rational.c 		t = d;
t                  53 lib/math/rational.c 		n = t;
t                  54 lib/math/rational.c 		t = n0 + a * n1;
t                  56 lib/math/rational.c 		n1 = t;
t                  57 lib/math/rational.c 		t = d0 + a * d1;
t                  59 lib/math/rational.c 		d1 = t;
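
The lib/math/rational.c lines above are one step of the continued-fraction convergent recurrence: a Euclid step on (n, d) interleaved with n_k = a*n_{k-1} + n_{k-2}, and the same for the denominators. The sketch below runs that recurrence to completion, which yields the fraction in lowest terms; the in-kernel helper additionally stops early and picks the best (semi-)convergent when numerator/denominator limits would be exceeded, which is omitted here.

#include <stdio.h>

/* Convergent recurrence from the rational.c lines above, run until the
 * remainder hits zero: the final convergent n1/d1 equals num/den reduced
 * to lowest terms.  Bound handling present in the kernel is omitted. */
static void exact_reduce(unsigned long num, unsigned long den,
			 unsigned long *best_num, unsigned long *best_den)
{
	unsigned long n = num, d = den;
	unsigned long n0 = 0, n1 = 1;	/* numerators of convergents k-2, k-1 */
	unsigned long d0 = 1, d1 = 0;	/* denominators of convergents k-2, k-1 */

	while (d != 0) {
		unsigned long a = n / d;	/* next continued-fraction term */
		unsigned long t;

		t = d;				/* Euclid step: (n, d) <- (d, n mod d) */
		d = n % d;
		n = t;

		t = n0 + a * n1;		/* n_k = a*n_{k-1} + n_{k-2} */
		n0 = n1;
		n1 = t;

		t = d0 + a * d1;		/* d_k = a*d_{k-1} + d_{k-2} */
		d0 = d1;
		d1 = t;
	}
	*best_num = n1;
	*best_den = d1;
}

int main(void)
{
	unsigned long n, d;

	exact_reduce(3456, 1234, &n, &d);
	printf("3456/1234 -> %lu/%lu\n", n, d);	/* 1728/617 */
	return 0;
}
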
t                  40 lib/mpi/longlong.h #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
t                  41 lib/mpi/longlong.h #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
t                  11 lib/muldi3.c   #define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
t                  12 lib/muldi3.c   #define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
t                  32 lib/plist.c    static void plist_check_prev_next(struct list_head *t, struct list_head *p,
t                  39 lib/plist.c    			 t, t->next, t->prev,
t                  35 lib/reed_solomon/decode_rs.c 	uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1);
t                 152 lib/reed_solomon/decode_rs.c 			t[0] = lambda[0];
t                 155 lib/reed_solomon/decode_rs.c 					t[i + 1] = lambda[i + 1] ^
t                 159 lib/reed_solomon/decode_rs.c 					t[i + 1] = lambda[i + 1];
t                 177 lib/reed_solomon/decode_rs.c 			memcpy(lambda, t, (nroots + 1) * sizeof(t[0]));
t                  52 lib/sha1.c     #define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
t                  53 lib/sha1.c     #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
t                  55 lib/sha1.c     #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
t                  56 lib/sha1.c     	__u32 TEMP = input(t); setW(t, TEMP); \
t                  60 lib/sha1.c     #define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
t                  61 lib/sha1.c     #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
t                  62 lib/sha1.c     #define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
t                  63 lib/sha1.c     #define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
t                  64 lib/sha1.c     #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) ,  0xca62c1d6, A, B, C, D, E )
t                  61 lib/sort.c     		u32 t = *(u32 *)(a + (n -= 4));
t                  63 lib/sort.c     		*(u32 *)(b + n) = t;
t                  87 lib/sort.c     		u64 t = *(u64 *)(a + (n -= 8));
t                  89 lib/sort.c     		*(u64 *)(b + n) = t;
t                  92 lib/sort.c     		u32 t = *(u32 *)(a + (n -= 4));
t                  94 lib/sort.c     		*(u32 *)(b + n) = t;
t                  96 lib/sort.c     		t = *(u32 *)(a + (n -= 4));
t                  98 lib/sort.c     		*(u32 *)(b + n) = t;
t                 114 lib/sort.c     		char t = ((char *)a)[--n];
t                 116 lib/sort.c     		((char *)b)[n] = t;
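
The lib/sort.c lines above are the inner loops of the size-specialised swap helpers: elements are exchanged 8 (or 4) bytes at a time, walking backwards from the end, with a plain byte loop as the fallback. A stand-alone sketch of the 64-bit and byte variants; the kernel additionally checks size and alignment before choosing one, and builds with -fno-strict-aliasing, which the casts below assume.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 8-bytes-at-a-time element swap; n must be a non-zero multiple of 8. */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
		uint64_t t = *(uint64_t *)((char *)a + (n -= 8));

		*(uint64_t *)((char *)a + n) = *(uint64_t *)((char *)b + n);
		*(uint64_t *)((char *)b + n) = t;
	} while (n);
}

/* Byte-at-a-time fallback for odd sizes or unaligned elements. */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];

		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

int main(void)
{
	uint64_t x[2] = { 1, 2 }, y[2] = { 3, 4 };
	char s1[] = "abc", s2[] = "xyz";

	swap_words_64(x, y, sizeof(x));
	swap_bytes(s1, s2, 3);
	printf("%llu %llu / %s %s\n",
	       (unsigned long long)x[0], (unsigned long long)y[0], s1, s2);
	return 0;
}
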
t                  31 lib/test-kstrtox.c 		const struct test_fail *t = &test[i];			\
t                  36 lib/test-kstrtox.c 		rv = fn(t->str, t->base, &tmp);				\
t                  39 lib/test-kstrtox.c 				t->str, t->base, rv, tmp);		\
t                  50 lib/test-kstrtox.c 		const typeof(test[0]) *t = &test[i];			\
t                  54 lib/test-kstrtox.c 		rv = fn(t->str, t->base, &res);				\
t                  57 lib/test-kstrtox.c 				t->str, t->base, t->expected_res, rv);	\
t                  60 lib/test-kstrtox.c 		if (res != t->expected_res) {				\
t                  62 lib/test-kstrtox.c 				t->str, t->base, t->expected_res, res);	\
t                  17 lib/test_overflow.c #define DEFINE_TEST_ARRAY(t)			\
t                  18 lib/test_overflow.c 	static const struct test_ ## t {	\
t                  19 lib/test_overflow.c 		t a, b;				\
t                  20 lib/test_overflow.c 		t sum, diff, prod;		\
t                  22 lib/test_overflow.c 	} t ## _tests[] __initconst
t                 218 lib/test_overflow.c #define check_one_op(t, fmt, op, sym, a, b, r, of) do {		\
t                 219 lib/test_overflow.c 	t _r;							\
t                 226 lib/test_overflow.c 			a, b, of ? "" : " not", #t);		\
t                 232 lib/test_overflow.c 			a, b, r, _r, #t);			\
t                 237 lib/test_overflow.c #define DEFINE_TEST_FUNC(t, fmt)					\
t                 238 lib/test_overflow.c static int __init do_test_ ## t(const struct test_ ## t *p)		\
t                 242 lib/test_overflow.c 	check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of);	\
t                 243 lib/test_overflow.c 	check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of);	\
t                 244 lib/test_overflow.c 	check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of);	\
t                 245 lib/test_overflow.c 	check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of);	\
t                 246 lib/test_overflow.c 	check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of);	\
t                 251 lib/test_overflow.c static int __init test_ ## t ## _overflow(void) {			\
t                 255 lib/test_overflow.c 	pr_info("%-3s: %zu arithmetic tests\n", #t,			\
t                 256 lib/test_overflow.c 		ARRAY_SIZE(t ## _tests));				\
t                 257 lib/test_overflow.c 	for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i)			\
t                 258 lib/test_overflow.c 		err |= do_test_ ## t(&t ## _tests[i]);			\
t                 296 lib/test_overflow.c #define TEST_ONE_SHIFT(a, s, t, expect, of) ({				\
t                 300 lib/test_overflow.c 	t __e = (expect);						\
t                 301 lib/test_overflow.c 	t __d;								\
t                 305 lib/test_overflow.c 			#t, #a, #s, of ? "" : " not");			\
t                 309 lib/test_overflow.c 			#t, #a, #s, #expect);				\
t                 310 lib/test_overflow.c 		if ((t)-1 < 0)						\
t                 317 lib/test_overflow.c 		pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s,	\
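
The lib/test_overflow.c macros above stamp out, per integer type, a table of operand pairs with their expected wrapped results and overflow flags, plus a do_test_<t>() that checks each operation. A much-reduced userspace sketch of the same table-driven idea for one type and one operation; __builtin_add_overflow() is the GCC/Clang primitive that the kernel's check_add_overflow() maps to on compilers that provide it.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct test_u8 {
	uint8_t a, b;
	uint8_t sum;	/* expected wrapped result */
	bool s_of;	/* expected overflow flag */
};

static const struct test_u8 u8_tests[] = {
	{   0,   0,  0, false },
	{ 200, 100, 44, true  },	/* 300 wraps to 44 */
	{ 255,   1,  0, true  },
};

int main(void)
{
	int err = 0;

	for (size_t i = 0; i < sizeof(u8_tests) / sizeof(u8_tests[0]); i++) {
		const struct test_u8 *p = &u8_tests[i];
		uint8_t r;
		bool of = __builtin_add_overflow(p->a, p->b, &r);

		if (of != p->s_of || r != p->sum) {
			printf("FAIL: %u + %u\n", (unsigned)p->a, (unsigned)p->b);
			err = 1;
		}
	}
	printf(err ? "some tests failed\n" : "all u8 add tests passed\n");
	return err;
}
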
t                 385 lib/test_vmalloc.c 	struct test_driver *t = private;
t                 391 lib/test_vmalloc.c 	if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
t                 392 lib/test_vmalloc.c 		pr_err("Failed to set affinity to %d CPU\n", t->cpu);
t                 405 lib/test_vmalloc.c 	t->start = get_cycles();
t                 418 lib/test_vmalloc.c 				per_cpu_test_data[t->cpu][index].test_passed++;
t                 420 lib/test_vmalloc.c 				per_cpu_test_data[t->cpu][index].test_failed++;
t                 429 lib/test_vmalloc.c 		per_cpu_test_data[t->cpu][index].time = delta;
t                 431 lib/test_vmalloc.c 	t->stop = get_cycles();
t                 482 lib/test_vmalloc.c 		struct test_driver *t = &per_cpu_test_driver[cpu];
t                 484 lib/test_vmalloc.c 		t->cpu = cpu;
t                 485 lib/test_vmalloc.c 		t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
t                 487 lib/test_vmalloc.c 		if (!IS_ERR(t->task))
t                 510 lib/test_vmalloc.c 		struct test_driver *t = &per_cpu_test_driver[cpu];
t                 513 lib/test_vmalloc.c 		if (!IS_ERR(t->task))
t                 514 lib/test_vmalloc.c 			kthread_stop(t->task);
t                 530 lib/test_vmalloc.c 			cpu, t->stop - t->start);
t                 126 lib/ts_fsm.c   static inline int match_token(struct ts_fsm_token *t, u8 d)
t                 128 lib/ts_fsm.c   	if (t->type)
t                 129 lib/ts_fsm.c   		return (token_lookup_tbl[d] & t->type) != 0;
t                 131 lib/ts_fsm.c   		return t->value == d;
t                 272 lib/ts_fsm.c   		struct ts_fsm_token *t = &tokens[i];
t                 274 lib/ts_fsm.c   		if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
t                 277 lib/ts_fsm.c   		if (t->recur == TS_FSM_HEAD_IGNORE &&
t                 292 lib/ts_fsm.c   		struct ts_fsm_token *t = &fsm->tokens[i];
t                 293 lib/ts_fsm.c   		t->type = token_map[t->type];
t                 170 lib/vdso/gettimeofday.c 	time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
t                 173 lib/vdso/gettimeofday.c 		*time = t;
t                 175 lib/vdso/gettimeofday.c 	return t;
t                  32 lib/win_minmax.c 	u32 dt = val->t - m->s[0].t;
t                  45 lib/win_minmax.c 		if (unlikely(val->t - m->s[0].t > win)) {
t                  50 lib/win_minmax.c 	} else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
t                  56 lib/win_minmax.c 	} else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
t                  67 lib/win_minmax.c u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
t                  69 lib/win_minmax.c 	struct minmax_sample val = { .t = t, .v = meas };
t                  72 lib/win_minmax.c 	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
t                  73 lib/win_minmax.c 		return minmax_reset(m, t, meas);  /* forget earlier samples */
t                  85 lib/win_minmax.c u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
t                  87 lib/win_minmax.c 	struct minmax_sample val = { .t = t, .v = meas };
t                  90 lib/win_minmax.c 	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
t                  91 lib/win_minmax.c 		return minmax_reset(m, t, meas);  /* forget earlier samples */
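
The lib/win_minmax.c lines above implement a windowed running max/min that keeps only three samples: the best value seen in the window plus staggered second and third choices, so the estimate degrades gracefully as the best sample ages out. Below is a stand-alone sketch of the max-tracking half, reconstructed from the conditions visible above; treat it as illustrative rather than a byte-for-byte copy of the kernel file.

#include <stdint.h>
#include <stdio.h>

struct minmax_sample { uint32_t t, v; };	/* time, measurement */
struct minmax { struct minmax_sample s[3]; };	/* best, 2nd, 3rd choice */

static uint32_t minmax_reset(struct minmax *m, uint32_t t, uint32_t meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	m->s[2] = m->s[1] = m->s[0] = val;
	return m->s[0].v;
}

static uint32_t minmax_subwin_update(struct minmax *m, uint32_t win,
				     const struct minmax_sample *val)
{
	uint32_t dt = val->t - m->s[0].t;

	if (dt > win) {
		/* Best sample aged out: promote 2nd/3rd choices, re-check. */
		m->s[0] = m->s[1];
		m->s[1] = m->s[2];
		m->s[2] = *val;
		if (val->t - m->s[0].t > win) {
			m->s[0] = m->s[1];
			m->s[1] = m->s[2];
			m->s[2] = *val;
		}
	} else if (m->s[1].t == m->s[0].t && dt > win / 4) {
		/* A quarter of the window passed without a new 2nd choice. */
		m->s[2] = m->s[1] = *val;
	} else if (m->s[2].t == m->s[1].t && dt > win / 2) {
		/* Half the window passed without a new 3rd choice. */
		m->s[2] = *val;
	}
	return m->s[0].v;
}

static uint32_t minmax_running_max(struct minmax *m, uint32_t win,
				   uint32_t t, uint32_t meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	if (val.v >= m->s[0].v ||		/* new overall max? */
	    val.t - m->s[2].t > win)		/* nothing left in window? */
		return minmax_reset(m, t, meas);

	if (val.v >= m->s[1].v)
		m->s[2] = m->s[1] = val;
	else if (val.v >= m->s[2].v)
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}

int main(void)
{
	struct minmax m;
	uint32_t t;

	minmax_reset(&m, 0, 100);
	for (t = 1; t <= 30; t++)
		printf("t=%2u max=%u\n", t, minmax_running_max(&m, 10, t, 50 + t));
	return 0;
}
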
t                 262 mm/hugetlb.c   static long region_add(struct resv_map *resv, long f, long t)
t                 280 mm/hugetlb.c   	if (&rg->link == head || t < rg->from) {
t                 289 mm/hugetlb.c   		nrg->to = t;
t                 292 mm/hugetlb.c   		add += t - f;
t                 305 mm/hugetlb.c   		if (rg->from > t)
t                 311 mm/hugetlb.c   		if (rg->to > t)
t                 312 mm/hugetlb.c   			t = rg->to;
t                 326 mm/hugetlb.c   	add += t - nrg->to;		/* Added to end of region */
t                 327 mm/hugetlb.c   	nrg->to = t;
t                 358 mm/hugetlb.c   static long region_chg(struct resv_map *resv, long f, long t)
t                 401 mm/hugetlb.c   	if (&rg->link == head || t < rg->from) {
t                 416 mm/hugetlb.c   		chg = t - f;
t                 423 mm/hugetlb.c   	chg = t - f;
t                 429 mm/hugetlb.c   		if (rg->from > t)
t                 435 mm/hugetlb.c   		if (rg->to > t) {
t                 436 mm/hugetlb.c   			chg += rg->to - t;
t                 437 mm/hugetlb.c   			t = rg->to;
t                 463 mm/hugetlb.c   static void region_abort(struct resv_map *resv, long f, long t)
t                 485 mm/hugetlb.c   static long region_del(struct resv_map *resv, long f, long t)
t                 505 mm/hugetlb.c   		if (rg->from >= t)
t                 508 mm/hugetlb.c   		if (f > rg->from && t < rg->to) { /* Must split region */
t                 530 mm/hugetlb.c   			del += t - f;
t                 533 mm/hugetlb.c   			nrg->from = t;
t                 545 mm/hugetlb.c   		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
t                 553 mm/hugetlb.c   			del += t - rg->from;
t                 554 mm/hugetlb.c   			rg->from = t;
t                 592 mm/hugetlb.c   static long region_count(struct resv_map *resv, long f, long t)
t                 606 mm/hugetlb.c   		if (rg->from >= t)
t                 610 mm/hugetlb.c   		seg_to = min(rg->to, t);
t                4024 mm/memcontrol.c 	struct mem_cgroup_threshold_ary *t;
t                4030 mm/memcontrol.c 		t = rcu_dereference(memcg->thresholds.primary);
t                4032 mm/memcontrol.c 		t = rcu_dereference(memcg->memsw_thresholds.primary);
t                4034 mm/memcontrol.c 	if (!t)
t                4044 mm/memcontrol.c 	i = t->current_threshold;
t                4052 mm/memcontrol.c 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
t                4053 mm/memcontrol.c 		eventfd_signal(t->entries[i].eventfd, 1);
t                4064 mm/memcontrol.c 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
t                4065 mm/memcontrol.c 		eventfd_signal(t->entries[i].eventfd, 1);
t                4068 mm/memcontrol.c 	t->current_threshold = i - 1;
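
The mm/memcontrol.c lines above walk a sorted array of usage thresholds in two directions around the cached current_threshold index, signalling every eventfd whose threshold has just been crossed (downwards or upwards), and then cache the new position. An illustrative re-expression with a plain callback standing in for eventfd_signal() and the surrounding RCU/locking omitted; the struct and function names are invented for the sketch.

#include <stdio.h>

/* entries[] is sorted ascending by threshold; current_threshold caches the
 * index of the last entry at or below the previous usage value (-1 if none). */
struct threshold_entry {
	unsigned long threshold;
	void (*notify)(unsigned long threshold);
};

struct threshold_set {
	const struct threshold_entry *entries;
	int size;
	int current_threshold;
};

static void threshold_check(struct threshold_set *t, unsigned long usage)
{
	int i = t->current_threshold;

	/* Usage fell: walk down, firing entries now above usage. */
	for (; i >= 0 && t->entries[i].threshold > usage; i--)
		t->entries[i].notify(t->entries[i].threshold);

	i++;

	/* Usage rose: walk up, firing entries now at or below usage. */
	for (; i < t->size && t->entries[i].threshold <= usage; i++)
		t->entries[i].notify(t->entries[i].threshold);

	t->current_threshold = i - 1;
}

static void report(unsigned long threshold)
{
	printf("crossed threshold %lu\n", threshold);
}

int main(void)
{
	static const struct threshold_entry entries[] = {
		{ 100, report }, { 200, report }, { 300, report },
	};
	struct threshold_set set = { entries, 3, -1 };

	threshold_check(&set, 250);	/* fires 100 and 200 */
	threshold_check(&set, 50);	/* fires 200 then 100 (downward) */
	return 0;
}
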
t                 211 mm/memory-failure.c 	struct task_struct *t = tk->tsk;
t                 216 mm/memory-failure.c 		pfn, t->comm, t->pid);
t                 218 mm/memory-failure.c 	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
t                 229 mm/memory-failure.c 				      addr_lsb, t);  /* synchronous? */
t                 233 mm/memory-failure.c 			t->comm, t->pid, ret);
t                 405 mm/memory-failure.c 	struct task_struct *t;
t                 407 mm/memory-failure.c 	for_each_thread(tsk, t)
t                 408 mm/memory-failure.c 		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
t                 409 mm/memory-failure.c 			return t;
t                 422 mm/memory-failure.c 	struct task_struct *t;
t                 427 mm/memory-failure.c 	t = find_early_kill_thread(tsk);
t                 428 mm/memory-failure.c 	if (t)
t                 429 mm/memory-failure.c 		return t;
t                 454 mm/memory-failure.c 		struct task_struct *t = task_early_kill(tsk, force_early);
t                 456 mm/memory-failure.c 		if (!t)
t                 463 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
t                 464 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
t                 485 mm/memory-failure.c 		struct task_struct *t = task_early_kill(tsk, force_early);
t                 487 mm/memory-failure.c 		if (!t)
t                 498 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
t                 499 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
t                1331 mm/mempolicy.c 	unsigned long t;
t                1359 mm/mempolicy.c 			if (get_user(t, nmask + k))
t                1362 mm/mempolicy.c 				if (t & endmask)
t                1364 mm/mempolicy.c 			} else if (t)
t                1375 mm/mempolicy.c 		if (get_user(t, nmask + nlongs - 1))
t                1377 mm/mempolicy.c 		if (t & valid_mask)
t                 134 mm/oom_kill.c  	struct task_struct *t;
t                 138 mm/oom_kill.c  	for_each_thread(p, t) {
t                 139 mm/oom_kill.c  		task_lock(t);
t                 140 mm/oom_kill.c  		if (likely(t->mm))
t                 142 mm/oom_kill.c  		task_unlock(t);
t                 144 mm/oom_kill.c  	t = NULL;
t                 148 mm/oom_kill.c  	return t;
t                 492 mm/oom_kill.c  	struct task_struct *t;
t                 494 mm/oom_kill.c  	for_each_thread(p, t) {
t                 495 mm/oom_kill.c  		struct mm_struct *t_mm = READ_ONCE(t->mm);
t                 628 mm/page-writeback.c static void writeout_period(struct timer_list *t)
t                 630 mm/page-writeback.c 	struct wb_domain *dom = from_timer(dom, t, period_timer);
t                1423 mm/page-writeback.c 	unsigned long t;
t                1432 mm/page-writeback.c 	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
t                1433 mm/page-writeback.c 	t++;
t                1435 mm/page-writeback.c 	return min_t(unsigned long, t, MAX_PAUSE);
t                1446 mm/page-writeback.c 	long t;		/* target pause */
t                1451 mm/page-writeback.c 	t = max(1, HZ / 100);
t                1460 mm/page-writeback.c 		t += (hi - lo) * (10 * HZ) / 1024;
t                1480 mm/page-writeback.c 	t = min(t, 1 + max_pause / 2);
t                1481 mm/page-writeback.c 	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
t                1492 mm/page-writeback.c 		t = max_pause;
t                1493 mm/page-writeback.c 		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
t                1496 mm/page-writeback.c 			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
t                1502 mm/page-writeback.c 		t = max_pause;
t                1503 mm/page-writeback.c 		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
t                1510 mm/page-writeback.c 	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
t                1997 mm/page-writeback.c void laptop_mode_timer_fn(struct timer_list *t)
t                2000 mm/page-writeback.c 		from_timer(backing_dev_info, t, laptop_mode_wb_timer);
t                1734 mm/page_alloc.c 		unsigned long t;
t                1739 mm/page_alloc.c 		t = min(mo_pfn, *end_pfn);
t                1740 mm/page_alloc.c 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
t                1752 mm/page_alloc.c 		unsigned long t;
t                1757 mm/page_alloc.c 		t = min(mo_pfn, epfn);
t                1758 mm/page_alloc.c 		deferred_free_pages(spfn, t);
t                2972 mm/page_alloc.c 	unsigned int order, t;
t                2997 mm/page_alloc.c 	for_each_migratetype_order(order, t) {
t                2999 mm/page_alloc.c 				&zone->free_area[order].free_list[t], lru) {
t                6021 mm/page_alloc.c 	unsigned int order, t;
t                6022 mm/page_alloc.c 	for_each_migratetype_order(order, t) {
t                6023 mm/page_alloc.c 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
t                 583 mm/slub.c      static void print_track(const char *s, struct track *t, unsigned long pr_time)
t                 585 mm/slub.c      	if (!t->addr)
t                 589 mm/slub.c      	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
t                 594 mm/slub.c      			if (t->addrs[i])
t                 595 mm/slub.c      				pr_err("\t%pS\n", (void *)t->addrs[i]);
t                1854 mm/slub.c      		void *t;
t                1859 mm/slub.c      		t = acquire_slab(s, n, page, object == NULL, &objects);
t                1860 mm/slub.c      		if (!t)
t                1867 mm/slub.c      			object = t;
t                3996 mm/slub.c      	struct page *t;
t                4016 mm/slub.c      		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
t                4042 mm/slub.c      		list_for_each_entry_safe(page, t, &discard, slab_list)
t                4512 mm/slub.c      static void free_loc_track(struct loc_track *t)
t                4514 mm/slub.c      	if (t->max)
t                4515 mm/slub.c      		free_pages((unsigned long)t->loc,
t                4516 mm/slub.c      			get_order(sizeof(struct location) * t->max));
t                4519 mm/slub.c      static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
t                4530 mm/slub.c      	if (t->count) {
t                4531 mm/slub.c      		memcpy(l, t->loc, sizeof(struct location) * t->count);
t                4532 mm/slub.c      		free_loc_track(t);
t                4534 mm/slub.c      	t->max = max;
t                4535 mm/slub.c      	t->loc = l;
t                4539 mm/slub.c      static int add_location(struct loc_track *t, struct kmem_cache *s,
t                4548 mm/slub.c      	end = t->count;
t                4560 mm/slub.c      		caddr = t->loc[pos].addr;
t                4563 mm/slub.c      			l = &t->loc[pos];
t                4593 mm/slub.c      	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
t                4596 mm/slub.c      	l = t->loc + pos;
t                4597 mm/slub.c      	if (pos < t->count)
t                4599 mm/slub.c      			(t->count - pos) * sizeof(struct location));
t                4600 mm/slub.c      	t->count++;
t                4615 mm/slub.c      static void process_slab(struct loc_track *t, struct kmem_cache *s,
t                4627 mm/slub.c      			add_location(t, s, get_track(s, p, alloc));
t                4635 mm/slub.c      	struct loc_track t = { 0, 0, NULL };
t                4640 mm/slub.c      	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
t                4657 mm/slub.c      			process_slab(&t, s, page, alloc, map);
t                4659 mm/slub.c      			process_slab(&t, s, page, alloc, map);
t                4663 mm/slub.c      	for (i = 0; i < t.count; i++) {
t                4664 mm/slub.c      		struct location *l = &t.loc[i];
t                4707 mm/slub.c      	free_loc_track(&t);
t                4709 mm/slub.c      	if (!t.count)
t                 298 mm/util.c      	struct task_struct * __maybe_unused t = current;
t                 300 mm/util.c      	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
t                  56 mm/vmalloc.c   	struct llist_node *t, *llnode;
t                  58 mm/vmalloc.c   	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
t                 322 mm/vmstat.c    	long t;
t                 326 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 328 mm/vmstat.c    	if (unlikely(x > t || x < -t)) {
t                 342 mm/vmstat.c    	long t;
t                 346 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 348 mm/vmstat.c    	if (unlikely(x > t || x < -t)) {
t                 383 mm/vmstat.c    	s8 v, t;
t                 386 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 387 mm/vmstat.c    	if (unlikely(v > t)) {
t                 388 mm/vmstat.c    		s8 overstep = t >> 1;
t                 399 mm/vmstat.c    	s8 v, t;
t                 402 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 403 mm/vmstat.c    	if (unlikely(v > t)) {
t                 404 mm/vmstat.c    		s8 overstep = t >> 1;
t                 427 mm/vmstat.c    	s8 v, t;
t                 430 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 431 mm/vmstat.c    	if (unlikely(v < - t)) {
t                 432 mm/vmstat.c    		s8 overstep = t >> 1;
t                 443 mm/vmstat.c    	s8 v, t;
t                 446 mm/vmstat.c    	t = __this_cpu_read(pcp->stat_threshold);
t                 447 mm/vmstat.c    	if (unlikely(v < - t)) {
t                 448 mm/vmstat.c    		s8 overstep = t >> 1;
t                 485 mm/vmstat.c    	long o, n, t, z;
t                 500 mm/vmstat.c    		t = this_cpu_read(pcp->stat_threshold);
t                 505 mm/vmstat.c    		if (n > t || n < -t) {
t                 506 mm/vmstat.c    			int os = overstep_mode * (t >> 1) ;
t                 542 mm/vmstat.c    	long o, n, t, z;
t                 557 mm/vmstat.c    		t = this_cpu_read(pcp->stat_threshold);
t                 562 mm/vmstat.c    		if (n > t || n < -t) {
t                 563 mm/vmstat.c    			int os = overstep_mode * (t >> 1) ;
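
The mm/vmstat.c lines above all follow one pattern: a small per-CPU signed delta is accumulated cheaply and only folded into the shared counter once it exceeds +/- stat_threshold; the inc/dec variants additionally "overstep" by half the threshold so the next fold is pushed further out. A single-threaded illustrative sketch of that batching, with plain fields standing in for the per-CPU and atomic machinery.

#include <stdio.h>

/* local_delta would live in per-CPU storage in the kernel and the fold into
 * 'global' would be an atomic add; both are plain fields here. */
struct batched_counter {
	long global;		/* shared, expensive to touch */
	long local_delta;	/* per-CPU scratch in the kernel */
	long threshold;		/* stat_threshold */
};

static void counter_mod(struct batched_counter *c, long delta)
{
	long x = c->local_delta + delta;
	long t = c->threshold;

	if (x > t || x < -t) {	/* same test as in the lines above */
		c->global += x;	/* fold the batched delta */
		x = 0;
	}
	c->local_delta = x;
}

/* Increment with overstep: when the local delta crosses the threshold, fold
 * threshold + threshold/2 and leave the delta at -threshold/2, pushing the
 * next fold roughly half a threshold further away. */
static void counter_inc(struct batched_counter *c)
{
	long v = ++c->local_delta;
	long t = c->threshold;

	if (v > t) {
		long overstep = t >> 1;

		c->global += v + overstep;
		c->local_delta = -overstep;
	}
}

int main(void)
{
	struct batched_counter c = { 0, 0, 8 };

	for (int i = 0; i < 20; i++)
		counter_inc(&c);
	counter_mod(&c, 5);
	printf("global=%ld local=%ld\n", c.global, c.local_delta);
	return 0;
}
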
t                  69 net/6lowpan/debugfs.c 	struct lowpan_iphc_ctx_table *t =
t                  75 net/6lowpan/debugfs.c 	spin_lock_bh(&t->lock);
t                  77 net/6lowpan/debugfs.c 	spin_unlock_bh(&t->lock);
t                  85 net/6lowpan/debugfs.c 	struct lowpan_iphc_ctx_table *t =
t                  88 net/6lowpan/debugfs.c 	spin_lock_bh(&t->lock);
t                  90 net/6lowpan/debugfs.c 	spin_unlock_bh(&t->lock);
t                 100 net/6lowpan/debugfs.c 	struct lowpan_iphc_ctx_table *t =
t                 103 net/6lowpan/debugfs.c 	spin_lock_bh(&t->lock);
t                 113 net/6lowpan/debugfs.c 	spin_unlock_bh(&t->lock);
t                 130 net/6lowpan/debugfs.c 	struct lowpan_iphc_ctx_table *t =
t                 149 net/6lowpan/debugfs.c 	spin_lock_bh(&t->lock);
t                 152 net/6lowpan/debugfs.c 	spin_unlock_bh(&t->lock);
t                 194 net/6lowpan/debugfs.c 	struct lowpan_iphc_ctx_table *t = file->private;
t                 200 net/6lowpan/debugfs.c 	spin_lock_bh(&t->lock);
t                 202 net/6lowpan/debugfs.c 		if (!lowpan_iphc_ctx_is_active(&t->table[i]))
t                 205 net/6lowpan/debugfs.c 		seq_printf(file, "%3d|%39pI6c/%-3d|%d\n", t->table[i].id,
t                 206 net/6lowpan/debugfs.c 			   &t->table[i].pfx, t->table[i].plen,
t                 207 net/6lowpan/debugfs.c 			   lowpan_iphc_ctx_is_compression(&t->table[i]));
t                 209 net/6lowpan/debugfs.c 	spin_unlock_bh(&t->lock);
t                 401 net/802/garp.c static void garp_join_timer(struct timer_list *t)
t                 403 net/802/garp.c 	struct garp_applicant *app = from_timer(app, t, join_timer);
t                 586 net/802/mrp.c  static void mrp_join_timer(struct timer_list *t)
t                 588 net/802/mrp.c  	struct mrp_applicant *app = from_timer(app, t, join_timer);
t                 605 net/802/mrp.c  static void mrp_periodic_timer(struct timer_list *t)
t                 607 net/802/mrp.c  	struct mrp_applicant *app = from_timer(app, t, periodic_timer);
t                  97 net/9p/mod.c   	struct p9_trans_module *t, *found = NULL;
t                 101 net/9p/mod.c   	list_for_each_entry(t, &v9fs_trans_list, list)
t                 102 net/9p/mod.c   		if (strcmp(t->name, s) == 0 &&
t                 103 net/9p/mod.c   		    try_module_get(t->owner)) {
t                 104 net/9p/mod.c   			found = t;
t                 120 net/9p/mod.c   	struct p9_trans_module *t, *found = NULL;
t                 124 net/9p/mod.c   	list_for_each_entry(t, &v9fs_trans_list, list)
t                 125 net/9p/mod.c   		if (t->def && try_module_get(t->owner)) {
t                 126 net/9p/mod.c   			found = t;
t                 131 net/9p/mod.c   		list_for_each_entry(t, &v9fs_trans_list, list)
t                 132 net/9p/mod.c   			if (try_module_get(t->owner)) {
t                 133 net/9p/mod.c   				found = t;
t                 254 net/appletalk/aarp.c 	struct aarp_entry *t;
t                 259 net/appletalk/aarp.c 			t = *n;
t                 261 net/appletalk/aarp.c 			__aarp_expire(t);
t                 273 net/appletalk/aarp.c 	struct aarp_entry *t;
t                 278 net/appletalk/aarp.c 			t = *n;
t                 280 net/appletalk/aarp.c 			__aarp_expire(t);
t                 295 net/appletalk/aarp.c 	struct aarp_entry *t;
t                 299 net/appletalk/aarp.c 			t = *n;
t                 301 net/appletalk/aarp.c 			__aarp_expire(t);
t                 353 net/appletalk/aarp.c 	struct aarp_entry *t;
t                 356 net/appletalk/aarp.c 		t = *n;
t                 358 net/appletalk/aarp.c 		__aarp_expire(t);
t                 156 net/appletalk/ddp.c static void atalk_destroy_timer(struct timer_list *t)
t                 158 net/appletalk/ddp.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                1230 net/atm/lec.c  static void lec_arp_expire_arp(struct timer_list *t);
t                1566 net/atm/lec.c  static void lec_arp_expire_arp(struct timer_list *t)
t                1570 net/atm/lec.c  	entry = from_timer(entry, t, timer);
t                1588 net/atm/lec.c  static void lec_arp_expire_vcc(struct timer_list *t)
t                1591 net/atm/lec.c  	struct lec_arp_table *to_remove = from_timer(to_remove, t, timer);
t                 268 net/ax25/af_ax25.c static void ax25_destroy_timer(struct timer_list *t)
t                 270 net/ax25/af_ax25.c 	ax25_cb *ax25 = from_timer(ax25, t, dtimer);
t                  65 net/ax25/ax25_ds_timer.c static void ax25_ds_timeout(struct timer_list *t)
t                  67 net/ax25/ax25_ds_timer.c 	ax25_dev *ax25_dev = from_timer(ax25_dev, t, dama.slave_timer);
t                  44 net/ax25/ax25_route.c 	ax25_route *s, *t, *ax25_rt;
t                  58 net/ax25/ax25_route.c 				for (t = ax25_route_list; t != NULL; t = t->next) {
t                  59 net/ax25/ax25_route.c 					if (t->next == s) {
t                  60 net/ax25/ax25_route.c 						t->next = s->next;
t                 147 net/ax25/ax25_route.c 	ax25_route *s, *t, *ax25_rt;
t                 165 net/ax25/ax25_route.c 				for (t = ax25_route_list; t != NULL; t = t->next) {
t                 166 net/ax25/ax25_route.c 					if (t->next == s) {
t                 167 net/ax25/ax25_route.c 						t->next = s->next;
t                 222 net/ax25/ax25_subr.c 	int n, t = 2;
t                 229 net/ax25/ax25_subr.c 		t += 2 * ax25->n2count;
t                 234 net/ax25/ax25_subr.c 			t *= 2;
t                 235 net/ax25/ax25_subr.c 		if (t > 8) t = 8;
t                 239 net/ax25/ax25_subr.c 	ax25->t1 = t * ax25->rtt;
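
The net/ax25/ax25_subr.c lines above size the T1 retransmission interval: a base multiplier of 2 grows either linearly with the retry count or doubles per retry (capped at 8), and then scales the smoothed round-trip time. An illustrative stand-alone version with the connection fields flattened into parameters and the no-backoff mode omitted.

#include <stdio.h>

/* Linear backoff adds 2 per retry; exponential backoff doubles per retry
 * and is capped at 8 x RTT, as in the lines above. */
static unsigned long calculate_t1(unsigned long rtt, unsigned int retries,
				  int exponential_backoff)
{
	unsigned long t = 2;

	if (!exponential_backoff) {
		t += 2 * retries;
	} else {
		unsigned int n;

		for (n = 0; n < retries; n++)
			t *= 2;
		if (t > 8)
			t = 8;
	}
	return t * rtt;
}

int main(void)
{
	for (unsigned int r = 0; r < 5; r++)
		printf("retry %u: linear=%lu exponential=%lu\n", r,
		       calculate_t1(100, r, 0), calculate_t1(100, r, 1));
	return 0;
}
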
t                 119 net/ax25/ax25_timer.c static void ax25_heartbeat_expiry(struct timer_list *t)
t                 122 net/ax25/ax25_timer.c 	ax25_cb *ax25 = from_timer(ax25, t, timer);
t                 144 net/ax25/ax25_timer.c static void ax25_t1timer_expiry(struct timer_list *t)
t                 146 net/ax25/ax25_timer.c 	ax25_cb *ax25 = from_timer(ax25, t, t1timer);
t                 163 net/ax25/ax25_timer.c static void ax25_t2timer_expiry(struct timer_list *t)
t                 165 net/ax25/ax25_timer.c 	ax25_cb *ax25 = from_timer(ax25, t, t2timer);
t                 182 net/ax25/ax25_timer.c static void ax25_t3timer_expiry(struct timer_list *t)
t                 184 net/ax25/ax25_timer.c 	ax25_cb *ax25 = from_timer(ax25, t, t3timer);
t                 203 net/ax25/ax25_timer.c static void ax25_idletimer_expiry(struct timer_list *t)
t                 205 net/ax25/ax25_timer.c 	ax25_cb *ax25 = from_timer(ax25, t, idletimer);
t                 482 net/batman-adv/tp_meter.c static void batadv_tp_sender_timeout(struct timer_list *t)
t                 484 net/batman-adv/tp_meter.c 	struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
t                1103 net/batman-adv/tp_meter.c static void batadv_tp_receiver_shutdown(struct timer_list *t)
t                1105 net/batman-adv/tp_meter.c 	struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
t                 406 net/bluetooth/hidp/core.c static void hidp_idle_timeout(struct timer_list *t)
t                 408 net/bluetooth/hidp/core.c 	struct hidp_session *session = from_timer(session, t, timer);
t                 236 net/bluetooth/rfcomm/core.c static void rfcomm_session_timeout(struct timer_list *t)
t                 238 net/bluetooth/rfcomm/core.c 	struct rfcomm_session *s = from_timer(s, t, timer);
t                 261 net/bluetooth/rfcomm/core.c static void rfcomm_dlc_timeout(struct timer_list *t)
t                 263 net/bluetooth/rfcomm/core.c 	struct rfcomm_dlc *d = from_timer(d, t, timer);
t                  76 net/bluetooth/sco.c static void sco_sock_timeout(struct timer_list *t)
t                  78 net/bluetooth/sco.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 250 net/bluetooth/smp.c 	u8 m[53], t[16];
t                 257 net/bluetooth/smp.c 	err = aes_cmac(tfm_cmac, salt, w, 32, t);
t                 261 net/bluetooth/smp.c 	SMP_DBG("t %16phN", t);
t                 272 net/bluetooth/smp.c 	err = aes_cmac(tfm_cmac, t, m, sizeof(m), mackey);
t                 280 net/bluetooth/smp.c 	err = aes_cmac(tfm_cmac, t, m, sizeof(m), ltk);
t                 142 net/bridge/br_multicast.c static void br_multicast_group_expired(struct timer_list *t)
t                 144 net/bridge/br_multicast.c 	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
t                 200 net/bridge/br_multicast.c static void br_multicast_port_group_expired(struct timer_list *t)
t                 202 net/bridge/br_multicast.c 	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
t                 626 net/bridge/br_multicast.c static void br_multicast_router_expired(struct timer_list *t)
t                 629 net/bridge/br_multicast.c 			from_timer(port, t, multicast_router_timer);
t                 656 net/bridge/br_multicast.c static void br_multicast_local_router_expired(struct timer_list *t)
t                 658 net/bridge/br_multicast.c 	struct net_bridge *br = from_timer(br, t, multicast_router_timer);
t                 684 net/bridge/br_multicast.c static void br_ip4_multicast_querier_expired(struct timer_list *t)
t                 686 net/bridge/br_multicast.c 	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
t                 692 net/bridge/br_multicast.c static void br_ip6_multicast_querier_expired(struct timer_list *t)
t                 694 net/bridge/br_multicast.c 	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
t                 796 net/bridge/br_multicast.c static void br_ip4_multicast_port_query_expired(struct timer_list *t)
t                 798 net/bridge/br_multicast.c 	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
t                 804 net/bridge/br_multicast.c static void br_ip6_multicast_port_query_expired(struct timer_list *t)
t                 806 net/bridge/br_multicast.c 	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
t                1768 net/bridge/br_multicast.c static void br_ip4_multicast_query_expired(struct timer_list *t)
t                1770 net/bridge/br_multicast.c 	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
t                1776 net/bridge/br_multicast.c static void br_ip6_multicast_query_expired(struct timer_list *t)
t                1778 net/bridge/br_multicast.c 	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
t                 266 net/bridge/br_netlink_tunnel.c 		int t, v;
t                 273 net/bridge/br_netlink_tunnel.c 		t = tinfo_last->tunid;
t                 275 net/bridge/br_netlink_tunnel.c 			err = br_vlan_tunnel_info(p, cmd, v, t, changed);
t                 278 net/bridge/br_netlink_tunnel.c 			t++;
t                1120 net/bridge/br_private.h void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
t                1124 net/bridge/br_private.h int __set_ageing_time(struct net_device *dev, unsigned long t);
t                  69 net/bridge/br_stp.c 	int t;
t                  84 net/bridge/br_stp.c 	t = memcmp(&p->designated_root, &rp->designated_root, 8);
t                  85 net/bridge/br_stp.c 	if (t < 0)
t                  87 net/bridge/br_stp.c 	else if (t > 0)
t                  97 net/bridge/br_stp.c 	t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
t                  98 net/bridge/br_stp.c 	if (t < 0)
t                 100 net/bridge/br_stp.c 	else if (t > 0)
t                 253 net/bridge/br_stp.c 	int t;
t                 267 net/bridge/br_stp.c 	t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
t                 268 net/bridge/br_stp.c 	if (t < 0)
t                 270 net/bridge/br_stp.c 	else if (t > 0)
t                 296 net/bridge/br_stp.c 	int t;
t                 298 net/bridge/br_stp.c 	t = memcmp(&bpdu->root, &p->designated_root, 8);
t                 299 net/bridge/br_stp.c 	if (t < 0)
t                 301 net/bridge/br_stp.c 	else if (t > 0)
t                 309 net/bridge/br_stp.c 	t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
t                 310 net/bridge/br_stp.c 	if (t < 0)
t                 312 net/bridge/br_stp.c 	else if (t > 0)
t                 532 net/bridge/br_stp.c 	unsigned long t = clock_t_to_jiffies(val);
t                 534 net/bridge/br_stp.c 	if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
t                 538 net/bridge/br_stp.c 	br->bridge_hello_time = t;
t                 547 net/bridge/br_stp.c 	unsigned long t = clock_t_to_jiffies(val);
t                 549 net/bridge/br_stp.c 	if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
t                 553 net/bridge/br_stp.c 	br->bridge_max_age = t;
t                 562 net/bridge/br_stp.c int __set_ageing_time(struct net_device *dev, unsigned long t)
t                 568 net/bridge/br_stp.c 		.u.ageing_time = jiffies_to_clock_t(t),
t                 589 net/bridge/br_stp.c 	unsigned long t = clock_t_to_jiffies(ageing_time);
t                 592 net/bridge/br_stp.c 	err = __set_ageing_time(br->dev, t);
t                 597 net/bridge/br_stp.c 	br->bridge_ageing_time = t;
t                 598 net/bridge/br_stp.c 	br->ageing_time = t;
t                 609 net/bridge/br_stp.c 	unsigned long t;
t                 618 net/bridge/br_stp.c 			t = 2 * br->forward_delay;
t                 619 net/bridge/br_stp.c 			br_debug(br, "decreasing ageing time to %lu\n", t);
t                 621 net/bridge/br_stp.c 			t = br->bridge_ageing_time;
t                 622 net/bridge/br_stp.c 			br_debug(br, "restoring ageing time to %lu\n", t);
t                 625 net/bridge/br_stp.c 		err = __set_ageing_time(br->dev, t);
t                 629 net/bridge/br_stp.c 			br->ageing_time = t;
t                 635 net/bridge/br_stp.c void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
t                 637 net/bridge/br_stp.c 	br->bridge_forward_delay = t;
t                 644 net/bridge/br_stp.c 	unsigned long t = clock_t_to_jiffies(val);
t                 649 net/bridge/br_stp.c 	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
t                 652 net/bridge/br_stp.c 	__br_set_forward_delay(br, t);
t                  30 net/bridge/br_stp_timer.c static void br_hello_timer_expired(struct timer_list *t)
t                  32 net/bridge/br_stp_timer.c 	struct net_bridge *br = from_timer(br, t, hello_timer);
t                  46 net/bridge/br_stp_timer.c static void br_message_age_timer_expired(struct timer_list *t)
t                  48 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, message_age_timer);
t                  79 net/bridge/br_stp_timer.c static void br_forward_delay_timer_expired(struct timer_list *t)
t                  81 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, forward_delay_timer);
t                 103 net/bridge/br_stp_timer.c static void br_tcn_timer_expired(struct timer_list *t)
t                 105 net/bridge/br_stp_timer.c 	struct net_bridge *br = from_timer(br, t, tcn_timer);
t                 117 net/bridge/br_stp_timer.c static void br_topology_change_timer_expired(struct timer_list *t)
t                 119 net/bridge/br_stp_timer.c 	struct net_bridge *br = from_timer(br, t, topology_change_timer);
t                 128 net/bridge/br_stp_timer.c static void br_hold_timer_expired(struct timer_list *t)
t                 130 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, hold_timer);
t                 187 net/bridge/netfilter/ebtables.c 	const struct ebt_entry_target *t;
t                 231 net/bridge/netfilter/ebtables.c 		t = ebt_get_target_c(point);
t                 233 net/bridge/netfilter/ebtables.c 		if (!t->u.target->target)
t                 234 net/bridge/netfilter/ebtables.c 			verdict = ((struct ebt_standard_target *)t)->verdict;
t                 236 net/bridge/netfilter/ebtables.c 			acpar.target   = t->u.target;
t                 237 net/bridge/netfilter/ebtables.c 			acpar.targinfo = t->data;
t                 238 net/bridge/netfilter/ebtables.c 			verdict = t->u.target->target(skb, &acpar);
t                 620 net/bridge/netfilter/ebtables.c 	struct ebt_entry_target *t;
t                 629 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
t                 632 net/bridge/netfilter/ebtables.c 	par.target   = t->u.target;
t                 633 net/bridge/netfilter/ebtables.c 	par.targinfo = t->data;
t                 647 net/bridge/netfilter/ebtables.c 	struct ebt_entry_target *t;
t                 707 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
t                 710 net/bridge/netfilter/ebtables.c 	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
t                 723 net/bridge/netfilter/ebtables.c 	t->u.target = target;
t                 724 net/bridge/netfilter/ebtables.c 	if (t->u.target == &ebt_standard_target) {
t                 729 net/bridge/netfilter/ebtables.c 		if (((struct ebt_standard_target *)t)->verdict <
t                 734 net/bridge/netfilter/ebtables.c 	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
t                 735 net/bridge/netfilter/ebtables.c 		module_put(t->u.target->me);
t                 741 net/bridge/netfilter/ebtables.c 	tgpar.targinfo = t->data;
t                 742 net/bridge/netfilter/ebtables.c 	ret = xt_check_target(&tgpar, t->target_size,
t                 766 net/bridge/netfilter/ebtables.c 	const struct ebt_entry_target *t;
t                 785 net/bridge/netfilter/ebtables.c 		t = ebt_get_target_c(e);
t                 786 net/bridge/netfilter/ebtables.c 		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
t                 792 net/bridge/netfilter/ebtables.c 		verdict = ((struct ebt_standard_target *)t)->verdict;
t                 974 net/bridge/netfilter/ebtables.c 	struct ebt_table *t;
t                 996 net/bridge/netfilter/ebtables.c 	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
t                 997 net/bridge/netfilter/ebtables.c 	if (!t) {
t                1003 net/bridge/netfilter/ebtables.c 	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
t                1006 net/bridge/netfilter/ebtables.c 	if (repl->num_counters && repl->num_counters != t->private->nentries) {
t                1012 net/bridge/netfilter/ebtables.c 	table = t->private;
t                1014 net/bridge/netfilter/ebtables.c 	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
t                1018 net/bridge/netfilter/ebtables.c 		module_put(t->me);
t                1020 net/bridge/netfilter/ebtables.c 	write_lock_bh(&t->lock);
t                1022 net/bridge/netfilter/ebtables.c 		get_counters(t->private->counters, counterstmp,
t                1023 net/bridge/netfilter/ebtables.c 		   t->private->nentries);
t                1025 net/bridge/netfilter/ebtables.c 	t->private = newinfo;
t                1026 net/bridge/netfilter/ebtables.c 	write_unlock_bh(&t->lock);
t                1147 net/bridge/netfilter/ebtables.c 	struct ebt_table *t, *table;
t                1204 net/bridge/netfilter/ebtables.c 	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
t                1205 net/bridge/netfilter/ebtables.c 		if (strcmp(t->name, table->name) == 0) {
t                1255 net/bridge/netfilter/ebtables.c 	struct ebt_table *t;
t                1264 net/bridge/netfilter/ebtables.c 	t = find_table_lock(net, name, &ret, &ebt_mutex);
t                1265 net/bridge/netfilter/ebtables.c 	if (!t)
t                1268 net/bridge/netfilter/ebtables.c 	if (num_counters != t->private->nentries) {
t                1279 net/bridge/netfilter/ebtables.c 	write_lock_bh(&t->lock);
t                1283 net/bridge/netfilter/ebtables.c 		ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
t                1285 net/bridge/netfilter/ebtables.c 	write_unlock_bh(&t->lock);
t                1352 net/bridge/netfilter/ebtables.c 	const struct ebt_entry_target *t;
t                1366 net/bridge/netfilter/ebtables.c 	t = ebt_get_target_c(e);
t                1374 net/bridge/netfilter/ebtables.c 	ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
t                1375 net/bridge/netfilter/ebtables.c 			      t->u.target->usersize, t->target_size,
t                1376 net/bridge/netfilter/ebtables.c 			      t->u.target->revision);
t                1383 net/bridge/netfilter/ebtables.c static int copy_counters_to_user(struct ebt_table *t,
t                1402 net/bridge/netfilter/ebtables.c 	write_lock_bh(&t->lock);
t                1404 net/bridge/netfilter/ebtables.c 	write_unlock_bh(&t->lock);
t                1414 net/bridge/netfilter/ebtables.c static int copy_everything_to_user(struct ebt_table *t, void __user *user,
t                1424 net/bridge/netfilter/ebtables.c 		entries_size = t->private->entries_size;
t                1425 net/bridge/netfilter/ebtables.c 		nentries = t->private->nentries;
t                1426 net/bridge/netfilter/ebtables.c 		entries = t->private->entries;
t                1427 net/bridge/netfilter/ebtables.c 		oldcounters = t->private->counters;
t                1429 net/bridge/netfilter/ebtables.c 		entries_size = t->table->entries_size;
t                1430 net/bridge/netfilter/ebtables.c 		nentries = t->table->nentries;
t                1431 net/bridge/netfilter/ebtables.c 		entries = t->table->entries;
t                1432 net/bridge/netfilter/ebtables.c 		oldcounters = t->table->counters;
t                1448 net/bridge/netfilter/ebtables.c 	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
t                1484 net/bridge/netfilter/ebtables.c 	struct ebt_table *t;
t                1495 net/bridge/netfilter/ebtables.c 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
t                1496 net/bridge/netfilter/ebtables.c 	if (!t)
t                1508 net/bridge/netfilter/ebtables.c 			tmp.nentries = t->private->nentries;
t                1509 net/bridge/netfilter/ebtables.c 			tmp.entries_size = t->private->entries_size;
t                1510 net/bridge/netfilter/ebtables.c 			tmp.valid_hooks = t->valid_hooks;
t                1512 net/bridge/netfilter/ebtables.c 			tmp.nentries = t->table->nentries;
t                1513 net/bridge/netfilter/ebtables.c 			tmp.entries_size = t->table->entries_size;
t                1514 net/bridge/netfilter/ebtables.c 			tmp.valid_hooks = t->table->valid_hooks;
t                1526 net/bridge/netfilter/ebtables.c 		ret = copy_everything_to_user(t, user, len, cmd);
t                1620 net/bridge/netfilter/ebtables.c static int compat_target_to_user(struct ebt_entry_target *t,
t                1624 net/bridge/netfilter/ebtables.c 	const struct xt_target *target = t->u.target;
t                1627 net/bridge/netfilter/ebtables.c 	compat_uint_t tsize = t->target_size - off;
t                1629 net/bridge/netfilter/ebtables.c 	if (WARN_ON(off >= t->target_size))
t                1638 net/bridge/netfilter/ebtables.c 		if (target->compat_to_user(cm->data, t->data))
t                1641 net/bridge/netfilter/ebtables.c 		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
t                1663 net/bridge/netfilter/ebtables.c 	struct ebt_entry_target *t;
t                1700 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
t                1702 net/bridge/netfilter/ebtables.c 	ret = compat_target_to_user(t, dstptr, size);
t                1735 net/bridge/netfilter/ebtables.c 	const struct ebt_entry_target *t;
t                1748 net/bridge/netfilter/ebtables.c 	t = ebt_get_target_c(e);
t                1750 net/bridge/netfilter/ebtables.c 	off += xt_compat_target_offset(t->u.target);
t                1800 net/bridge/netfilter/ebtables.c static int compat_copy_everything_to_user(struct ebt_table *t,
t                1812 net/bridge/netfilter/ebtables.c 		tinfo.entries_size = t->private->entries_size;
t                1813 net/bridge/netfilter/ebtables.c 		tinfo.nentries = t->private->nentries;
t                1814 net/bridge/netfilter/ebtables.c 		tinfo.entries = t->private->entries;
t                1815 net/bridge/netfilter/ebtables.c 		oldcounters = t->private->counters;
t                1817 net/bridge/netfilter/ebtables.c 		tinfo.entries_size = t->table->entries_size;
t                1818 net/bridge/netfilter/ebtables.c 		tinfo.nentries = t->table->nentries;
t                1819 net/bridge/netfilter/ebtables.c 		tinfo.entries = t->table->entries;
t                1820 net/bridge/netfilter/ebtables.c 		oldcounters = t->table->counters;
t                1832 net/bridge/netfilter/ebtables.c 		ret = compat_table_info(t->private, &repl);
t                1846 net/bridge/netfilter/ebtables.c 	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
t                2348 net/bridge/netfilter/ebtables.c 	struct ebt_table *t;
t                2364 net/bridge/netfilter/ebtables.c 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
t                2365 net/bridge/netfilter/ebtables.c 	if (!t)
t                2371 net/bridge/netfilter/ebtables.c 		tmp.nentries = t->private->nentries;
t                2372 net/bridge/netfilter/ebtables.c 		ret = compat_table_info(t->private, &tmp);
t                2375 net/bridge/netfilter/ebtables.c 		tmp.valid_hooks = t->valid_hooks;
t                2384 net/bridge/netfilter/ebtables.c 		tmp.nentries = t->table->nentries;
t                2385 net/bridge/netfilter/ebtables.c 		tmp.entries_size = t->table->entries_size;
t                2386 net/bridge/netfilter/ebtables.c 		tmp.valid_hooks = t->table->valid_hooks;
t                2404 net/bridge/netfilter/ebtables.c 		if (copy_everything_to_user(t, user, len, cmd) == 0)
t                2407 net/bridge/netfilter/ebtables.c 			ret = compat_copy_everything_to_user(t, user, len, cmd);
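The ebtables.c entries show the per-rule target dispatch: ebt_get_target() returns the entry's target, a NULL target hook marks the built-in standard target whose verdict is read inline, and anything else calls the extension's target() function. The following self-contained sketch mirrors only that dispatch shape; the types and names are invented stand-ins, far simpler than the kernel structures.

    #include <stdio.h>

    /* Invented stand-ins: the real ebt_entry_target / xt_action_param carry
     * much more state than this. */
    typedef int (*target_fn)(void *skb, void *par);

    struct entry_target {
            target_fn target;    /* NULL => standard target, use .verdict   */
            int verdict;         /* meaningful only for the standard target */
    };

    static int run_target(const struct entry_target *t, void *skb, void *par)
    {
            if (!t->target)                   /* standard target: inline verdict */
                    return t->verdict;
            return t->target(skb, par);       /* extension target: call its hook */
    }

    static int drop_all(void *skb, void *par)
    {
            (void)skb; (void)par;
            return 0;                         /* pretend "DROP" */
    }

    int main(void)
    {
            struct entry_target std = { .target = NULL, .verdict = 1 };
            struct entry_target ext = { .target = drop_all, .verdict = 0 };

            printf("standard -> %d, extension -> %d\n",
                   run_target(&std, NULL, NULL), run_target(&ext, NULL, NULL));
            return 0;
    }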
t                 101 net/can/af_can.h void can_stat_update(struct timer_list *t);
t                 120 net/can/proc.c void can_stat_update(struct timer_list *t)
t                 122 net/can/proc.c 	struct net *net = from_timer(net, t, can.stattimer);
t                 116 net/ceph/crush/mapper.c 				unsigned int t = work->perm[p + i];
t                 118 net/ceph/crush/mapper.c 				work->perm[p] = t;
t                 201 net/ceph/crush/mapper.c 	__u64 t;
t                 210 net/ceph/crush/mapper.c 		t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
t                 212 net/ceph/crush/mapper.c 		t = t >> 32;
t                 216 net/ceph/crush/mapper.c 		if (t < bucket->node_weights[l])
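The crush/mapper.c entries above come from CRUSH's tree-bucket walk: a 32-bit hash is widened to 64 bits, scaled by a node weight (on a continuation line that does not contain the indexed identifier, so it is not listed here), and the high 32 bits of the product yield a value in [0, weight) that is compared against node_weights[]. A standalone demonstration of that scaling trick follows; the sample values and function name are mine, and a fixed input replaces crush_hash32_4().

    #include <stdio.h>
    #include <stdint.h>

    /* Map a 32-bit hash h uniformly into [0, w) without a modulo: widen,
     * multiply, keep the high word -- the same arithmetic the tree-bucket
     * code performs before comparing against node_weights[]. */
    static uint32_t scale_into_weight(uint32_t h, uint32_t w)
    {
            uint64_t t = (uint64_t)h * (uint64_t)w;

            return (uint32_t)(t >> 32);
    }

    int main(void)
    {
            uint32_t weight = 1000;
            uint32_t samples[] = { 0x00000000u, 0x80000000u, 0xffffffffu };
            int i;

            for (i = 0; i < 3; i++)
                    printf("0x%08x -> %u\n", (unsigned)samples[i],
                           (unsigned)scale_into_weight(samples[i], weight));
            return 0;
    }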
t                 181 net/ceph/debugfs.c static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
t                 185 net/ceph/debugfs.c 	seq_printf(s, "osd%d\t%llu.%x\t", t->osd, t->pgid.pool, t->pgid.seed);
t                 186 net/ceph/debugfs.c 	dump_spgid(s, &t->spgid);
t                 188 net/ceph/debugfs.c 	for (i = 0; i < t->up.size; i++)
t                 189 net/ceph/debugfs.c 		seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
t                 190 net/ceph/debugfs.c 	seq_printf(s, "]/%d\t[", t->up.primary);
t                 191 net/ceph/debugfs.c 	for (i = 0; i < t->acting.size; i++)
t                 192 net/ceph/debugfs.c 		seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
t                 193 net/ceph/debugfs.c 	seq_printf(s, "]/%d\te%u\t", t->acting.primary, t->epoch);
t                 194 net/ceph/debugfs.c 	if (t->target_oloc.pool_ns) {
t                 196 net/ceph/debugfs.c 			(int)t->target_oloc.pool_ns->len,
t                 197 net/ceph/debugfs.c 			t->target_oloc.pool_ns->str,
t                 198 net/ceph/debugfs.c 			t->target_oid.name_len, t->target_oid.name, t->flags);
t                 200 net/ceph/debugfs.c 		seq_printf(s, "%*pE\t0x%x", t->target_oid.name_len,
t                 201 net/ceph/debugfs.c 			t->target_oid.name, t->flags);
t                 203 net/ceph/debugfs.c 	if (t->paused)
t                 248 net/ceph/debugfs.c 	dump_target(s, &lreq->t);
t                 416 net/ceph/osd_client.c static void target_init(struct ceph_osd_request_target *t)
t                 418 net/ceph/osd_client.c 	ceph_oid_init(&t->base_oid);
t                 419 net/ceph/osd_client.c 	ceph_oloc_init(&t->base_oloc);
t                 420 net/ceph/osd_client.c 	ceph_oid_init(&t->target_oid);
t                 421 net/ceph/osd_client.c 	ceph_oloc_init(&t->target_oloc);
t                 423 net/ceph/osd_client.c 	ceph_osds_init(&t->acting);
t                 424 net/ceph/osd_client.c 	ceph_osds_init(&t->up);
t                 425 net/ceph/osd_client.c 	t->size = -1;
t                 426 net/ceph/osd_client.c 	t->min_size = -1;
t                 428 net/ceph/osd_client.c 	t->osd = CEPH_HOMELESS_OSD;
t                 458 net/ceph/osd_client.c static void target_destroy(struct ceph_osd_request_target *t)
t                 460 net/ceph/osd_client.c 	ceph_oid_destroy(&t->base_oid);
t                 461 net/ceph/osd_client.c 	ceph_oloc_destroy(&t->base_oloc);
t                 462 net/ceph/osd_client.c 	ceph_oid_destroy(&t->target_oid);
t                 463 net/ceph/osd_client.c 	ceph_oloc_destroy(&t->target_oloc);
t                1486 net/ceph/osd_client.c 				    const struct ceph_osd_request_target *t,
t                1494 net/ceph/osd_client.c 	WARN_ON(pi->id != t->target_oloc.pool);
t                1495 net/ceph/osd_client.c 	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
t                1496 net/ceph/osd_client.c 	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
t                1507 net/ceph/osd_client.c 					   struct ceph_osd_request_target *t,
t                1522 net/ceph/osd_client.c 	t->epoch = osdc->osdmap->epoch;
t                1523 net/ceph/osd_client.c 	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
t                1525 net/ceph/osd_client.c 		t->osd = CEPH_HOMELESS_OSD;
t                1531 net/ceph/osd_client.c 		if (t->last_force_resend < pi->last_force_request_resend) {
t                1532 net/ceph/osd_client.c 			t->last_force_resend = pi->last_force_request_resend;
t                1534 net/ceph/osd_client.c 		} else if (t->last_force_resend == 0) {
t                1540 net/ceph/osd_client.c 	ceph_oid_copy(&t->target_oid, &t->base_oid);
t                1541 net/ceph/osd_client.c 	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
t                1542 net/ceph/osd_client.c 	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
t                1543 net/ceph/osd_client.c 		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
t                1544 net/ceph/osd_client.c 			t->target_oloc.pool = pi->read_tier;
t                1545 net/ceph/osd_client.c 		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
t                1546 net/ceph/osd_client.c 			t->target_oloc.pool = pi->write_tier;
t                1548 net/ceph/osd_client.c 		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
t                1550 net/ceph/osd_client.c 			t->osd = CEPH_HOMELESS_OSD;
t                1556 net/ceph/osd_client.c 	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
t                1558 net/ceph/osd_client.c 	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
t                1562 net/ceph/osd_client.c 	    ceph_is_new_interval(&t->acting,
t                1564 net/ceph/osd_client.c 				 &t->up,
t                1566 net/ceph/osd_client.c 				 t->size,
t                1568 net/ceph/osd_client.c 				 t->min_size,
t                1570 net/ceph/osd_client.c 				 t->pg_num,
t                1572 net/ceph/osd_client.c 				 t->sort_bitwise,
t                1574 net/ceph/osd_client.c 				 t->recovery_deletes,
t                1579 net/ceph/osd_client.c 	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
t                1580 net/ceph/osd_client.c 		t->paused = false;
t                1583 net/ceph/osd_client.c 	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
t                1584 net/ceph/osd_client.c 			ceph_osds_changed(&t->acting, &acting, any_change);
t                1585 net/ceph/osd_client.c 	if (t->pg_num)
t                1586 net/ceph/osd_client.c 		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
t                1589 net/ceph/osd_client.c 		t->pgid = pgid; /* struct */
t                1590 net/ceph/osd_client.c 		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
t                1591 net/ceph/osd_client.c 		ceph_osds_copy(&t->acting, &acting);
t                1592 net/ceph/osd_client.c 		ceph_osds_copy(&t->up, &up);
t                1593 net/ceph/osd_client.c 		t->size = pi->size;
t                1594 net/ceph/osd_client.c 		t->min_size = pi->min_size;
t                1595 net/ceph/osd_client.c 		t->pg_num = pi->pg_num;
t                1596 net/ceph/osd_client.c 		t->pg_num_mask = pi->pg_num_mask;
t                1597 net/ceph/osd_client.c 		t->sort_bitwise = sort_bitwise;
t                1598 net/ceph/osd_client.c 		t->recovery_deletes = recovery_deletes;
t                1600 net/ceph/osd_client.c 		t->osd = acting.primary;
t                1609 net/ceph/osd_client.c 	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
t                1610 net/ceph/osd_client.c 	     legacy_change, force_resend, split, ct_res, t->osd);
t                1894 net/ceph/osd_client.c 				  const struct ceph_osd_request_target *t)
t                1898 net/ceph/osd_client.c 	hoid->oid = t->target_oid.name;
t                1899 net/ceph/osd_client.c 	hoid->oid_len = t->target_oid.name_len;
t                1901 net/ceph/osd_client.c 	hoid->hash = t->pgid.seed;
t                1903 net/ceph/osd_client.c 	if (t->target_oloc.pool_ns) {
t                1904 net/ceph/osd_client.c 		hoid->nspace = t->target_oloc.pool_ns->str;
t                1905 net/ceph/osd_client.c 		hoid->nspace_len = t->target_oloc.pool_ns->len;
t                1910 net/ceph/osd_client.c 	hoid->pool = t->target_oloc.pool;
t                2671 net/ceph/osd_client.c 	target_destroy(&lreq->t);
t                2708 net/ceph/osd_client.c 	target_init(&lreq->t);
t                3009 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
t                3010 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
t                3011 net/ceph/osd_client.c 	req->r_flags = lreq->t.flags;
t                3083 net/ceph/osd_client.c 	target_copy(&req->r_t, &lreq->t);
t                3114 net/ceph/osd_client.c 	calc_target(osdc, &lreq->t, false);
t                3115 net/ceph/osd_client.c 	osd = lookup_create_osd(osdc, lreq->t.osd, true);
t                3733 net/ceph/osd_client.c 	ct_res = calc_target(osdc, &lreq->t, true);
t                3737 net/ceph/osd_client.c 		osd = lookup_create_osd(osdc, lreq->t.osd, true);
t                3775 net/ceph/osd_client.c 			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
t                4263 net/ceph/osd_client.c static bool target_contained_by(const struct ceph_osd_request_target *t,
t                4270 net/ceph/osd_client.c 	hoid_fill_from_target(&hoid, t);
t                4585 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
t                4586 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
t                4637 net/ceph/osd_client.c 	ceph_oid_copy(&lreq->t.base_oid, oid);
t                4638 net/ceph/osd_client.c 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
t                4639 net/ceph/osd_client.c 	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
t                4687 net/ceph/osd_client.c 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
t                4688 net/ceph/osd_client.c 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
t                4841 net/ceph/osd_client.c 	ceph_oid_copy(&lreq->t.base_oid, oid);
t                4842 net/ceph/osd_client.c 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
t                4843 net/ceph/osd_client.c 	lreq->t.flags = CEPH_OSD_FLAG_READ;
t                  33 net/ceph/osdmap.c static int calc_bits_of(unsigned int t)
t                  36 net/ceph/osdmap.c 	while (t) {
t                  37 net/ceph/osdmap.c 		t = t >> 1;
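calc_bits_of() in osdmap.c, whose lines appear just above, is a shift-until-zero bit count: the number of significant bits in t. A standalone copy of the same loop follows; the sample values printed are chosen here purely as a sanity check.

    #include <stdio.h>

    /* Number of significant bits in t: 0 -> 0, 1 -> 1, 6 -> 3, 0xffff -> 16.
     * Mirrors the shift-until-zero loop in calc_bits_of(). */
    static int calc_bits_of(unsigned int t)
    {
            int b = 0;

            while (t) {
                    t >>= 1;
                    b++;
            }
            return b;
    }

    int main(void)
    {
            unsigned int samples[] = { 0, 1, 6, 0xffff };
            int i;

            for (i = 0; i < 4; i++)
                    printf("calc_bits_of(%u) = %d\n", samples[i],
                           calc_bits_of(samples[i]));
            return 0;
    }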
t                 205 net/core/drop_monitor.c static void sched_send_work(struct timer_list *t)
t                 207 net/core/drop_monitor.c 	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
t                8215 net/core/filter.c 				      FIELD_SIZEOF(struct minmax_sample, t));
t                  76 net/core/gen_estimator.c static void est_timer(struct timer_list *t)
t                  78 net/core/gen_estimator.c 	struct net_rate_estimator *est = from_timer(est, t, timer);
t                  54 net/core/neighbour.c static void neigh_timer_handler(struct timer_list *t);
t                1016 net/core/neighbour.c static void neigh_timer_handler(struct timer_list *t)
t                1019 net/core/neighbour.c 	struct neighbour *neigh = from_timer(neigh, t, timer);
t                1531 net/core/neighbour.c static void neigh_proxy_process(struct timer_list *t)
t                1533 net/core/neighbour.c 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
t                2703 net/core/neighbour.c 	int t, family, s_t;
t                2722 net/core/neighbour.c 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
t                2723 net/core/neighbour.c 		tbl = neigh_tables[t];
t                2727 net/core/neighbour.c 		if (t < s_t || (family && tbl->family != family))
t                2729 net/core/neighbour.c 		if (t > s_t)
t                2740 net/core/neighbour.c 	cb->args[0] = t;
t                3623 net/core/neighbour.c 	struct neigh_sysctl_table *t;
t                3628 net/core/neighbour.c 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
t                3629 net/core/neighbour.c 	if (!t)
t                3633 net/core/neighbour.c 		t->neigh_vars[i].data += (long) p;
t                3634 net/core/neighbour.c 		t->neigh_vars[i].extra1 = dev;
t                3635 net/core/neighbour.c 		t->neigh_vars[i].extra2 = p;
t                3641 net/core/neighbour.c 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
t                3642 net/core/neighbour.c 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
t                3646 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
t                3647 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
t                3648 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
t                3649 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
t                3654 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
t                3656 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
t                3658 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
t                3660 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
t                3669 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
t                3672 net/core/neighbour.c 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
t                3678 net/core/neighbour.c 		t->neigh_vars[0].procname = NULL;
t                3693 net/core/neighbour.c 	t->sysctl_header =
t                3694 net/core/neighbour.c 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
t                3695 net/core/neighbour.c 	if (!t->sysctl_header)
t                3698 net/core/neighbour.c 	p->sysctl_table = t;
t                3702 net/core/neighbour.c 	kfree(t);
t                3711 net/core/neighbour.c 		struct neigh_sysctl_table *t = p->sysctl_table;
t                3713 net/core/neighbour.c 		unregister_net_sysctl_table(t->sysctl_header);
t                3714 net/core/neighbour.c 		kfree(t);
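The neigh_sysctl_register() entries above show a pattern that reappears below for decnet's dn_dev and ipv4's devinet: kmemdup() a template ctl_table, rebase each entry's data pointer onto the per-device parameter block, register_net_sysctl() the copy, and kfree() it on failure or unregister. The user-space sketch below covers only the duplicate-and-rebase step; the structures, field names, and defaults are invented stand-ins for ctl_table.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented stand-in for a sysctl table entry: .data starts out pointing
     * into a global defaults block and is rebased onto a per-instance copy
     * after the kmemdup()-style duplication. */
    struct tbl_entry {
            const char *name;
            int *data;
    };

    struct params { int retrans_time; int gc_staletime; };

    static struct params defaults = { 100, 300 };

    static const struct tbl_entry template[] = {
            { "retrans_time", &defaults.retrans_time },
            { "gc_staletime", &defaults.gc_staletime },
    };

    int main(void)
    {
            struct params *p = malloc(sizeof(*p));
            struct tbl_entry *t = malloc(sizeof(template));
            size_t i, n = sizeof(template) / sizeof(template[0]);

            if (!p || !t)
                    return 1;
            *p = defaults;
            memcpy(t, template, sizeof(template));

            /* Rebase each .data pointer from &defaults onto the new block. */
            for (i = 0; i < n; i++)
                    t[i].data = (int *)((char *)p +
                                ((char *)t[i].data - (char *)&defaults));

            *t[0].data = 42;   /* now writes p->retrans_time, not the defaults */
            printf("p->retrans_time=%d defaults.retrans_time=%d\n",
                   p->retrans_time, defaults.retrans_time);

            free(t);
            free(p);
            return 0;
    }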
t                 189 net/core/net-procfs.c 	int t;
t                 197 net/core/net-procfs.c 	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
t                 198 net/core/net-procfs.c 		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
t                 232 net/core/pktgen.c #define   if_lock(t)           mutex_lock(&(t->if_lock));
t                 233 net/core/pktgen.c #define   if_unlock(t)           mutex_unlock(&(t->if_lock));
t                 463 net/core/pktgen.c static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
t                 464 net/core/pktgen.c static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
t                 465 net/core/pktgen.c static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
t                 472 net/core/pktgen.c static void pktgen_stop(struct pktgen_thread *t);
t                1720 net/core/pktgen.c 	struct pktgen_thread *t = seq->private;
t                1723 net/core/pktgen.c 	BUG_ON(!t);
t                1728 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
t                1734 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
t                1738 net/core/pktgen.c 	if (t->result[0])
t                1739 net/core/pktgen.c 		seq_printf(seq, "\nResult: %s\n", t->result);
t                1753 net/core/pktgen.c 	struct pktgen_thread *t = seq->private;
t                1791 net/core/pktgen.c 	if (!t) {
t                1797 net/core/pktgen.c 	pg_result = &(t->result[0]);
t                1811 net/core/pktgen.c 		ret = pktgen_add_device(t, f);
t                1823 net/core/pktgen.c 		t->control |= T_REMDEVALL;
t                1859 net/core/pktgen.c 	struct pktgen_thread *t;
t                1863 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
t                1864 net/core/pktgen.c 		pkt_dev = pktgen_find_dev(t, ifname, exact);
t                1868 net/core/pktgen.c 				t->control |= T_REMDEV;
t                1913 net/core/pktgen.c 	struct pktgen_thread *t;
t                1917 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
t                1920 net/core/pktgen.c 		if_lock(t);
t                1921 net/core/pktgen.c 		list_for_each_entry(pkt_dev, &t->if_list, list) {
t                1936 net/core/pktgen.c 		if_unlock(t);
t                2157 net/core/pktgen.c 	struct hrtimer_sleeper t;
t                2159 net/core/pktgen.c 	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
t                2160 net/core/pktgen.c 	hrtimer_set_expires(&t.timer, spin_until);
t                2162 net/core/pktgen.c 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
t                2175 net/core/pktgen.c 			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
t                2177 net/core/pktgen.c 			if (likely(t.task))
t                2180 net/core/pktgen.c 			hrtimer_cancel(&t.timer);
t                2181 net/core/pktgen.c 		} while (t.task && pkt_dev->running && !signal_pending(current));
t                2189 net/core/pktgen.c 	destroy_hrtimer_on_stack(&t.timer);
t                2273 net/core/pktgen.c 		__u16 t;
t                2275 net/core/pktgen.c 			t = prandom_u32() %
t                2280 net/core/pktgen.c 			t = pkt_dev->cur_queue_map + 1;
t                2281 net/core/pktgen.c 			if (t > pkt_dev->queue_map_max)
t                2282 net/core/pktgen.c 				t = pkt_dev->queue_map_min;
t                2284 net/core/pktgen.c 		pkt_dev->cur_queue_map = t;
t                2402 net/core/pktgen.c 			__u32 t;
t                2404 net/core/pktgen.c 				t = prandom_u32() % (imx - imn) + imn;
t                2406 net/core/pktgen.c 				t = ntohl(pkt_dev->cur_saddr);
t                2407 net/core/pktgen.c 				t++;
t                2408 net/core/pktgen.c 				if (t > imx)
t                2409 net/core/pktgen.c 					t = imn;
t                2412 net/core/pktgen.c 			pkt_dev->cur_saddr = htonl(t);
t                2421 net/core/pktgen.c 				__u32 t;
t                2426 net/core/pktgen.c 						t = prandom_u32() %
t                2428 net/core/pktgen.c 						s = htonl(t);
t                2436 net/core/pktgen.c 					t = ntohl(pkt_dev->cur_daddr);
t                2437 net/core/pktgen.c 					t++;
t                2438 net/core/pktgen.c 					if (t > imx) {
t                2439 net/core/pktgen.c 						t = imn;
t                2441 net/core/pktgen.c 					pkt_dev->cur_daddr = htonl(t);
t                2472 net/core/pktgen.c 		__u32 t;
t                2474 net/core/pktgen.c 			t = prandom_u32() %
t                2478 net/core/pktgen.c 			t = pkt_dev->cur_pkt_size + 1;
t                2479 net/core/pktgen.c 			if (t > pkt_dev->max_pkt_size)
t                2480 net/core/pktgen.c 				t = pkt_dev->min_pkt_size;
t                2482 net/core/pktgen.c 		pkt_dev->cur_pkt_size = t;
t                2997 net/core/pktgen.c static void pktgen_run(struct pktgen_thread *t)
t                3005 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
t                3027 net/core/pktgen.c 		t->control &= ~(T_STOP);
t                3032 net/core/pktgen.c 	struct pktgen_thread *t;
t                3038 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list)
t                3039 net/core/pktgen.c 		t->control |= T_STOP;
t                3044 net/core/pktgen.c static int thread_is_running(const struct pktgen_thread *t)
t                3049 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
t                3058 net/core/pktgen.c static int pktgen_wait_thread_run(struct pktgen_thread *t)
t                3060 net/core/pktgen.c 	while (thread_is_running(t)) {
t                3080 net/core/pktgen.c 	struct pktgen_thread *t;
t                3089 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
t                3090 net/core/pktgen.c 		sig = pktgen_wait_thread_run(t);
t                3096 net/core/pktgen.c 		list_for_each_entry(t, &pn->pktgen_threads, th_list)
t                3097 net/core/pktgen.c 			t->control |= (T_STOP);
t                3106 net/core/pktgen.c 	struct pktgen_thread *t;
t                3112 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list)
t                3113 net/core/pktgen.c 		t->control |= (T_RUN);
t                3125 net/core/pktgen.c 	struct pktgen_thread *t;
t                3131 net/core/pktgen.c 	list_for_each_entry(t, &pn->pktgen_threads, th_list)
t                3132 net/core/pktgen.c 		t->control |= (T_REMDEVALL);
t                3192 net/core/pktgen.c static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
t                3197 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
t                3210 net/core/pktgen.c static void pktgen_stop(struct pktgen_thread *t)
t                3218 net/core/pktgen.c 	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
t                3229 net/core/pktgen.c static void pktgen_rem_one_if(struct pktgen_thread *t)
t                3236 net/core/pktgen.c 	list_for_each_safe(q, n, &t->if_list) {
t                3245 net/core/pktgen.c 		pktgen_remove_device(t, cur);
t                3251 net/core/pktgen.c static void pktgen_rem_all_ifs(struct pktgen_thread *t)
t                3260 net/core/pktgen.c 	list_for_each_safe(q, n, &t->if_list) {
t                3266 net/core/pktgen.c 		pktgen_remove_device(t, cur);
t                3270 net/core/pktgen.c static void pktgen_rem_thread(struct pktgen_thread *t)
t                3273 net/core/pktgen.c 	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
t                3464 net/core/pktgen.c 	struct pktgen_thread *t = arg;
t                3466 net/core/pktgen.c 	int cpu = t->cpu;
t                3470 net/core/pktgen.c 	init_waitqueue_head(&t->queue);
t                3471 net/core/pktgen.c 	complete(&t->start_done);
t                3478 net/core/pktgen.c 		pkt_dev = next_to_run(t);
t                3480 net/core/pktgen.c 		if (unlikely(!pkt_dev && t->control == 0)) {
t                3481 net/core/pktgen.c 			if (t->net->pktgen_exiting)
t                3483 net/core/pktgen.c 			wait_event_interruptible_timeout(t->queue,
t                3484 net/core/pktgen.c 							 t->control != 0,
t                3499 net/core/pktgen.c 		if (t->control & T_STOP) {
t                3500 net/core/pktgen.c 			pktgen_stop(t);
t                3501 net/core/pktgen.c 			t->control &= ~(T_STOP);
t                3504 net/core/pktgen.c 		if (t->control & T_RUN) {
t                3505 net/core/pktgen.c 			pktgen_run(t);
t                3506 net/core/pktgen.c 			t->control &= ~(T_RUN);
t                3509 net/core/pktgen.c 		if (t->control & T_REMDEVALL) {
t                3510 net/core/pktgen.c 			pktgen_rem_all_ifs(t);
t                3511 net/core/pktgen.c 			t->control &= ~(T_REMDEVALL);
t                3514 net/core/pktgen.c 		if (t->control & T_REMDEV) {
t                3515 net/core/pktgen.c 			pktgen_rem_one_if(t);
t                3516 net/core/pktgen.c 			t->control &= ~(T_REMDEV);
t                3522 net/core/pktgen.c 	pr_debug("%s stopping all device\n", t->tsk->comm);
t                3523 net/core/pktgen.c 	pktgen_stop(t);
t                3525 net/core/pktgen.c 	pr_debug("%s removing all device\n", t->tsk->comm);
t                3526 net/core/pktgen.c 	pktgen_rem_all_ifs(t);
t                3528 net/core/pktgen.c 	pr_debug("%s removing thread\n", t->tsk->comm);
t                3529 net/core/pktgen.c 	pktgen_rem_thread(t);
t                3534 net/core/pktgen.c static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
t                3541 net/core/pktgen.c 	list_for_each_entry_rcu(p, &t->if_list, list)
t                3560 net/core/pktgen.c static int add_dev_to_thread(struct pktgen_thread *t,
t                3571 net/core/pktgen.c 	if_lock(t);
t                3580 net/core/pktgen.c 	pkt_dev->pg_thread = t;
t                3581 net/core/pktgen.c 	list_add_rcu(&pkt_dev->list, &t->if_list);
t                3584 net/core/pktgen.c 	if_unlock(t);
t                3590 net/core/pktgen.c static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
t                3594 net/core/pktgen.c 	int node = cpu_to_node(t->cpu);
t                3598 net/core/pktgen.c 	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
t                3635 net/core/pktgen.c 	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
t                3641 net/core/pktgen.c 	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
t                3665 net/core/pktgen.c 	return add_dev_to_thread(t, pkt_dev);
t                3679 net/core/pktgen.c 	struct pktgen_thread *t;
t                3683 net/core/pktgen.c 	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
t                3685 net/core/pktgen.c 	if (!t) {
t                3690 net/core/pktgen.c 	mutex_init(&t->if_lock);
t                3691 net/core/pktgen.c 	t->cpu = cpu;
t                3693 net/core/pktgen.c 	INIT_LIST_HEAD(&t->if_list);
t                3695 net/core/pktgen.c 	list_add_tail(&t->th_list, &pn->pktgen_threads);
t                3696 net/core/pktgen.c 	init_completion(&t->start_done);
t                3699 net/core/pktgen.c 				   t,
t                3703 net/core/pktgen.c 		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
t                3704 net/core/pktgen.c 		list_del(&t->th_list);
t                3705 net/core/pktgen.c 		kfree(t);
t                3709 net/core/pktgen.c 	t->tsk = p;
t                3711 net/core/pktgen.c 	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
t                3712 net/core/pktgen.c 			      &pktgen_thread_fops, t);
t                3715 net/core/pktgen.c 		       PG_PROC_DIR, t->tsk->comm);
t                3717 net/core/pktgen.c 		list_del(&t->th_list);
t                3718 net/core/pktgen.c 		kfree(t);
t                3722 net/core/pktgen.c 	t->net = pn;
t                3725 net/core/pktgen.c 	wait_for_completion(&t->start_done);
t                3733 net/core/pktgen.c static void _rem_dev_from_if_list(struct pktgen_thread *t,
t                3739 net/core/pktgen.c 	if_lock(t);
t                3740 net/core/pktgen.c 	list_for_each_safe(q, n, &t->if_list) {
t                3745 net/core/pktgen.c 	if_unlock(t);
t                3748 net/core/pktgen.c static int pktgen_remove_device(struct pktgen_thread *t,
t                3771 net/core/pktgen.c 	_rem_dev_from_if_list(t, pkt_dev);
t                3831 net/core/pktgen.c 	struct pktgen_thread *t;
t                3843 net/core/pktgen.c 		t = list_entry(q, struct pktgen_thread, th_list);
t                3844 net/core/pktgen.c 		list_del(&t->th_list);
t                3845 net/core/pktgen.c 		kthread_stop(t->tsk);
t                3846 net/core/pktgen.c 		put_task_struct(t->tsk);
t                3847 net/core/pktgen.c 		kfree(t);
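Among the pktgen.c entries above, the cur_queue_map, cur_saddr, and cur_pkt_size updates all take the same "next value in [min, max]" step: either a random pick or an increment that wraps back to min. The sketch below reproduces only that step; rand() stands in for prandom_u32(), the helper name is mine, and the exact rounding of the real ranges is not reproduced.

    #include <stdio.h>
    #include <stdlib.h>

    /* pktgen-style range walker: random pick or increment-with-wrap. */
    static unsigned int next_in_range(unsigned int cur, unsigned int min,
                                      unsigned int max, int randomize)
    {
            unsigned int t;

            if (randomize) {
                    t = min + (unsigned int)rand() % (max - min + 1);
            } else {
                    t = cur + 1;
                    if (t > max)
                            t = min;
            }
            return t;
    }

    int main(void)
    {
            unsigned int cur = 64;
            int i;

            for (i = 0; i < 4; i++) {
                    cur = next_in_range(cur, 64, 66, 0);
                    printf("sequential: %u\n", cur);
            }
            printf("random in [64, 1500]: %u\n", next_in_range(cur, 64, 1500, 1));
            return 0;
    }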
t                 119 net/dccp/ccids/ccid2.c 	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
t                 121 net/dccp/ccids/ccid2.c 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
t                 123 net/dccp/ccids/ccid2.c 		__tasklet_schedule(t);
t                 127 net/dccp/ccids/ccid2.c static void ccid2_hc_tx_rto_expire(struct timer_list *t)
t                 129 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
t                 185 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_no_feedback_timer(struct timer_list *t)
t                 187 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = from_timer(hc, t, tx_no_feedback_timer);
t                 124 net/dccp/timer.c static void dccp_write_timer(struct timer_list *t)
t                 127 net/dccp/timer.c 			from_timer(icsk, t, icsk_retransmit_timer);
t                 161 net/dccp/timer.c static void dccp_keepalive_timer(struct timer_list *t)
t                 163 net/dccp/timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 170 net/dccp/timer.c static void dccp_delack_timer(struct timer_list *t)
t                 173 net/dccp/timer.c 			from_timer(icsk, t, icsk_delack_timer);
t                 234 net/dccp/timer.c static void dccp_write_xmit_timer(struct timer_list *t)
t                 236 net/dccp/timer.c 	struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
t                 211 net/decnet/dn_dev.c 	struct dn_dev_sysctl_table *t;
t                 216 net/decnet/dn_dev.c 	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
t                 217 net/decnet/dn_dev.c 	if (t == NULL)
t                 220 net/decnet/dn_dev.c 	for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
t                 221 net/decnet/dn_dev.c 		long offset = (long)t->dn_dev_vars[i].data;
t                 222 net/decnet/dn_dev.c 		t->dn_dev_vars[i].data = ((char *)parms) + offset;
t                 228 net/decnet/dn_dev.c 	t->dn_dev_vars[0].extra1 = (void *)dev;
t                 230 net/decnet/dn_dev.c 	t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
t                 231 net/decnet/dn_dev.c 	if (t->sysctl_header == NULL)
t                 232 net/decnet/dn_dev.c 		kfree(t);
t                 234 net/decnet/dn_dev.c 		parms->sysctl = t;
t                 240 net/decnet/dn_dev.c 		struct dn_dev_sysctl_table *t = parms->sysctl;
t                 242 net/decnet/dn_dev.c 		unregister_net_sysctl_table(t->sysctl_header);
t                 243 net/decnet/dn_dev.c 		kfree(t);
t                1042 net/decnet/dn_dev.c static void dn_dev_timer_func(struct timer_list *t)
t                1044 net/decnet/dn_dev.c 	struct dn_dev *dn_db = from_timer(dn_db, t, timer);
t                 503 net/decnet/dn_neigh.c 	int t, n;
t                 518 net/decnet/dn_neigh.c 	if (s->t == s->n)
t                 521 net/decnet/dn_neigh.c 		s->t++;
t                 537 net/decnet/dn_neigh.c 	state.t = 0;
t                 544 net/decnet/dn_neigh.c 	return state.t;
t                 138 net/decnet/dn_nsp_out.c 	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
t                 140 net/decnet/dn_nsp_out.c 	t *= nsp_backoff[scp->nsp_rxtshift];
t                 142 net/decnet/dn_nsp_out.c 	if (t < HZ) t = HZ;
t                 143 net/decnet/dn_nsp_out.c 	if (t > (600*HZ)) t = (600*HZ);
t                 150 net/decnet/dn_nsp_out.c 	return t;
t                 345 net/decnet/dn_nsp_out.c 	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
t                 354 net/decnet/dn_nsp_out.c 	if ((jiffies - scp->stamp) > t)
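The dn_nsp_out.c entries above compute a timer interval from the smoothed RTT estimate: roughly (srtt >> 2) + rttvar, halved, scaled by the current backoff factor, and clamped to [HZ, 600*HZ]. A standalone rendering of that arithmetic follows; HZ is fixed to 100 and the sample inputs are made up, purely for the printout.

    #include <stdio.h>

    #define HZ 100   /* assumed tick rate, for illustration only */

    /* DECnet-NSP-style interval: ((srtt >> 2) + rttvar) >> 1, scaled by the
     * current backoff factor and clamped to [HZ, 600*HZ], mirroring the
     * dn_nsp_out.c lines listed above. */
    static unsigned long nsp_interval(unsigned long srtt, unsigned long rttvar,
                                      unsigned long backoff)
    {
            unsigned long t = ((srtt >> 2) + rttvar) >> 1;

            t *= backoff;
            if (t < HZ)
                    t = HZ;
            if (t > 600 * HZ)
                    t = 600 * HZ;
            return t;
    }

    int main(void)
    {
            printf("interval(srtt=800, rttvar=40, backoff=1)  = %lu\n",
                   nsp_interval(800, 40, 1));
            printf("interval(srtt=800, rttvar=40, backoff=64) = %lu\n",
                   nsp_interval(800, 40, 64));
            return 0;
    }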
t                 794 net/decnet/dn_table.c 	struct dn_hash *t = (struct dn_hash *)tb->data;
t                 797 net/decnet/dn_table.c 	for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
t                 838 net/decnet/dn_table.c 	struct dn_fib_table *t;
t                 849 net/decnet/dn_table.c 	hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
t                 850 net/decnet/dn_table.c 		if (t->n == n) {
t                 852 net/decnet/dn_table.c 			return t;
t                 865 net/decnet/dn_table.c 	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
t                 867 net/decnet/dn_table.c 	if (t == NULL)
t                 870 net/decnet/dn_table.c 	t->n = n;
t                 871 net/decnet/dn_table.c 	t->insert = dn_fib_table_insert;
t                 872 net/decnet/dn_table.c 	t->delete = dn_fib_table_delete;
t                 873 net/decnet/dn_table.c 	t->lookup = dn_fib_table_lookup;
t                 874 net/decnet/dn_table.c 	t->flush  = dn_fib_table_flush;
t                 875 net/decnet/dn_table.c 	t->dump = dn_fib_table_dump;
t                 876 net/decnet/dn_table.c 	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
t                 878 net/decnet/dn_table.c 	return t;
t                 916 net/decnet/dn_table.c 	struct dn_fib_table *t;
t                 922 net/decnet/dn_table.c 		hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
t                 924 net/decnet/dn_table.c 			hlist_del(&t->hlist);
t                 925 net/decnet/dn_table.c 			kfree(t);
t                  37 net/decnet/dn_timer.c static void dn_slow_timer(struct timer_list *t);
t                  50 net/decnet/dn_timer.c static void dn_slow_timer(struct timer_list *t)
t                  52 net/decnet/dn_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 323 net/hsr/hsr_device.c static void hsr_announce(struct timer_list *t)
t                 329 net/hsr/hsr_device.c 	hsr = from_timer(hsr, t, announce_timer);
t                 385 net/hsr/hsr_framereg.c void hsr_prune_nodes(struct timer_list *t)
t                 387 net/hsr/hsr_framereg.c 	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
t                  32 net/hsr/hsr_framereg.h void hsr_prune_nodes(struct timer_list *t);
t                  44 net/ieee802154/6lowpan/reassembly.c static void lowpan_frag_expire(struct timer_list *t)
t                  46 net/ieee802154/6lowpan/reassembly.c 	struct inet_frag_queue *frag = from_timer(frag, t, timer);
t                2560 net/ipv4/devinet.c 	struct devinet_sysctl_table *t;
t                2563 net/ipv4/devinet.c 	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
t                2564 net/ipv4/devinet.c 	if (!t)
t                2567 net/ipv4/devinet.c 	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
t                2568 net/ipv4/devinet.c 		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
t                2569 net/ipv4/devinet.c 		t->devinet_vars[i].extra1 = p;
t                2570 net/ipv4/devinet.c 		t->devinet_vars[i].extra2 = net;
t                2575 net/ipv4/devinet.c 	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
t                2576 net/ipv4/devinet.c 	if (!t->sysctl_header)
t                2579 net/ipv4/devinet.c 	p->sysctl = t;
t                2586 net/ipv4/devinet.c 	kfree(t);
t                2594 net/ipv4/devinet.c 	struct devinet_sysctl_table *t = cnf->sysctl;
t                2596 net/ipv4/devinet.c 	if (t) {
t                2598 net/ipv4/devinet.c 		unregister_net_sysctl_table(t->sysctl_header);
t                2599 net/ipv4/devinet.c 		kfree(t);
t                 173 net/ipv4/fib_trie.c static struct key_vector *resize(struct trie *t, struct key_vector *tn);
t                 509 net/ipv4/fib_trie.c static struct key_vector *replace(struct trie *t,
t                 532 net/ipv4/fib_trie.c 			tn = resize(t, inode);
t                 538 net/ipv4/fib_trie.c static struct key_vector *inflate(struct trie *t,
t                 626 net/ipv4/fib_trie.c 	return replace(t, oldtnode, tn);
t                 634 net/ipv4/fib_trie.c static struct key_vector *halve(struct trie *t,
t                 681 net/ipv4/fib_trie.c 	return replace(t, oldtnode, tn);
t                 689 net/ipv4/fib_trie.c static struct key_vector *collapse(struct trie *t,
t                 849 net/ipv4/fib_trie.c static struct key_vector *resize(struct trie *t, struct key_vector *tn)
t                 852 net/ipv4/fib_trie.c 	struct trie_use_stats __percpu *stats = t->stats;
t                 871 net/ipv4/fib_trie.c 		tp = inflate(t, tn);
t                 894 net/ipv4/fib_trie.c 		tp = halve(t, tn);
t                 908 net/ipv4/fib_trie.c 		return collapse(t, tn);
t                 937 net/ipv4/fib_trie.c static struct key_vector *fib_find_node(struct trie *t,
t                 940 net/ipv4/fib_trie.c 	struct key_vector *pn, *n = t->kv;
t                1008 net/ipv4/fib_trie.c static void trie_rebalance(struct trie *t, struct key_vector *tn)
t                1011 net/ipv4/fib_trie.c 		tn = resize(t, tn);
t                1014 net/ipv4/fib_trie.c static int fib_insert_node(struct trie *t, struct key_vector *tp,
t                1055 net/ipv4/fib_trie.c 	trie_rebalance(t, tp);
t                1067 net/ipv4/fib_trie.c static int fib_insert_alias(struct trie *t, struct key_vector *tp,
t                1072 net/ipv4/fib_trie.c 		return fib_insert_node(t, tp, new, key);
t                1124 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                1148 net/ipv4/fib_trie.c 	l = fib_find_node(t, &tp, key);
t                1275 net/ipv4/fib_trie.c 	err = fib_insert_alias(t, tp, l, new_fa, fa, key);
t                1315 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *) tb->tb_data;
t                1317 net/ipv4/fib_trie.c 	struct trie_use_stats __percpu *stats = t->stats;
t                1325 net/ipv4/fib_trie.c 	pn = t->kv;
t                1515 net/ipv4/fib_trie.c static void fib_remove_alias(struct trie *t, struct key_vector *tp,
t                1533 net/ipv4/fib_trie.c 		trie_rebalance(t, tp);
t                1550 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *) tb->tb_data;
t                1563 net/ipv4/fib_trie.c 	l = fib_find_node(t, &tp, key);
t                1571 net/ipv4/fib_trie.c 	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
t                1607 net/ipv4/fib_trie.c 	fib_remove_alias(t, tp, l, fa_to_delete);
t                1677 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                1678 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
t                1728 net/ipv4/fib_trie.c 	free_percpu(t->stats);
t                1794 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                1795 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
t                1817 net/ipv4/fib_trie.c 			pn = resize(t, pn);
t                1863 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                1864 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
t                1887 net/ipv4/fib_trie.c 			pn = resize(t, pn);
t                1951 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                1952 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
t                2043 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                2044 net/ipv4/fib_trie.c 	struct key_vector *l, *tp = t->kv;
t                2074 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                2077 net/ipv4/fib_trie.c 		free_percpu(t->stats);
t                2170 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
t                2171 net/ipv4/fib_trie.c 	struct key_vector *l, *tp = t->kv;
t                2225 net/ipv4/fib_trie.c 	struct trie *t;
t                2242 net/ipv4/fib_trie.c 	t = (struct trie *) tb->tb_data;
t                2243 net/ipv4/fib_trie.c 	t->kv[0].pos = KEYLENGTH;
t                2244 net/ipv4/fib_trie.c 	t->kv[0].slen = KEYLENGTH;
t                2246 net/ipv4/fib_trie.c 	t->stats = alloc_percpu(struct trie_use_stats);
t                2247 net/ipv4/fib_trie.c 	if (!t->stats) {
t                2310 net/ipv4/fib_trie.c 					     struct trie *t)
t                2314 net/ipv4/fib_trie.c 	if (!t)
t                2317 net/ipv4/fib_trie.c 	pn = t->kv;
t                2335 net/ipv4/fib_trie.c static void trie_collect_stats(struct trie *t, struct trie_stat *s)
t                2343 net/ipv4/fib_trie.c 	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
t                2464 net/ipv4/fib_trie.c 			struct trie *t = (struct trie *) tb->tb_data;
t                2467 net/ipv4/fib_trie.c 			if (!t)
t                2472 net/ipv4/fib_trie.c 			trie_collect_stats(t, &stat);
t                2475 net/ipv4/fib_trie.c 			trie_show_usage(seq, t->stats);
t                2600 net/ipv4/fib_trie.c static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
t                2602 net/ipv4/fib_trie.c 	if (t < __RTN_MAX && rtn_type_names[t])
t                2603 net/ipv4/fib_trie.c 		return rtn_type_names[t];
t                2604 net/ipv4/fib_trie.c 	snprintf(buf, len, "type %u", t);
t                2705 net/ipv4/fib_trie.c 	struct trie *t;
t                2714 net/ipv4/fib_trie.c 	t = (struct trie *)tb->tb_data;
t                2715 net/ipv4/fib_trie.c 	iter->tnode = t->kv;
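The last few fib_trie.c entries above (rtn_type()) use a lookup-or-format helper: return a static name when the type is known, otherwise snprintf() a fallback into the caller's buffer and return that. A self-contained sketch of the same idiom; the name table here is illustrative, not the kernel's rtn_type_names[].

    #include <stdio.h>
    #include <stddef.h>

    /* rtn_type()-style helper: known values map to static strings, unknown
     * values are formatted into the caller-supplied buffer. */
    static const char *type_name(char *buf, size_t len, unsigned int t)
    {
            static const char * const names[] = { "UNSPEC", "UNICAST", "LOCAL" };

            if (t < sizeof(names) / sizeof(names[0]) && names[t])
                    return names[t];
            snprintf(buf, len, "type %u", t);
            return buf;
    }

    int main(void)
    {
            char buf[32];

            printf("%s\n", type_name(buf, sizeof(buf), 1));    /* "UNICAST" */
            printf("%s\n", type_name(buf, sizeof(buf), 42));   /* "type 42" */
            return 0;
    }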
t                 796 net/ipv4/igmp.c static void igmp_gq_timer_expire(struct timer_list *t)
t                 798 net/ipv4/igmp.c 	struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
t                 805 net/ipv4/igmp.c static void igmp_ifc_timer_expire(struct timer_list *t)
t                 807 net/ipv4/igmp.c 	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
t                 828 net/ipv4/igmp.c static void igmp_timer_expire(struct timer_list *t)
t                 830 net/ipv4/igmp.c 	struct ip_mc_list *im = from_timer(im, t, timer);
t                 529 net/ipv4/inet_connection_sock.c 			       void (*retransmit_handler)(struct timer_list *t),
t                 530 net/ipv4/inet_connection_sock.c 			       void (*delack_handler)(struct timer_list *t),
t                 531 net/ipv4/inet_connection_sock.c 			       void (*keepalive_handler)(struct timer_list *t))
t                 710 net/ipv4/inet_connection_sock.c static void reqsk_timer_handler(struct timer_list *t)
t                 712 net/ipv4/inet_connection_sock.c 	struct request_sock *req = from_timer(req, t, rsk_timer);
t                 144 net/ipv4/inet_timewait_sock.c static void tw_timer_handler(struct timer_list *t)
t                 146 net/ipv4/inet_timewait_sock.c 	struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);
t                 133 net/ipv4/ip_fragment.c static void ip_expire(struct timer_list *t)
t                 135 net/ipv4/ip_fragment.c 	struct inet_frag_queue *frag = from_timer(frag, t, timer);
t                 142 net/ipv4/ip_gre.c 	struct ip_tunnel *t;
t                 153 net/ipv4/ip_gre.c 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
t                 156 net/ipv4/ip_gre.c 	if (!t)
t                 196 net/ipv4/ip_gre.c 	if (t->parms.iph.daddr == 0 ||
t                 197 net/ipv4/ip_gre.c 	    ipv4_is_multicast(t->parms.iph.daddr))
t                 200 net/ipv4/ip_gre.c 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
t                 203 net/ipv4/ip_gre.c 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t                 204 net/ipv4/ip_gre.c 		t->err_count++;
t                 206 net/ipv4/ip_gre.c 		t->err_count = 1;
t                 207 net/ipv4/ip_gre.c 	t->err_time = jiffies;
t                 770 net/ipv4/ip_gre.c 		struct ip_tunnel *t = netdev_priv(dev);
t                 772 net/ipv4/ip_gre.c 		t->parms.i_flags = p.i_flags;
t                 773 net/ipv4/ip_gre.c 		t->parms.o_flags = p.o_flags;
t                 819 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 823 net/ipv4/ip_gre.c 	iph = skb_push(skb, t->hlen + sizeof(*iph));
t                 825 net/ipv4/ip_gre.c 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
t                 828 net/ipv4/ip_gre.c 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
t                 836 net/ipv4/ip_gre.c 		return t->hlen + sizeof(*iph);
t                 838 net/ipv4/ip_gre.c 	return -(t->hlen + sizeof(*iph));
t                 856 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 858 net/ipv4/ip_gre.c 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
t                 862 net/ipv4/ip_gre.c 		rt = ip_route_output_gre(t->net, &fl4,
t                 863 net/ipv4/ip_gre.c 					 t->parms.iph.daddr,
t                 864 net/ipv4/ip_gre.c 					 t->parms.iph.saddr,
t                 865 net/ipv4/ip_gre.c 					 t->parms.o_key,
t                 866 net/ipv4/ip_gre.c 					 RT_TOS(t->parms.iph.tos),
t                 867 net/ipv4/ip_gre.c 					 t->parms.link);
t                 874 net/ipv4/ip_gre.c 		t->mlink = dev->ifindex;
t                 875 net/ipv4/ip_gre.c 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
t                 882 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 884 net/ipv4/ip_gre.c 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
t                 886 net/ipv4/ip_gre.c 		in_dev = inetdev_by_index(t->net, t->mlink);
t                 888 net/ipv4/ip_gre.c 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
t                1094 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1131 net/ipv4/ip_gre.c 		if (t->ignore_df)
t                1137 net/ipv4/ip_gre.c 		t->collect_md = true;
t                1146 net/ipv4/ip_gre.c 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
t                1161 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1171 net/ipv4/ip_gre.c 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
t                1173 net/ipv4/ip_gre.c 		if (t->erspan_ver != 1 && t->erspan_ver != 2)
t                1177 net/ipv4/ip_gre.c 	if (t->erspan_ver == 1) {
t                1179 net/ipv4/ip_gre.c 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
t                1180 net/ipv4/ip_gre.c 			if (t->index & ~INDEX_MASK)
t                1183 net/ipv4/ip_gre.c 	} else if (t->erspan_ver == 2) {
t                1185 net/ipv4/ip_gre.c 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
t                1186 net/ipv4/ip_gre.c 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
t                1190 net/ipv4/ip_gre.c 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
t                1191 net/ipv4/ip_gre.c 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
t                1299 net/ipv4/ip_gre.c 		struct ip_tunnel *t = netdev_priv(dev);
t                1300 net/ipv4/ip_gre.c 		int err = ip_tunnel_encap_setup(t, &ipencap);
t                1349 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1350 net/ipv4/ip_gre.c 	__u32 fwmark = t->fwmark;
t                1366 net/ipv4/ip_gre.c 	t->parms.i_flags = p.i_flags;
t                1367 net/ipv4/ip_gre.c 	t->parms.o_flags = p.o_flags;
t                1378 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1379 net/ipv4/ip_gre.c 	__u32 fwmark = t->fwmark;
t                1395 net/ipv4/ip_gre.c 	t->parms.i_flags = p.i_flags;
t                1396 net/ipv4/ip_gre.c 	t->parms.o_flags = p.o_flags;
t                1451 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1452 net/ipv4/ip_gre.c 	struct ip_tunnel_parm *p = &t->parms;
t                1455 net/ipv4/ip_gre.c 	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
t                1456 net/ipv4/ip_gre.c 		if (!t->collect_md)
t                1459 net/ipv4/ip_gre.c 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
t                1462 net/ipv4/ip_gre.c 		if (t->erspan_ver == 1) {
t                1463 net/ipv4/ip_gre.c 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
t                1466 net/ipv4/ip_gre.c 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
t                1468 net/ipv4/ip_gre.c 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
t                1486 net/ipv4/ip_gre.c 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
t                1490 net/ipv4/ip_gre.c 			t->encap.type) ||
t                1492 net/ipv4/ip_gre.c 			 t->encap.sport) ||
t                1494 net/ipv4/ip_gre.c 			 t->encap.dport) ||
t                1496 net/ipv4/ip_gre.c 			t->encap.flags))
t                1499 net/ipv4/ip_gre.c 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
t                1502 net/ipv4/ip_gre.c 	if (t->collect_md) {
t                1515 net/ipv4/ip_gre.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1523 net/ipv4/ip_gre.c 	t->erspan_ver = 1;
t                1601 net/ipv4/ip_gre.c 	struct ip_tunnel *t;
t                1612 net/ipv4/ip_gre.c 	t = netdev_priv(dev);
t                1613 net/ipv4/ip_gre.c 	t->collect_md = true;
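Earlier in the ip_gre.c entries, the err_count / err_time lines implement a simple burst counter: an error arriving within IPTUNNEL_ERR_TIMEO of the previous one increments the count, otherwise the count restarts at 1, and the timestamp is always refreshed. The sketch below mimics that bookkeeping with an artificial tick counter and an assumed window; it uses a plain comparison where the kernel uses time_before() to survive jiffies wraparound.

    #include <stdio.h>

    #define ERR_TIMEO 10   /* assumed window, in abstract ticks */

    struct err_state {
            unsigned long err_time;   /* tick of the last error   */
            int err_count;            /* errors within the window */
    };

    /* Bump the count while errors stay inside the window, else restart at 1. */
    static void note_error(struct err_state *s, unsigned long now)
    {
            if (now < s->err_time + ERR_TIMEO)
                    s->err_count++;
            else
                    s->err_count = 1;
            s->err_time = now;
    }

    int main(void)
    {
            struct err_state s = { 0, 0 };
            unsigned long ticks[] = { 1, 5, 8, 30, 32 };
            int i;

            for (i = 0; i < 5; i++) {
                    note_error(&s, ticks[i]);
                    printf("tick=%lu count=%d\n", ticks[i], s.err_count);
            }
            return 0;
    }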
t                  89 net/ipv4/ip_tunnel.c 	struct ip_tunnel *t, *cand = NULL;
t                  95 net/ipv4/ip_tunnel.c 	hlist_for_each_entry_rcu(t, head, hash_node) {
t                  96 net/ipv4/ip_tunnel.c 		if (local != t->parms.iph.saddr ||
t                  97 net/ipv4/ip_tunnel.c 		    remote != t->parms.iph.daddr ||
t                  98 net/ipv4/ip_tunnel.c 		    !(t->dev->flags & IFF_UP))
t                 101 net/ipv4/ip_tunnel.c 		if (!ip_tunnel_key_match(&t->parms, flags, key))
t                 104 net/ipv4/ip_tunnel.c 		if (t->parms.link == link)
t                 105 net/ipv4/ip_tunnel.c 			return t;
t                 107 net/ipv4/ip_tunnel.c 			cand = t;
t                 110 net/ipv4/ip_tunnel.c 	hlist_for_each_entry_rcu(t, head, hash_node) {
t                 111 net/ipv4/ip_tunnel.c 		if (remote != t->parms.iph.daddr ||
t                 112 net/ipv4/ip_tunnel.c 		    t->parms.iph.saddr != 0 ||
t                 113 net/ipv4/ip_tunnel.c 		    !(t->dev->flags & IFF_UP))
t                 116 net/ipv4/ip_tunnel.c 		if (!ip_tunnel_key_match(&t->parms, flags, key))
t                 119 net/ipv4/ip_tunnel.c 		if (t->parms.link == link)
t                 120 net/ipv4/ip_tunnel.c 			return t;
t                 122 net/ipv4/ip_tunnel.c 			cand = t;
t                 128 net/ipv4/ip_tunnel.c 	hlist_for_each_entry_rcu(t, head, hash_node) {
t                 129 net/ipv4/ip_tunnel.c 		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
t                 130 net/ipv4/ip_tunnel.c 		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
t                 133 net/ipv4/ip_tunnel.c 		if (!(t->dev->flags & IFF_UP))
t                 136 net/ipv4/ip_tunnel.c 		if (!ip_tunnel_key_match(&t->parms, flags, key))
t                 139 net/ipv4/ip_tunnel.c 		if (t->parms.link == link)
t                 140 net/ipv4/ip_tunnel.c 			return t;
t                 142 net/ipv4/ip_tunnel.c 			cand = t;
t                 145 net/ipv4/ip_tunnel.c 	hlist_for_each_entry_rcu(t, head, hash_node) {
t                 146 net/ipv4/ip_tunnel.c 		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
t                 147 net/ipv4/ip_tunnel.c 		    t->parms.iph.saddr != 0 ||
t                 148 net/ipv4/ip_tunnel.c 		    t->parms.iph.daddr != 0 ||
t                 149 net/ipv4/ip_tunnel.c 		    !(t->dev->flags & IFF_UP))
t                 152 net/ipv4/ip_tunnel.c 		if (t->parms.link == link)
t                 153 net/ipv4/ip_tunnel.c 			return t;
t                 155 net/ipv4/ip_tunnel.c 			cand = t;
t                 161 net/ipv4/ip_tunnel.c 	t = rcu_dereference(itn->collect_md_tun);
t                 162 net/ipv4/ip_tunnel.c 	if (t && t->dev->flags & IFF_UP)
t                 163 net/ipv4/ip_tunnel.c 		return t;
t                 191 net/ipv4/ip_tunnel.c static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
t                 193 net/ipv4/ip_tunnel.c 	struct hlist_head *head = ip_bucket(itn, &t->parms);
t                 195 net/ipv4/ip_tunnel.c 	if (t->collect_md)
t                 196 net/ipv4/ip_tunnel.c 		rcu_assign_pointer(itn->collect_md_tun, t);
t                 197 net/ipv4/ip_tunnel.c 	hlist_add_head_rcu(&t->hash_node, head);
t                 200 net/ipv4/ip_tunnel.c static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
t                 202 net/ipv4/ip_tunnel.c 	if (t->collect_md)
t                 204 net/ipv4/ip_tunnel.c 	hlist_del_init_rcu(&t->hash_node);
t                 216 net/ipv4/ip_tunnel.c 	struct ip_tunnel *t = NULL;
t                 219 net/ipv4/ip_tunnel.c 	hlist_for_each_entry_rcu(t, head, hash_node) {
t                 220 net/ipv4/ip_tunnel.c 		if (local == t->parms.iph.saddr &&
t                 221 net/ipv4/ip_tunnel.c 		    remote == t->parms.iph.daddr &&
t                 222 net/ipv4/ip_tunnel.c 		    link == t->parms.link &&
t                 223 net/ipv4/ip_tunnel.c 		    type == t->dev->type &&
t                 224 net/ipv4/ip_tunnel.c 		    ip_tunnel_key_match(&t->parms, flags, key))
t                 227 net/ipv4/ip_tunnel.c 	return t;
t                 462 net/ipv4/ip_tunnel.c int ip_tunnel_encap_setup(struct ip_tunnel *t,
t                 467 net/ipv4/ip_tunnel.c 	memset(&t->encap, 0, sizeof(t->encap));
t                 473 net/ipv4/ip_tunnel.c 	t->encap.type = ipencap->type;
t                 474 net/ipv4/ip_tunnel.c 	t->encap.sport = ipencap->sport;
t                 475 net/ipv4/ip_tunnel.c 	t->encap.dport = ipencap->dport;
t                 476 net/ipv4/ip_tunnel.c 	t->encap.flags = ipencap->flags;
t                 478 net/ipv4/ip_tunnel.c 	t->encap_hlen = hlen;
t                 479 net/ipv4/ip_tunnel.c 	t->hlen = t->encap_hlen + t->tun_hlen;
t                 829 net/ipv4/ip_tunnel.c 			     struct ip_tunnel *t,
t                 835 net/ipv4/ip_tunnel.c 	ip_tunnel_del(itn, t);
t                 836 net/ipv4/ip_tunnel.c 	t->parms.iph.saddr = p->iph.saddr;
t                 837 net/ipv4/ip_tunnel.c 	t->parms.iph.daddr = p->iph.daddr;
t                 838 net/ipv4/ip_tunnel.c 	t->parms.i_key = p->i_key;
t                 839 net/ipv4/ip_tunnel.c 	t->parms.o_key = p->o_key;
t                 844 net/ipv4/ip_tunnel.c 	ip_tunnel_add(itn, t);
t                 846 net/ipv4/ip_tunnel.c 	t->parms.iph.ttl = p->iph.ttl;
t                 847 net/ipv4/ip_tunnel.c 	t->parms.iph.tos = p->iph.tos;
t                 848 net/ipv4/ip_tunnel.c 	t->parms.iph.frag_off = p->iph.frag_off;
t                 850 net/ipv4/ip_tunnel.c 	if (t->parms.link != p->link || t->fwmark != fwmark) {
t                 853 net/ipv4/ip_tunnel.c 		t->parms.link = p->link;
t                 854 net/ipv4/ip_tunnel.c 		t->fwmark = fwmark;
t                 859 net/ipv4/ip_tunnel.c 	dst_cache_reset(&t->dst_cache);
t                 866 net/ipv4/ip_tunnel.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 867 net/ipv4/ip_tunnel.c 	struct net *net = t->net;
t                 868 net/ipv4/ip_tunnel.c 	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
t                 873 net/ipv4/ip_tunnel.c 			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
t                 874 net/ipv4/ip_tunnel.c 			if (!t)
t                 875 net/ipv4/ip_tunnel.c 				t = netdev_priv(dev);
t                 877 net/ipv4/ip_tunnel.c 		memcpy(p, &t->parms, sizeof(*p));
t                 894 net/ipv4/ip_tunnel.c 		t = ip_tunnel_find(itn, p, itn->type);
t                 897 net/ipv4/ip_tunnel.c 			if (!t) {
t                 898 net/ipv4/ip_tunnel.c 				t = ip_tunnel_create(net, itn, p);
t                 899 net/ipv4/ip_tunnel.c 				err = PTR_ERR_OR_ZERO(t);
t                 907 net/ipv4/ip_tunnel.c 			if (t) {
t                 908 net/ipv4/ip_tunnel.c 				if (t->dev != dev) {
t                 925 net/ipv4/ip_tunnel.c 				t = netdev_priv(dev);
t                 929 net/ipv4/ip_tunnel.c 		if (t) {
t                 931 net/ipv4/ip_tunnel.c 			ip_tunnel_update(itn, t, dev, p, true, 0);
t                 944 net/ipv4/ip_tunnel.c 			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
t                 945 net/ipv4/ip_tunnel.c 			if (!t)
t                 948 net/ipv4/ip_tunnel.c 			if (t == netdev_priv(itn->fb_tunnel_dev))
t                 950 net/ipv4/ip_tunnel.c 			dev = t->dev;
t                1084 net/ipv4/ip_tunnel.c 		struct ip_tunnel *t;
t                1088 net/ipv4/ip_tunnel.c 		hlist_for_each_entry_safe(t, n, thead, hash_node)
t                1092 net/ipv4/ip_tunnel.c 			if (!net_eq(dev_net(t->dev), net))
t                1093 net/ipv4/ip_tunnel.c 				unregister_netdevice_queue(t->dev, head);
t                1169 net/ipv4/ip_tunnel.c 	struct ip_tunnel *t;
t                1177 net/ipv4/ip_tunnel.c 	t = ip_tunnel_find(itn, p, dev->type);
t                1179 net/ipv4/ip_tunnel.c 	if (t) {
t                1180 net/ipv4/ip_tunnel.c 		if (t->dev != dev)
t                1183 net/ipv4/ip_tunnel.c 		t = tunnel;
t                1199 net/ipv4/ip_tunnel.c 	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
t                 589 net/ipv4/ip_vti.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 590 net/ipv4/ip_vti.c 	__u32 fwmark = t->fwmark;
t                 617 net/ipv4/ip_vti.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 618 net/ipv4/ip_vti.c 	struct ip_tunnel_parm *p = &t->parms;
t                 625 net/ipv4/ip_vti.c 	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
t                  59 net/ipv4/ipcomp.c 	struct xfrm_state *t;
t                  61 net/ipv4/ipcomp.c 	t = xfrm_state_alloc(net);
t                  62 net/ipv4/ipcomp.c 	if (!t)
t                  65 net/ipv4/ipcomp.c 	t->id.proto = IPPROTO_IPIP;
t                  66 net/ipv4/ipcomp.c 	t->id.spi = x->props.saddr.a4;
t                  67 net/ipv4/ipcomp.c 	t->id.daddr.a4 = x->id.daddr.a4;
t                  68 net/ipv4/ipcomp.c 	memcpy(&t->sel, &x->sel, sizeof(t->sel));
t                  69 net/ipv4/ipcomp.c 	t->props.family = AF_INET;
t                  70 net/ipv4/ipcomp.c 	t->props.mode = x->props.mode;
t                  71 net/ipv4/ipcomp.c 	t->props.saddr.a4 = x->props.saddr.a4;
t                  72 net/ipv4/ipcomp.c 	t->props.flags = x->props.flags;
t                  73 net/ipv4/ipcomp.c 	t->props.extra_flags = x->props.extra_flags;
t                  74 net/ipv4/ipcomp.c 	memcpy(&t->mark, &x->mark, sizeof(t->mark));
t                  76 net/ipv4/ipcomp.c 	if (xfrm_init_state(t))
t                  79 net/ipv4/ipcomp.c 	atomic_set(&t->tunnel_users, 1);
t                  81 net/ipv4/ipcomp.c 	return t;
t                  84 net/ipv4/ipcomp.c 	t->km.state = XFRM_STATE_DEAD;
t                  85 net/ipv4/ipcomp.c 	xfrm_state_put(t);
t                  86 net/ipv4/ipcomp.c 	t = NULL;
t                  98 net/ipv4/ipcomp.c 	struct xfrm_state *t;
t                 101 net/ipv4/ipcomp.c 	t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
t                 103 net/ipv4/ipcomp.c 	if (!t) {
t                 104 net/ipv4/ipcomp.c 		t = ipcomp_tunnel_create(x);
t                 105 net/ipv4/ipcomp.c 		if (!t) {
t                 109 net/ipv4/ipcomp.c 		xfrm_state_insert(t);
t                 110 net/ipv4/ipcomp.c 		xfrm_state_hold(t);
t                 112 net/ipv4/ipcomp.c 	x->tunnel = t;
t                 113 net/ipv4/ipcomp.c 	atomic_inc(&t->tunnel_users);
t                 135 net/ipv4/ipip.c 	struct ip_tunnel *t;
t                 138 net/ipv4/ipip.c 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
t                 140 net/ipv4/ipip.c 	if (!t) {
t                 173 net/ipv4/ipip.c 		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
t                 178 net/ipv4/ipip.c 		ipv4_redirect(skb, net, t->parms.link, iph->protocol);
t                 182 net/ipv4/ipip.c 	if (t->parms.iph.daddr == 0) {
t                 187 net/ipv4/ipip.c 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
t                 190 net/ipv4/ipip.c 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t                 191 net/ipv4/ipip.c 		t->err_count++;
t                 193 net/ipv4/ipip.c 		t->err_count = 1;
t                 194 net/ipv4/ipip.c 	t->err_time = jiffies;
t                 498 net/ipv4/ipip.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 504 net/ipv4/ipip.c 		int err = ip_tunnel_encap_setup(t, &ipencap);
t                 510 net/ipv4/ipip.c 	ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
t                 518 net/ipv4/ipip.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 522 net/ipv4/ipip.c 	__u32 fwmark = t->fwmark;
t                 525 net/ipv4/ipip.c 		int err = ip_tunnel_encap_setup(t, &ipencap);
t                 109 net/ipv4/ipmr.c static void ipmr_expire_process(struct timer_list *t);
t                 770 net/ipv4/ipmr.c static void ipmr_expire_process(struct timer_list *t)
t                 772 net/ipv4/ipmr.c 	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
t                2835 net/ipv4/ipmr.c 	unsigned int t = 0, s_t;
t                2854 net/ipv4/ipmr.c 		if (t < s_t)
t                2901 net/ipv4/ipmr.c 		t++;
t                2906 net/ipv4/ipmr.c 	cb->args[0] = t;
t                  34 net/ipv4/ipmr_base.c 	       void (*expire_func)(struct timer_list *t),
t                 353 net/ipv4/ipmr_base.c 	unsigned int t = 0, s_t = cb->args[0];
t                 369 net/ipv4/ipmr_base.c 		if (t < s_t)
t                 377 net/ipv4/ipmr_base.c 		t++;
t                 381 net/ipv4/ipmr_base.c 	cb->args[0] = t;
t                 221 net/ipv4/netfilter/arp_tables.c 		const struct xt_entry_target *t;
t                 232 net/ipv4/netfilter/arp_tables.c 		t = arpt_get_target_c(e);
t                 235 net/ipv4/netfilter/arp_tables.c 		if (!t->u.kernel.target->target) {
t                 238 net/ipv4/netfilter/arp_tables.c 			v = ((struct xt_standard_target *)t)->verdict;
t                 267 net/ipv4/netfilter/arp_tables.c 		acpar.target   = t->u.kernel.target;
t                 268 net/ipv4/netfilter/arp_tables.c 		acpar.targinfo = t->data;
t                 269 net/ipv4/netfilter/arp_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
t                 321 net/ipv4/netfilter/arp_tables.c 			const struct xt_standard_target *t
t                 333 net/ipv4/netfilter/arp_tables.c 			     (strcmp(t->target.u.user.name,
t                 335 net/ipv4/netfilter/arp_tables.c 			     t->verdict < 0) || visited) {
t                 362 net/ipv4/netfilter/arp_tables.c 				int newpos = t->verdict;
t                 364 net/ipv4/netfilter/arp_tables.c 				if (strcmp(t->target.u.user.name,
t                 389 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t = arpt_get_target(e);
t                 394 net/ipv4/netfilter/arp_tables.c 		.target    = t->u.kernel.target,
t                 395 net/ipv4/netfilter/arp_tables.c 		.targinfo  = t->data,
t                 400 net/ipv4/netfilter/arp_tables.c 	return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
t                 408 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                 415 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
t                 416 net/ipv4/netfilter/arp_tables.c 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t                 417 net/ipv4/netfilter/arp_tables.c 					t->u.user.revision);
t                 422 net/ipv4/netfilter/arp_tables.c 	t->u.kernel.target = target;
t                 429 net/ipv4/netfilter/arp_tables.c 	module_put(t->u.kernel.target->me);
t                 438 net/ipv4/netfilter/arp_tables.c 	const struct xt_entry_target *t;
t                 443 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target_c(e);
t                 444 net/ipv4/netfilter/arp_tables.c 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
t                 446 net/ipv4/netfilter/arp_tables.c 	verdict = ((struct xt_standard_target *)t)->verdict;
t                 502 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                 504 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
t                 506 net/ipv4/netfilter/arp_tables.c 	par.target   = t->u.kernel.target;
t                 507 net/ipv4/netfilter/arp_tables.c 	par.targinfo = t->data;
t                 599 net/ipv4/netfilter/arp_tables.c static void get_counters(const struct xt_table_info *t,
t                 610 net/ipv4/netfilter/arp_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 629 net/ipv4/netfilter/arp_tables.c static void get_old_counters(const struct xt_table_info *t,
t                 637 net/ipv4/netfilter/arp_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 689 net/ipv4/netfilter/arp_tables.c 		const struct xt_entry_target *t;
t                 704 net/ipv4/netfilter/arp_tables.c 		t = arpt_get_target_c(e);
t                 705 net/ipv4/netfilter/arp_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
t                 739 net/ipv4/netfilter/arp_tables.c 	const struct xt_entry_target *t;
t                 746 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target_c(e);
t                 747 net/ipv4/netfilter/arp_tables.c 	off += xt_compat_target_offset(t->u.kernel.target);
t                 794 net/ipv4/netfilter/arp_tables.c 	struct xt_table *t;
t                 808 net/ipv4/netfilter/arp_tables.c 	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
t                 809 net/ipv4/netfilter/arp_tables.c 	if (!IS_ERR(t)) {
t                 811 net/ipv4/netfilter/arp_tables.c 		const struct xt_table_info *private = t->private;
t                 822 net/ipv4/netfilter/arp_tables.c 		info.valid_hooks = t->valid_hooks;
t                 835 net/ipv4/netfilter/arp_tables.c 		xt_table_unlock(t);
t                 836 net/ipv4/netfilter/arp_tables.c 		module_put(t->me);
t                 838 net/ipv4/netfilter/arp_tables.c 		ret = PTR_ERR(t);
t                 851 net/ipv4/netfilter/arp_tables.c 	struct xt_table *t;
t                 862 net/ipv4/netfilter/arp_tables.c 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
t                 863 net/ipv4/netfilter/arp_tables.c 	if (!IS_ERR(t)) {
t                 864 net/ipv4/netfilter/arp_tables.c 		const struct xt_table_info *private = t->private;
t                 868 net/ipv4/netfilter/arp_tables.c 						   t, uptr->entrytable);
t                 872 net/ipv4/netfilter/arp_tables.c 		module_put(t->me);
t                 873 net/ipv4/netfilter/arp_tables.c 		xt_table_unlock(t);
t                 875 net/ipv4/netfilter/arp_tables.c 		ret = PTR_ERR(t);
t                 887 net/ipv4/netfilter/arp_tables.c 	struct xt_table *t;
t                 900 net/ipv4/netfilter/arp_tables.c 	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
t                 901 net/ipv4/netfilter/arp_tables.c 	if (IS_ERR(t)) {
t                 902 net/ipv4/netfilter/arp_tables.c 		ret = PTR_ERR(t);
t                 907 net/ipv4/netfilter/arp_tables.c 	if (valid_hooks != t->valid_hooks) {
t                 912 net/ipv4/netfilter/arp_tables.c 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
t                 919 net/ipv4/netfilter/arp_tables.c 		module_put(t->me);
t                 922 net/ipv4/netfilter/arp_tables.c 		module_put(t->me);
t                 924 net/ipv4/netfilter/arp_tables.c 	xt_table_unlock(t);
t                 943 net/ipv4/netfilter/arp_tables.c 	module_put(t->me);
t                 944 net/ipv4/netfilter/arp_tables.c 	xt_table_unlock(t);
t                1006 net/ipv4/netfilter/arp_tables.c 	struct xt_table *t;
t                1016 net/ipv4/netfilter/arp_tables.c 	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
t                1017 net/ipv4/netfilter/arp_tables.c 	if (IS_ERR(t)) {
t                1018 net/ipv4/netfilter/arp_tables.c 		ret = PTR_ERR(t);
t                1023 net/ipv4/netfilter/arp_tables.c 	private = t->private;
t                1042 net/ipv4/netfilter/arp_tables.c 	xt_table_unlock(t);
t                1043 net/ipv4/netfilter/arp_tables.c 	module_put(t->me);
t                1065 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                1067 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
t                1068 net/ipv4/netfilter/arp_tables.c 	module_put(t->u.kernel.target->me);
t                1078 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                1103 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
t                1104 net/ipv4/netfilter/arp_tables.c 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t                1105 net/ipv4/netfilter/arp_tables.c 					t->u.user.revision);
t                1110 net/ipv4/netfilter/arp_tables.c 	t->u.kernel.target = target;
t                1121 net/ipv4/netfilter/arp_tables.c 	module_put(t->u.kernel.target->me);
t                1131 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                1145 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
t                1146 net/ipv4/netfilter/arp_tables.c 	xt_compat_target_from_user(t, dstptr, size);
t                1326 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t;
t                1344 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
t                1345 net/ipv4/netfilter/arp_tables.c 	ret = xt_compat_target_to_user(t, dstptr, size);
t                1395 net/ipv4/netfilter/arp_tables.c 	struct xt_table *t;
t                1407 net/ipv4/netfilter/arp_tables.c 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
t                1408 net/ipv4/netfilter/arp_tables.c 	if (!IS_ERR(t)) {
t                1409 net/ipv4/netfilter/arp_tables.c 		const struct xt_table_info *private = t->private;
t                1415 net/ipv4/netfilter/arp_tables.c 							  t, uptr->entrytable);
t                1420 net/ipv4/netfilter/arp_tables.c 		module_put(t->me);
t                1421 net/ipv4/netfilter/arp_tables.c 		xt_table_unlock(t);
t                1423 net/ipv4/netfilter/arp_tables.c 		ret = PTR_ERR(t);
t                 162 net/ipv4/netfilter/ip_tables.c 	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
t                 164 net/ipv4/netfilter/ip_tables.c 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
t                 166 net/ipv4/netfilter/ip_tables.c 		*chainname = t->target.data;
t                 172 net/ipv4/netfilter/ip_tables.c 		    strcmp(t->target.u.kernel.target->name,
t                 174 net/ipv4/netfilter/ip_tables.c 		   t->verdict < 0) {
t                 279 net/ipv4/netfilter/ip_tables.c 		const struct xt_entry_target *t;
t                 301 net/ipv4/netfilter/ip_tables.c 		t = ipt_get_target_c(e);
t                 302 net/ipv4/netfilter/ip_tables.c 		WARN_ON(!t->u.kernel.target);
t                 311 net/ipv4/netfilter/ip_tables.c 		if (!t->u.kernel.target->target) {
t                 314 net/ipv4/netfilter/ip_tables.c 			v = ((struct xt_standard_target *)t)->verdict;
t                 343 net/ipv4/netfilter/ip_tables.c 		acpar.target   = t->u.kernel.target;
t                 344 net/ipv4/netfilter/ip_tables.c 		acpar.targinfo = t->data;
t                 346 net/ipv4/netfilter/ip_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
t                 387 net/ipv4/netfilter/ip_tables.c 			const struct xt_standard_target *t
t                 398 net/ipv4/netfilter/ip_tables.c 			     (strcmp(t->target.u.user.name,
t                 400 net/ipv4/netfilter/ip_tables.c 			     t->verdict < 0) || visited) {
t                 426 net/ipv4/netfilter/ip_tables.c 				int newpos = t->verdict;
t                 428 net/ipv4/netfilter/ip_tables.c 				if (strcmp(t->target.u.user.name,
t                 500 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t = ipt_get_target(e);
t                 505 net/ipv4/netfilter/ip_tables.c 		.target    = t->u.kernel.target,
t                 506 net/ipv4/netfilter/ip_tables.c 		.targinfo  = t->data,
t                 511 net/ipv4/netfilter/ip_tables.c 	return xt_check_target(&par, t->u.target_size - sizeof(*t),
t                 520 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                 544 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
t                 545 net/ipv4/netfilter/ip_tables.c 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
t                 546 net/ipv4/netfilter/ip_tables.c 					t->u.user.revision);
t                 551 net/ipv4/netfilter/ip_tables.c 	t->u.kernel.target = target;
t                 559 net/ipv4/netfilter/ip_tables.c 	module_put(t->u.kernel.target->me);
t                 574 net/ipv4/netfilter/ip_tables.c 	const struct xt_entry_target *t;
t                 579 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target_c(e);
t                 580 net/ipv4/netfilter/ip_tables.c 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
t                 582 net/ipv4/netfilter/ip_tables.c 	verdict = ((struct xt_standard_target *)t)->verdict;
t                 640 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                 646 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
t                 649 net/ipv4/netfilter/ip_tables.c 	par.target   = t->u.kernel.target;
t                 650 net/ipv4/netfilter/ip_tables.c 	par.targinfo = t->data;
t                 740 net/ipv4/netfilter/ip_tables.c get_counters(const struct xt_table_info *t,
t                 751 net/ipv4/netfilter/ip_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 770 net/ipv4/netfilter/ip_tables.c static void get_old_counters(const struct xt_table_info *t,
t                 778 net/ipv4/netfilter/ip_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 833 net/ipv4/netfilter/ip_tables.c 		const struct xt_entry_target *t;
t                 859 net/ipv4/netfilter/ip_tables.c 		t = ipt_get_target_c(e);
t                 860 net/ipv4/netfilter/ip_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
t                 895 net/ipv4/netfilter/ip_tables.c 	const struct xt_entry_target *t;
t                 903 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target_c(e);
t                 904 net/ipv4/netfilter/ip_tables.c 	off += xt_compat_target_offset(t->u.kernel.target);
t                 951 net/ipv4/netfilter/ip_tables.c 	struct xt_table *t;
t                 965 net/ipv4/netfilter/ip_tables.c 	t = xt_request_find_table_lock(net, AF_INET, name);
t                 966 net/ipv4/netfilter/ip_tables.c 	if (!IS_ERR(t)) {
t                 968 net/ipv4/netfilter/ip_tables.c 		const struct xt_table_info *private = t->private;
t                 979 net/ipv4/netfilter/ip_tables.c 		info.valid_hooks = t->valid_hooks;
t                 993 net/ipv4/netfilter/ip_tables.c 		xt_table_unlock(t);
t                 994 net/ipv4/netfilter/ip_tables.c 		module_put(t->me);
t                 996 net/ipv4/netfilter/ip_tables.c 		ret = PTR_ERR(t);
t                1010 net/ipv4/netfilter/ip_tables.c 	struct xt_table *t;
t                1020 net/ipv4/netfilter/ip_tables.c 	t = xt_find_table_lock(net, AF_INET, get.name);
t                1021 net/ipv4/netfilter/ip_tables.c 	if (!IS_ERR(t)) {
t                1022 net/ipv4/netfilter/ip_tables.c 		const struct xt_table_info *private = t->private;
t                1025 net/ipv4/netfilter/ip_tables.c 						   t, uptr->entrytable);
t                1029 net/ipv4/netfilter/ip_tables.c 		module_put(t->me);
t                1030 net/ipv4/netfilter/ip_tables.c 		xt_table_unlock(t);
t                1032 net/ipv4/netfilter/ip_tables.c 		ret = PTR_ERR(t);
t                1043 net/ipv4/netfilter/ip_tables.c 	struct xt_table *t;
t                1055 net/ipv4/netfilter/ip_tables.c 	t = xt_request_find_table_lock(net, AF_INET, name);
t                1056 net/ipv4/netfilter/ip_tables.c 	if (IS_ERR(t)) {
t                1057 net/ipv4/netfilter/ip_tables.c 		ret = PTR_ERR(t);
t                1062 net/ipv4/netfilter/ip_tables.c 	if (valid_hooks != t->valid_hooks) {
t                1067 net/ipv4/netfilter/ip_tables.c 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
t                1074 net/ipv4/netfilter/ip_tables.c 		module_put(t->me);
t                1077 net/ipv4/netfilter/ip_tables.c 		module_put(t->me);
t                1079 net/ipv4/netfilter/ip_tables.c 	xt_table_unlock(t);
t                1097 net/ipv4/netfilter/ip_tables.c 	module_put(t->me);
t                1098 net/ipv4/netfilter/ip_tables.c 	xt_table_unlock(t);
t                1161 net/ipv4/netfilter/ip_tables.c 	struct xt_table *t;
t                1171 net/ipv4/netfilter/ip_tables.c 	t = xt_find_table_lock(net, AF_INET, tmp.name);
t                1172 net/ipv4/netfilter/ip_tables.c 	if (IS_ERR(t)) {
t                1173 net/ipv4/netfilter/ip_tables.c 		ret = PTR_ERR(t);
t                1178 net/ipv4/netfilter/ip_tables.c 	private = t->private;
t                1196 net/ipv4/netfilter/ip_tables.c 	xt_table_unlock(t);
t                1197 net/ipv4/netfilter/ip_tables.c 	module_put(t->me);
t                1222 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                1245 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
t                1246 net/ipv4/netfilter/ip_tables.c 	ret = xt_compat_target_to_user(t, dstptr, size);
t                1275 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                1281 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
t                1282 net/ipv4/netfilter/ip_tables.c 	module_put(t->u.kernel.target->me);
t                1293 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                1326 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
t                1327 net/ipv4/netfilter/ip_tables.c 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
t                1328 net/ipv4/netfilter/ip_tables.c 					t->u.user.revision);
t                1333 net/ipv4/netfilter/ip_tables.c 	t->u.kernel.target = target;
t                1344 net/ipv4/netfilter/ip_tables.c 	module_put(t->u.kernel.target->me);
t                1359 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t;
t                1377 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
t                1378 net/ipv4/netfilter/ip_tables.c 	xt_compat_target_from_user(t, dstptr, size);
t                1603 net/ipv4/netfilter/ip_tables.c 	struct xt_table *t;
t                1617 net/ipv4/netfilter/ip_tables.c 	t = xt_find_table_lock(net, AF_INET, get.name);
t                1618 net/ipv4/netfilter/ip_tables.c 	if (!IS_ERR(t)) {
t                1619 net/ipv4/netfilter/ip_tables.c 		const struct xt_table_info *private = t->private;
t                1624 net/ipv4/netfilter/ip_tables.c 							  t, uptr->entrytable);
t                1629 net/ipv4/netfilter/ip_tables.c 		module_put(t->me);
t                1630 net/ipv4/netfilter/ip_tables.c 		xt_table_unlock(t);
t                1632 net/ipv4/netfilter/ip_tables.c 		ret = PTR_ERR(t);
t                  49 net/ipv4/netfilter/nf_nat_pptp.c 	struct nf_conntrack_tuple t = {};
t                  66 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.l3num = AF_INET;
t                  67 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
t                  68 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.u.gre.key = ct_pptp_info->pac_call_id;
t                  69 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
t                  70 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.u.gre.key = ct_pptp_info->pns_call_id;
t                  71 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.protonum = IPPROTO_GRE;
t                  75 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.l3num = AF_INET;
t                  76 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
t                  77 net/ipv4/netfilter/nf_nat_pptp.c 		t.src.u.gre.key = nat_pptp_info->pns_call_id;
t                  78 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
t                  79 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.u.gre.key = nat_pptp_info->pac_call_id;
t                  80 net/ipv4/netfilter/nf_nat_pptp.c 		t.dst.protonum = IPPROTO_GRE;
t                  84 net/ipv4/netfilter/nf_nat_pptp.c 	nf_ct_dump_tuple_ip(&t);
t                  85 net/ipv4/netfilter/nf_nat_pptp.c 	other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
t                 691 net/ipv4/tcp_bbr.c 	u32 t;
t                 743 net/ipv4/tcp_bbr.c 	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
t                 744 net/ipv4/tcp_bbr.c 	if ((s32)t < 1)
t                 747 net/ipv4/tcp_bbr.c 	if (t >= ~0U / USEC_PER_MSEC) {
t                 751 net/ipv4/tcp_bbr.c 	t *= USEC_PER_MSEC;
t                 753 net/ipv4/tcp_bbr.c 	do_div(bw, t);
t                 228 net/ipv4/tcp_cubic.c 	u64 offs, t;
t                 278 net/ipv4/tcp_cubic.c 	t = (s32)(tcp_jiffies32 - ca->epoch_start);
t                 279 net/ipv4/tcp_cubic.c 	t += msecs_to_jiffies(ca->delay_min >> 3);
t                 281 net/ipv4/tcp_cubic.c 	t <<= BICTCP_HZ;
t                 282 net/ipv4/tcp_cubic.c 	do_div(t, HZ);
t                 284 net/ipv4/tcp_cubic.c 	if (t < ca->bic_K)		/* t - K */
t                 285 net/ipv4/tcp_cubic.c 		offs = ca->bic_K - t;
t                 287 net/ipv4/tcp_cubic.c 		offs = t - ca->bic_K;
t                 291 net/ipv4/tcp_cubic.c 	if (t < ca->bic_K)                            /* below origin*/
t                 122 net/ipv4/tcp_illinois.c 	u64 t = ca->sum_rtt;
t                 124 net/ipv4/tcp_illinois.c 	do_div(t, ca->cnt_rtt);
t                 125 net/ipv4/tcp_illinois.c 	return t - ca->base_rtt;
t                 317 net/ipv4/tcp_illinois.c 			u64 t = ca->sum_rtt;
t                 319 net/ipv4/tcp_illinois.c 			do_div(t, info->vegas.tcpv_rttcnt);
t                 320 net/ipv4/tcp_illinois.c 			info->vegas.tcpv_rtt = t;
t                 318 net/ipv4/tcp_timer.c static void tcp_delack_timer(struct timer_list *t)
t                 321 net/ipv4/tcp_timer.c 			from_timer(icsk, t, icsk_delack_timer);
t                 611 net/ipv4/tcp_timer.c static void tcp_write_timer(struct timer_list *t)
t                 614 net/ipv4/tcp_timer.c 			from_timer(icsk, t, icsk_retransmit_timer);
t                 650 net/ipv4/tcp_timer.c static void tcp_keepalive_timer (struct timer_list *t)
t                 652 net/ipv4/tcp_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                  34 net/ipv4/tunnel4.c 	struct xfrm_tunnel *t;
t                  42 net/ipv4/tunnel4.c 	     (t = rcu_dereference_protected(*pprev,
t                  44 net/ipv4/tunnel4.c 	     pprev = &t->next) {
t                  45 net/ipv4/tunnel4.c 		if (t->priority > priority)
t                  47 net/ipv4/tunnel4.c 		if (t->priority == priority)
t                  66 net/ipv4/tunnel4.c 	struct xfrm_tunnel *t;
t                  72 net/ipv4/tunnel4.c 	     (t = rcu_dereference_protected(*pprev,
t                  74 net/ipv4/tunnel4.c 	     pprev = &t->next) {
t                  75 net/ipv4/tunnel4.c 		if (t == handler) {
t                 215 net/ipv4/xfrm4_protocol.c 	struct xfrm4_protocol *t;
t                 230 net/ipv4/xfrm4_protocol.c 	     (t = rcu_dereference_protected(*pprev,
t                 232 net/ipv4/xfrm4_protocol.c 	     pprev = &t->next) {
t                 233 net/ipv4/xfrm4_protocol.c 		if (t->priority < priority)
t                 235 net/ipv4/xfrm4_protocol.c 		if (t->priority == priority)
t                 262 net/ipv4/xfrm4_protocol.c 	struct xfrm4_protocol *t;
t                 271 net/ipv4/xfrm4_protocol.c 	     (t = rcu_dereference_protected(*pprev,
t                 273 net/ipv4/xfrm4_protocol.c 	     pprev = &t->next) {
t                 274 net/ipv4/xfrm4_protocol.c 		if (t == handler) {
t                 180 net/ipv6/addrconf.c static void addrconf_rs_timer(struct timer_list *t);
t                3869 net/ipv6/addrconf.c static void addrconf_rs_timer(struct timer_list *t)
t                3871 net/ipv6/addrconf.c 	struct inet6_dev *idev = from_timer(idev, t, rs_timer);
t                  71 net/ipv6/ip6_fib.c static void fib6_gc_timer_cb(struct timer_list *t);
t                2234 net/ipv6/ip6_fib.c static void fib6_gc_timer_cb(struct timer_list *t)
t                2236 net/ipv6/ip6_fib.c 	struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer);
t                  78 net/ipv6/ip6_gre.c static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
t                  79 net/ipv6/ip6_gre.c static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
t                  80 net/ipv6/ip6_gre.c static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
t                 123 net/ipv6/ip6_gre.c 	struct ip6_tnl *t, *cand = NULL;
t                 131 net/ipv6/ip6_gre.c 	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
t                 132 net/ipv6/ip6_gre.c 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
t                 133 net/ipv6/ip6_gre.c 		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
t                 134 net/ipv6/ip6_gre.c 		    key != t->parms.i_key ||
t                 135 net/ipv6/ip6_gre.c 		    !(t->dev->flags & IFF_UP))
t                 138 net/ipv6/ip6_gre.c 		if (t->dev->type != ARPHRD_IP6GRE &&
t                 139 net/ipv6/ip6_gre.c 		    t->dev->type != dev_type)
t                 143 net/ipv6/ip6_gre.c 		if (t->parms.link != link)
t                 145 net/ipv6/ip6_gre.c 		if (t->dev->type != dev_type)
t                 148 net/ipv6/ip6_gre.c 			return t;
t                 151 net/ipv6/ip6_gre.c 			cand = t;
t                 156 net/ipv6/ip6_gre.c 	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
t                 157 net/ipv6/ip6_gre.c 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
t                 158 net/ipv6/ip6_gre.c 		    key != t->parms.i_key ||
t                 159 net/ipv6/ip6_gre.c 		    !(t->dev->flags & IFF_UP))
t                 162 net/ipv6/ip6_gre.c 		if (t->dev->type != ARPHRD_IP6GRE &&
t                 163 net/ipv6/ip6_gre.c 		    t->dev->type != dev_type)
t                 167 net/ipv6/ip6_gre.c 		if (t->parms.link != link)
t                 169 net/ipv6/ip6_gre.c 		if (t->dev->type != dev_type)
t                 172 net/ipv6/ip6_gre.c 			return t;
t                 175 net/ipv6/ip6_gre.c 			cand = t;
t                 180 net/ipv6/ip6_gre.c 	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
t                 181 net/ipv6/ip6_gre.c 		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
t                 182 net/ipv6/ip6_gre.c 			  (!ipv6_addr_equal(local, &t->parms.raddr) ||
t                 184 net/ipv6/ip6_gre.c 		    key != t->parms.i_key ||
t                 185 net/ipv6/ip6_gre.c 		    !(t->dev->flags & IFF_UP))
t                 188 net/ipv6/ip6_gre.c 		if (t->dev->type != ARPHRD_IP6GRE &&
t                 189 net/ipv6/ip6_gre.c 		    t->dev->type != dev_type)
t                 193 net/ipv6/ip6_gre.c 		if (t->parms.link != link)
t                 195 net/ipv6/ip6_gre.c 		if (t->dev->type != dev_type)
t                 198 net/ipv6/ip6_gre.c 			return t;
t                 201 net/ipv6/ip6_gre.c 			cand = t;
t                 206 net/ipv6/ip6_gre.c 	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
t                 207 net/ipv6/ip6_gre.c 		if (t->parms.i_key != key ||
t                 208 net/ipv6/ip6_gre.c 		    !(t->dev->flags & IFF_UP))
t                 211 net/ipv6/ip6_gre.c 		if (t->dev->type != ARPHRD_IP6GRE &&
t                 212 net/ipv6/ip6_gre.c 		    t->dev->type != dev_type)
t                 216 net/ipv6/ip6_gre.c 		if (t->parms.link != link)
t                 218 net/ipv6/ip6_gre.c 		if (t->dev->type != dev_type)
t                 221 net/ipv6/ip6_gre.c 			return t;
t                 224 net/ipv6/ip6_gre.c 			cand = t;
t                 234 net/ipv6/ip6_gre.c 		t = rcu_dereference(ign->collect_md_tun_erspan);
t                 236 net/ipv6/ip6_gre.c 		t = rcu_dereference(ign->collect_md_tun);
t                 238 net/ipv6/ip6_gre.c 	if (t && t->dev->flags & IFF_UP)
t                 239 net/ipv6/ip6_gre.c 		return t;
t                 266 net/ipv6/ip6_gre.c static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
t                 268 net/ipv6/ip6_gre.c 	if (t->parms.collect_md)
t                 269 net/ipv6/ip6_gre.c 		rcu_assign_pointer(ign->collect_md_tun, t);
t                 272 net/ipv6/ip6_gre.c static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
t                 274 net/ipv6/ip6_gre.c 	if (t->parms.collect_md)
t                 275 net/ipv6/ip6_gre.c 		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
t                 278 net/ipv6/ip6_gre.c static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
t                 280 net/ipv6/ip6_gre.c 	if (t->parms.collect_md)
t                 285 net/ipv6/ip6_gre.c 				       struct ip6_tnl *t)
t                 287 net/ipv6/ip6_gre.c 	if (t->parms.collect_md)
t                 292 net/ipv6/ip6_gre.c 		const struct ip6_tnl *t)
t                 294 net/ipv6/ip6_gre.c 	return __ip6gre_bucket(ign, &t->parms);
t                 297 net/ipv6/ip6_gre.c static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
t                 299 net/ipv6/ip6_gre.c 	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
t                 301 net/ipv6/ip6_gre.c 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
t                 302 net/ipv6/ip6_gre.c 	rcu_assign_pointer(*tp, t);
t                 305 net/ipv6/ip6_gre.c static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
t                 310 net/ipv6/ip6_gre.c 	for (tp = ip6gre_bucket(ign, t);
t                 313 net/ipv6/ip6_gre.c 		if (t == iter) {
t                 314 net/ipv6/ip6_gre.c 			rcu_assign_pointer(*tp, t->next);
t                 328 net/ipv6/ip6_gre.c 	struct ip6_tnl *t;
t                 333 net/ipv6/ip6_gre.c 	     (t = rtnl_dereference(*tp)) != NULL;
t                 334 net/ipv6/ip6_gre.c 	     tp = &t->next)
t                 335 net/ipv6/ip6_gre.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 336 net/ipv6/ip6_gre.c 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
t                 337 net/ipv6/ip6_gre.c 		    key == t->parms.i_key &&
t                 338 net/ipv6/ip6_gre.c 		    link == t->parms.link &&
t                 339 net/ipv6/ip6_gre.c 		    type == t->dev->type)
t                 342 net/ipv6/ip6_gre.c 	return t;
t                 348 net/ipv6/ip6_gre.c 	struct ip6_tnl *t, *nt;
t                 353 net/ipv6/ip6_gre.c 	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
t                 354 net/ipv6/ip6_gre.c 	if (t && create)
t                 356 net/ipv6/ip6_gre.c 	if (t || !create)
t                 357 net/ipv6/ip6_gre.c 		return t;
t                 366 net/ipv6/ip6_gre.c 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
t                 400 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 401 net/ipv6/ip6_gre.c 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
t                 403 net/ipv6/ip6_gre.c 	ip6erspan_tunnel_unlink_md(ign, t);
t                 404 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink(ign, t);
t                 405 net/ipv6/ip6_gre.c 	dst_cache_reset(&t->dst_cache);
t                 411 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 412 net/ipv6/ip6_gre.c 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
t                 414 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink_md(ign, t);
t                 415 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink(ign, t);
t                 416 net/ipv6/ip6_gre.c 	dst_cache_reset(&t->dst_cache);
t                 427 net/ipv6/ip6_gre.c 	struct ip6_tnl *t;
t                 434 net/ipv6/ip6_gre.c 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
t                 436 net/ipv6/ip6_gre.c 	if (!t)
t                 444 net/ipv6/ip6_gre.c 				    t->parms.name);
t                 451 net/ipv6/ip6_gre.c 					    t->parms.name);
t                 464 net/ipv6/ip6_gre.c 						    t->parms.name);
t                 468 net/ipv6/ip6_gre.c 					    t->parms.name);
t                 480 net/ipv6/ip6_gre.c 	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
t                 481 net/ipv6/ip6_gre.c 		t->err_count++;
t                 483 net/ipv6/ip6_gre.c 		t->err_count = 1;
t                 484 net/ipv6/ip6_gre.c 	t->err_time = jiffies;
t                 638 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 640 net/ipv6/ip6_gre.c 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                 641 net/ipv6/ip6_gre.c 		*encap_limit = t->parms.encap_limit;
t                 643 net/ipv6/ip6_gre.c 	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
t                 645 net/ipv6/ip6_gre.c 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
t                 648 net/ipv6/ip6_gre.c 		*dsfield = ip6_tclass(t->parms.flowinfo);
t                 650 net/ipv6/ip6_gre.c 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
t                 653 net/ipv6/ip6_gre.c 		fl6->flowi6_mark = t->parms.fwmark;
t                 664 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 681 net/ipv6/ip6_gre.c 	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
t                 682 net/ipv6/ip6_gre.c 		*encap_limit = t->parms.encap_limit;
t                 685 net/ipv6/ip6_gre.c 	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
t                 687 net/ipv6/ip6_gre.c 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
t                 690 net/ipv6/ip6_gre.c 		*dsfield = ip6_tclass(t->parms.flowinfo);
t                 692 net/ipv6/ip6_gre.c 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
t                 695 net/ipv6/ip6_gre.c 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
t                 698 net/ipv6/ip6_gre.c 		fl6->flowi6_mark = t->parms.fwmark;
t                 771 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 780 net/ipv6/ip6_gre.c 	if (!t->parms.collect_md)
t                 784 net/ipv6/ip6_gre.c 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
t                 803 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 811 net/ipv6/ip6_gre.c 	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
t                 814 net/ipv6/ip6_gre.c 	if (!t->parms.collect_md &&
t                 818 net/ipv6/ip6_gre.c 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
t                 846 net/ipv6/ip6_gre.c static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
t                 849 net/ipv6/ip6_gre.c 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
t                 854 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 860 net/ipv6/ip6_gre.c 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                 861 net/ipv6/ip6_gre.c 		encap_limit = t->parms.encap_limit;
t                 863 net/ipv6/ip6_gre.c 	if (!t->parms.collect_md)
t                 864 net/ipv6/ip6_gre.c 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
t                 866 net/ipv6/ip6_gre.c 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
t                 878 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 879 net/ipv6/ip6_gre.c 	struct net_device_stats *stats = &t->dev->stats;
t                 885 net/ipv6/ip6_gre.c 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
t                 915 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 931 net/ipv6/ip6_gre.c 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
t                 952 net/ipv6/ip6_gre.c 	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
t                 955 net/ipv6/ip6_gre.c 	t->parms.o_flags &= ~TUNNEL_KEY;
t                 961 net/ipv6/ip6_gre.c 	if (t->parms.collect_md) {
t                1010 net/ipv6/ip6_gre.c 			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
t                1017 net/ipv6/ip6_gre.c 			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
t                1021 net/ipv6/ip6_gre.c 		if (t->parms.erspan_ver == 1)
t                1022 net/ipv6/ip6_gre.c 			erspan_build_header(skb, ntohl(t->parms.o_key),
t                1023 net/ipv6/ip6_gre.c 					    t->parms.index,
t                1025 net/ipv6/ip6_gre.c 		else if (t->parms.erspan_ver == 2)
t                1026 net/ipv6/ip6_gre.c 			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
t                1027 net/ipv6/ip6_gre.c 					       t->parms.dir,
t                1028 net/ipv6/ip6_gre.c 					       t->parms.hwid,
t                1033 net/ipv6/ip6_gre.c 		fl6.daddr = t->parms.raddr;
t                1037 net/ipv6/ip6_gre.c 	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
t                1039 net/ipv6/ip6_gre.c 	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
t                1042 net/ipv6/ip6_gre.c 	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
t                1062 net/ipv6/ip6_gre.c 	stats = &t->dev->stats;
t                1069 net/ipv6/ip6_gre.c static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
t                1071 net/ipv6/ip6_gre.c 	struct net_device *dev = t->dev;
t                1072 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm *p = &t->parms;
t                1073 net/ipv6/ip6_gre.c 	struct flowi6 *fl6 = &t->fl.u.ip6;
t                1093 net/ipv6/ip6_gre.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
t                1102 net/ipv6/ip6_gre.c static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
t                1105 net/ipv6/ip6_gre.c 	const struct __ip6_tnl_parm *p = &t->parms;
t                1106 net/ipv6/ip6_gre.c 	struct net_device *dev = t->dev;
t                1112 net/ipv6/ip6_gre.c 		struct rt6_info *rt = rt6_lookup(t->net,
t                1125 net/ipv6/ip6_gre.c 				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                1150 net/ipv6/ip6_gre.c static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
t                1152 net/ipv6/ip6_gre.c 	ip6gre_tnl_link_config_common(t);
t                1153 net/ipv6/ip6_gre.c 	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
t                1156 net/ipv6/ip6_gre.c static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
t                1159 net/ipv6/ip6_gre.c 	t->parms.laddr = p->laddr;
t                1160 net/ipv6/ip6_gre.c 	t->parms.raddr = p->raddr;
t                1161 net/ipv6/ip6_gre.c 	t->parms.flags = p->flags;
t                1162 net/ipv6/ip6_gre.c 	t->parms.hop_limit = p->hop_limit;
t                1163 net/ipv6/ip6_gre.c 	t->parms.encap_limit = p->encap_limit;
t                1164 net/ipv6/ip6_gre.c 	t->parms.flowinfo = p->flowinfo;
t                1165 net/ipv6/ip6_gre.c 	t->parms.link = p->link;
t                1166 net/ipv6/ip6_gre.c 	t->parms.proto = p->proto;
t                1167 net/ipv6/ip6_gre.c 	t->parms.i_key = p->i_key;
t                1168 net/ipv6/ip6_gre.c 	t->parms.o_key = p->o_key;
t                1169 net/ipv6/ip6_gre.c 	t->parms.i_flags = p->i_flags;
t                1170 net/ipv6/ip6_gre.c 	t->parms.o_flags = p->o_flags;
t                1171 net/ipv6/ip6_gre.c 	t->parms.fwmark = p->fwmark;
t                1172 net/ipv6/ip6_gre.c 	t->parms.erspan_ver = p->erspan_ver;
t                1173 net/ipv6/ip6_gre.c 	t->parms.index = p->index;
t                1174 net/ipv6/ip6_gre.c 	t->parms.dir = p->dir;
t                1175 net/ipv6/ip6_gre.c 	t->parms.hwid = p->hwid;
t                1176 net/ipv6/ip6_gre.c 	dst_cache_reset(&t->dst_cache);
t                1179 net/ipv6/ip6_gre.c static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
t                1182 net/ipv6/ip6_gre.c 	ip6gre_tnl_copy_tnl_parm(t, p);
t                1183 net/ipv6/ip6_gre.c 	ip6gre_tnl_link_config(t, set_mtu);
t                1228 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1229 net/ipv6/ip6_gre.c 	struct net *net = t->net;
t                1242 net/ipv6/ip6_gre.c 			t = ip6gre_tunnel_locate(net, &p1, 0);
t                1243 net/ipv6/ip6_gre.c 			if (!t)
t                1244 net/ipv6/ip6_gre.c 				t = netdev_priv(dev);
t                1247 net/ipv6/ip6_gre.c 		ip6gre_tnl_parm_to_user(&p, &t->parms);
t                1272 net/ipv6/ip6_gre.c 		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
t                1275 net/ipv6/ip6_gre.c 			if (t) {
t                1276 net/ipv6/ip6_gre.c 				if (t->dev != dev) {
t                1281 net/ipv6/ip6_gre.c 				t = netdev_priv(dev);
t                1283 net/ipv6/ip6_gre.c 				ip6gre_tunnel_unlink(ign, t);
t                1285 net/ipv6/ip6_gre.c 				ip6gre_tnl_change(t, &p1, 1);
t                1286 net/ipv6/ip6_gre.c 				ip6gre_tunnel_link(ign, t);
t                1291 net/ipv6/ip6_gre.c 		if (t) {
t                1295 net/ipv6/ip6_gre.c 			ip6gre_tnl_parm_to_user(&p, &t->parms);
t                1313 net/ipv6/ip6_gre.c 			t = ip6gre_tunnel_locate(net, &p1, 0);
t                1314 net/ipv6/ip6_gre.c 			if (!t)
t                1317 net/ipv6/ip6_gre.c 			if (t == netdev_priv(ign->fb_tunnel_dev))
t                1319 net/ipv6/ip6_gre.c 			dev = t->dev;
t                1337 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1341 net/ipv6/ip6_gre.c 	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
t                1343 net/ipv6/ip6_gre.c 						  t->fl.u.ip6.flowlabel,
t                1344 net/ipv6/ip6_gre.c 						  true, &t->fl.u.ip6));
t                1345 net/ipv6/ip6_gre.c 	ipv6h->hop_limit = t->parms.hop_limit;
t                1347 net/ipv6/ip6_gre.c 	ipv6h->saddr = t->parms.laddr;
t                1348 net/ipv6/ip6_gre.c 	ipv6h->daddr = t->parms.raddr;
t                1351 net/ipv6/ip6_gre.c 	p[0] = t->parms.o_flags;
t                1363 net/ipv6/ip6_gre.c 		return t->hlen;
t                1365 net/ipv6/ip6_gre.c 	return -t->hlen;
t                1384 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1386 net/ipv6/ip6_gre.c 	gro_cells_destroy(&t->gro_cells);
t                1387 net/ipv6/ip6_gre.c 	dst_cache_destroy(&t->dst_cache);
t                1540 net/ipv6/ip6_gre.c 			struct ip6_tnl *t;
t                1542 net/ipv6/ip6_gre.c 			t = rtnl_dereference(ign->tunnels[prio][h]);
t                1544 net/ipv6/ip6_gre.c 			while (t) {
t                1548 net/ipv6/ip6_gre.c 				if (!net_eq(dev_net(t->dev), net))
t                1549 net/ipv6/ip6_gre.c 					unregister_netdevice_queue(t->dev,
t                1551 net/ipv6/ip6_gre.c 				t = rtnl_dereference(t->next);
t                2004 net/ipv6/ip6_gre.c 	struct ip6_tnl *t, *nt = netdev_priv(dev);
t                2021 net/ipv6/ip6_gre.c 	t = ip6gre_tunnel_locate(net, p_p, 0);
t                2023 net/ipv6/ip6_gre.c 	if (t) {
t                2024 net/ipv6/ip6_gre.c 		if (t->dev != dev)
t                2027 net/ipv6/ip6_gre.c 		t = nt;
t                2030 net/ipv6/ip6_gre.c 	return t;
t                2037 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                2038 net/ipv6/ip6_gre.c 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
t                2041 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
t                2042 net/ipv6/ip6_gre.c 	if (IS_ERR(t))
t                2043 net/ipv6/ip6_gre.c 		return PTR_ERR(t);
t                2045 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink_md(ign, t);
t                2046 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink(ign, t);
t                2047 net/ipv6/ip6_gre.c 	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
t                2048 net/ipv6/ip6_gre.c 	ip6gre_tunnel_link_md(ign, t);
t                2049 net/ipv6/ip6_gre.c 	ip6gre_tunnel_link(ign, t);
t                2106 net/ipv6/ip6_gre.c 	struct ip6_tnl *t = netdev_priv(dev);
t                2107 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm *p = &t->parms;
t                2145 net/ipv6/ip6_gre.c 			t->encap.type) ||
t                2147 net/ipv6/ip6_gre.c 			 t->encap.sport) ||
t                2149 net/ipv6/ip6_gre.c 			 t->encap.dport) ||
t                2151 net/ipv6/ip6_gre.c 			t->encap.flags))
t                2233 net/ipv6/ip6_gre.c static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
t                2235 net/ipv6/ip6_gre.c 	ip6gre_tnl_link_config_common(t);
t                2236 net/ipv6/ip6_gre.c 	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
t                2239 net/ipv6/ip6_gre.c static int ip6erspan_tnl_change(struct ip6_tnl *t,
t                2242 net/ipv6/ip6_gre.c 	ip6gre_tnl_copy_tnl_parm(t, p);
t                2243 net/ipv6/ip6_gre.c 	ip6erspan_tnl_link_config(t, set_mtu);
t                2253 net/ipv6/ip6_gre.c 	struct ip6_tnl *t;
t                2255 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
t                2256 net/ipv6/ip6_gre.c 	if (IS_ERR(t))
t                2257 net/ipv6/ip6_gre.c 		return PTR_ERR(t);
t                2260 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink_md(ign, t);
t                2261 net/ipv6/ip6_gre.c 	ip6gre_tunnel_unlink(ign, t);
t                2262 net/ipv6/ip6_gre.c 	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
t                2263 net/ipv6/ip6_gre.c 	ip6erspan_tunnel_link_md(ign, t);
t                2264 net/ipv6/ip6_gre.c 	ip6gre_tunnel_link(ign, t);
t                 134 net/ipv6/ip6_tunnel.c 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
t                 140 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                 145 net/ipv6/ip6_tunnel.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 146 net/ipv6/ip6_tunnel.c 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
t                 147 net/ipv6/ip6_tunnel.c 		    (t->dev->flags & IFF_UP))
t                 148 net/ipv6/ip6_tunnel.c 			return t;
t                 154 net/ipv6/ip6_tunnel.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 155 net/ipv6/ip6_tunnel.c 		    ipv6_addr_any(&t->parms.raddr) &&
t                 156 net/ipv6/ip6_tunnel.c 		    (t->dev->flags & IFF_UP))
t                 157 net/ipv6/ip6_tunnel.c 			return t;
t                 162 net/ipv6/ip6_tunnel.c 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
t                 163 net/ipv6/ip6_tunnel.c 		    ipv6_addr_any(&t->parms.laddr) &&
t                 164 net/ipv6/ip6_tunnel.c 		    (t->dev->flags & IFF_UP))
t                 165 net/ipv6/ip6_tunnel.c 			return t;
t                 168 net/ipv6/ip6_tunnel.c 	t = rcu_dereference(ip6n->collect_md_tun);
t                 169 net/ipv6/ip6_tunnel.c 	if (t && t->dev->flags & IFF_UP)
t                 170 net/ipv6/ip6_tunnel.c 		return t;
t                 172 net/ipv6/ip6_tunnel.c 	t = rcu_dereference(ip6n->tnls_wc[0]);
t                 173 net/ipv6/ip6_tunnel.c 	if (t && (t->dev->flags & IFF_UP))
t                 174 net/ipv6/ip6_tunnel.c 		return t;
t                 211 net/ipv6/ip6_tunnel.c ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
t                 213 net/ipv6/ip6_tunnel.c 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
t                 215 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md)
t                 216 net/ipv6/ip6_tunnel.c 		rcu_assign_pointer(ip6n->collect_md_tun, t);
t                 217 net/ipv6/ip6_tunnel.c 	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
t                 218 net/ipv6/ip6_tunnel.c 	rcu_assign_pointer(*tp, t);
t                 227 net/ipv6/ip6_tunnel.c ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
t                 232 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md)
t                 235 net/ipv6/ip6_tunnel.c 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
t                 238 net/ipv6/ip6_tunnel.c 		if (t == iter) {
t                 239 net/ipv6/ip6_tunnel.c 			rcu_assign_pointer(*tp, t->next);
t                 247 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 249 net/ipv6/ip6_tunnel.c 	gro_cells_destroy(&t->gro_cells);
t                 250 net/ipv6/ip6_tunnel.c 	dst_cache_destroy(&t->dst_cache);
t                 256 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 261 net/ipv6/ip6_tunnel.c 	t = netdev_priv(dev);
t                 268 net/ipv6/ip6_tunnel.c 	strcpy(t->parms.name, dev->name);
t                 271 net/ipv6/ip6_tunnel.c 	ip6_tnl_link(ip6n, t);
t                 293 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                 305 net/ipv6/ip6_tunnel.c 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
t                 312 net/ipv6/ip6_tunnel.c 	t = netdev_priv(dev);
t                 313 net/ipv6/ip6_tunnel.c 	t->parms = *p;
t                 314 net/ipv6/ip6_tunnel.c 	t->net = dev_net(dev);
t                 319 net/ipv6/ip6_tunnel.c 	return t;
t                 347 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                 351 net/ipv6/ip6_tunnel.c 	     (t = rtnl_dereference(*tp)) != NULL;
t                 352 net/ipv6/ip6_tunnel.c 	     tp = &t->next) {
t                 353 net/ipv6/ip6_tunnel.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 354 net/ipv6/ip6_tunnel.c 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
t                 358 net/ipv6/ip6_tunnel.c 			return t;
t                 377 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 378 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                 384 net/ipv6/ip6_tunnel.c 		ip6_tnl_unlink(ip6n, t);
t                 385 net/ipv6/ip6_tunnel.c 	dst_cache_reset(&t->dst_cache);
t                 477 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                 488 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
t                 489 net/ipv6/ip6_tunnel.c 	if (!t)
t                 492 net/ipv6/ip6_tunnel.c 	tproto = READ_ONCE(t->parms.proto);
t                 503 net/ipv6/ip6_tunnel.c 				    t->parms.name);
t                 509 net/ipv6/ip6_tunnel.c 					    t->parms.name);
t                 522 net/ipv6/ip6_tunnel.c 						    t->parms.name);
t                 527 net/ipv6/ip6_tunnel.c 					    t->parms.name);
t                 695 net/ipv6/ip6_tunnel.c static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
t                 701 net/ipv6/ip6_tunnel.c 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
t                 707 net/ipv6/ip6_tunnel.c static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
t                 711 net/ipv6/ip6_tunnel.c 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
t                 717 net/ipv6/ip6_tunnel.c __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
t                 721 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
t                 742 net/ipv6/ip6_tunnel.c int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
t                 746 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
t                 748 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                 752 net/ipv6/ip6_tunnel.c 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
t                 773 net/ipv6/ip6_tunnel.c 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
t                 858 net/ipv6/ip6_tunnel.c int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
t                 863 net/ipv6/ip6_tunnel.c 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
t                 880 net/ipv6/ip6_tunnel.c 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
t                 884 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                 890 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
t                 892 net/ipv6/ip6_tunnel.c 	if (t) {
t                 893 net/ipv6/ip6_tunnel.c 		u8 tproto = READ_ONCE(t->parms.proto);
t                 900 net/ipv6/ip6_tunnel.c 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
t                 904 net/ipv6/ip6_tunnel.c 		if (t->parms.collect_md) {
t                 909 net/ipv6/ip6_tunnel.c 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
t                 969 net/ipv6/ip6_tunnel.c ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
t                 971 net/ipv6/ip6_tunnel.c 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
t                 974 net/ipv6/ip6_tunnel.c int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
t                 978 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
t                 980 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                 982 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md)
t                 987 net/ipv6/ip6_tunnel.c 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
t                1036 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1037 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                1038 net/ipv6/ip6_tunnel.c 	struct net_device_stats *stats = &t->dev->stats;
t                1044 net/ipv6/ip6_tunnel.c 	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
t                1045 net/ipv6/ip6_tunnel.c 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
t                1051 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md) {
t                1055 net/ipv6/ip6_tunnel.c 		hop_limit = t->parms.hop_limit;
t                1059 net/ipv6/ip6_tunnel.c 	if (ipv6_addr_any(&t->parms.raddr)) {
t                1082 net/ipv6/ip6_tunnel.c 	} else if (t->parms.proto != 0 && !(t->parms.flags &
t                1092 net/ipv6/ip6_tunnel.c 		dst = dst_cache_get(&t->dst_cache);
t                1094 net/ipv6/ip6_tunnel.c 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
t                1112 net/ipv6/ip6_tunnel.c 		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
t                1124 net/ipv6/ip6_tunnel.c 				     t->parms.name);
t                1127 net/ipv6/ip6_tunnel.c 	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
t                1136 net/ipv6/ip6_tunnel.c 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
t                1142 net/ipv6/ip6_tunnel.c 	if (t->err_count > 0) {
t                1144 net/ipv6/ip6_tunnel.c 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
t                1145 net/ipv6/ip6_tunnel.c 			t->err_count--;
t                1149 net/ipv6/ip6_tunnel.c 			t->err_count = 0;
t                1153 net/ipv6/ip6_tunnel.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
t                1174 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md) {
t                1175 net/ipv6/ip6_tunnel.c 		if (t->encap.type != TUNNEL_ENCAP_NONE)
t                1179 net/ipv6/ip6_tunnel.c 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
t                1196 net/ipv6/ip6_tunnel.c 			+ dst->header_len + t->hlen;
t                1200 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_encap(skb, t, &proto, fl6);
t                1232 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1244 net/ipv6/ip6_tunnel.c 	tproto = READ_ONCE(t->parms.proto);
t                1248 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md) {
t                1264 net/ipv6/ip6_tunnel.c 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                1265 net/ipv6/ip6_tunnel.c 			encap_limit = t->parms.encap_limit;
t                1267 net/ipv6/ip6_tunnel.c 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
t                1270 net/ipv6/ip6_tunnel.c 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
t                1273 net/ipv6/ip6_tunnel.c 			dsfield = ip6_tclass(t->parms.flowinfo);
t                1274 net/ipv6/ip6_tunnel.c 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
t                1277 net/ipv6/ip6_tunnel.c 			fl6.flowi6_mark = t->parms.fwmark;
t                1304 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1315 net/ipv6/ip6_tunnel.c 	tproto = READ_ONCE(t->parms.proto);
t                1317 net/ipv6/ip6_tunnel.c 	    ip6_tnl_addr_conflict(t, ipv6h))
t                1320 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md) {
t                1349 net/ipv6/ip6_tunnel.c 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
t                1350 net/ipv6/ip6_tunnel.c 			encap_limit = t->parms.encap_limit;
t                1353 net/ipv6/ip6_tunnel.c 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
t                1356 net/ipv6/ip6_tunnel.c 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
t                1359 net/ipv6/ip6_tunnel.c 			dsfield = ip6_tclass(t->parms.flowinfo);
t                1360 net/ipv6/ip6_tunnel.c 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
t                1362 net/ipv6/ip6_tunnel.c 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
t                1365 net/ipv6/ip6_tunnel.c 			fl6.flowi6_mark = t->parms.fwmark;
t                1390 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1391 net/ipv6/ip6_tunnel.c 	struct net_device_stats *stats = &t->dev->stats;
t                1420 net/ipv6/ip6_tunnel.c static void ip6_tnl_link_config(struct ip6_tnl *t)
t                1422 net/ipv6/ip6_tunnel.c 	struct net_device *dev = t->dev;
t                1423 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
t                1424 net/ipv6/ip6_tunnel.c 	struct flowi6 *fl6 = &t->fl.u.ip6;
t                1442 net/ipv6/ip6_tunnel.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
t                1449 net/ipv6/ip6_tunnel.c 	t->tun_hlen = 0;
t                1450 net/ipv6/ip6_tunnel.c 	t->hlen = t->encap_hlen + t->tun_hlen;
t                1451 net/ipv6/ip6_tunnel.c 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
t                1457 net/ipv6/ip6_tunnel.c 		struct rt6_info *rt = rt6_lookup(t->net,
t                1469 net/ipv6/ip6_tunnel.c 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                1489 net/ipv6/ip6_tunnel.c ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
t                1491 net/ipv6/ip6_tunnel.c 	t->parms.laddr = p->laddr;
t                1492 net/ipv6/ip6_tunnel.c 	t->parms.raddr = p->raddr;
t                1493 net/ipv6/ip6_tunnel.c 	t->parms.flags = p->flags;
t                1494 net/ipv6/ip6_tunnel.c 	t->parms.hop_limit = p->hop_limit;
t                1495 net/ipv6/ip6_tunnel.c 	t->parms.encap_limit = p->encap_limit;
t                1496 net/ipv6/ip6_tunnel.c 	t->parms.flowinfo = p->flowinfo;
t                1497 net/ipv6/ip6_tunnel.c 	t->parms.link = p->link;
t                1498 net/ipv6/ip6_tunnel.c 	t->parms.proto = p->proto;
t                1499 net/ipv6/ip6_tunnel.c 	t->parms.fwmark = p->fwmark;
t                1500 net/ipv6/ip6_tunnel.c 	dst_cache_reset(&t->dst_cache);
t                1501 net/ipv6/ip6_tunnel.c 	ip6_tnl_link_config(t);
t                1505 net/ipv6/ip6_tunnel.c static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
t                1507 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                1511 net/ipv6/ip6_tunnel.c 	ip6_tnl_unlink(ip6n, t);
t                1513 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_change(t, p);
t                1514 net/ipv6/ip6_tunnel.c 	ip6_tnl_link(ip6n, t);
t                1515 net/ipv6/ip6_tunnel.c 	netdev_state_change(t->dev);
t                1519 net/ipv6/ip6_tunnel.c static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
t                1522 net/ipv6/ip6_tunnel.c 	t->parms.proto = p->proto;
t                1523 net/ipv6/ip6_tunnel.c 	netdev_state_change(t->dev);
t                1589 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1590 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                1603 net/ipv6/ip6_tunnel.c 			t = ip6_tnl_locate(net, &p1, 0);
t                1604 net/ipv6/ip6_tunnel.c 			if (IS_ERR(t))
t                1605 net/ipv6/ip6_tunnel.c 				t = netdev_priv(dev);
t                1609 net/ipv6/ip6_tunnel.c 		ip6_tnl_parm_to_user(&p, &t->parms);
t                1627 net/ipv6/ip6_tunnel.c 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
t                1629 net/ipv6/ip6_tunnel.c 			if (!IS_ERR(t)) {
t                1630 net/ipv6/ip6_tunnel.c 				if (t->dev != dev) {
t                1635 net/ipv6/ip6_tunnel.c 				t = netdev_priv(dev);
t                1637 net/ipv6/ip6_tunnel.c 				err = ip6_tnl0_update(t, &p1);
t                1639 net/ipv6/ip6_tunnel.c 				err = ip6_tnl_update(t, &p1);
t                1641 net/ipv6/ip6_tunnel.c 		if (!IS_ERR(t)) {
t                1643 net/ipv6/ip6_tunnel.c 			ip6_tnl_parm_to_user(&p, &t->parms);
t                1648 net/ipv6/ip6_tunnel.c 			err = PTR_ERR(t);
t                1662 net/ipv6/ip6_tunnel.c 			t = ip6_tnl_locate(net, &p1, 0);
t                1663 net/ipv6/ip6_tunnel.c 			if (IS_ERR(t))
t                1666 net/ipv6/ip6_tunnel.c 			if (t->dev == ip6n->fb_tnl_dev)
t                1668 net/ipv6/ip6_tunnel.c 			dev = t->dev;
t                1714 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1716 net/ipv6/ip6_tunnel.c 	return t->parms.link;
t                1750 net/ipv6/ip6_tunnel.c int ip6_tnl_encap_setup(struct ip6_tnl *t,
t                1755 net/ipv6/ip6_tunnel.c 	memset(&t->encap, 0, sizeof(t->encap));
t                1761 net/ipv6/ip6_tunnel.c 	t->encap.type = ipencap->type;
t                1762 net/ipv6/ip6_tunnel.c 	t->encap.sport = ipencap->sport;
t                1763 net/ipv6/ip6_tunnel.c 	t->encap.dport = ipencap->dport;
t                1764 net/ipv6/ip6_tunnel.c 	t->encap.flags = ipencap->flags;
t                1766 net/ipv6/ip6_tunnel.c 	t->encap_hlen = hlen;
t                1767 net/ipv6/ip6_tunnel.c 	t->hlen = t->encap_hlen + t->tun_hlen;
t                1826 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1830 net/ipv6/ip6_tunnel.c 	t->dev = dev;
t                1831 net/ipv6/ip6_tunnel.c 	t->net = dev_net(dev);
t                1836 net/ipv6/ip6_tunnel.c 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
t                1840 net/ipv6/ip6_tunnel.c 	ret = gro_cells_init(&t->gro_cells, dev);
t                1844 net/ipv6/ip6_tunnel.c 	t->tun_hlen = 0;
t                1845 net/ipv6/ip6_tunnel.c 	t->hlen = t->encap_hlen + t->tun_hlen;
t                1846 net/ipv6/ip6_tunnel.c 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
t                1851 net/ipv6/ip6_tunnel.c 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
t                1859 net/ipv6/ip6_tunnel.c 	dst_cache_destroy(&t->dst_cache);
t                1874 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1879 net/ipv6/ip6_tunnel.c 	ip6_tnl_link_config(t);
t                1880 net/ipv6/ip6_tunnel.c 	if (t->parms.collect_md)
t                1894 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                1898 net/ipv6/ip6_tunnel.c 	t->parms.proto = IPPROTO_IPV6;
t                1901 net/ipv6/ip6_tunnel.c 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
t                2001 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *nt, *t;
t                2018 net/ipv6/ip6_tunnel.c 		t = ip6_tnl_locate(net, &nt->parms, 0);
t                2019 net/ipv6/ip6_tunnel.c 		if (!IS_ERR(t))
t                2034 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = netdev_priv(dev);
t                2036 net/ipv6/ip6_tunnel.c 	struct net *net = t->net;
t                2044 net/ipv6/ip6_tunnel.c 		int err = ip6_tnl_encap_setup(t, &ipencap);
t                2053 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_locate(net, &p, 0);
t                2054 net/ipv6/ip6_tunnel.c 	if (!IS_ERR(t)) {
t                2055 net/ipv6/ip6_tunnel.c 		if (t->dev != dev)
t                2058 net/ipv6/ip6_tunnel.c 		t = netdev_priv(dev);
t                2060 net/ipv6/ip6_tunnel.c 	return ip6_tnl_update(t, &p);
t                2195 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t;
t                2202 net/ipv6/ip6_tunnel.c 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
t                2203 net/ipv6/ip6_tunnel.c 		while (t) {
t                2207 net/ipv6/ip6_tunnel.c 			if (!net_eq(dev_net(t->dev), net))
t                2208 net/ipv6/ip6_tunnel.c 				unregister_netdevice_queue(t->dev, list);
t                2209 net/ipv6/ip6_tunnel.c 			t = rtnl_dereference(t->next);
t                2217 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *t = NULL;
t                2246 net/ipv6/ip6_tunnel.c 	t = netdev_priv(ip6n->fb_tnl_dev);
t                2248 net/ipv6/ip6_tunnel.c 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
t                  75 net/ipv6/ip6_vti.c 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
t                  93 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                  98 net/ipv6/ip6_vti.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                  99 net/ipv6/ip6_vti.c 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
t                 100 net/ipv6/ip6_vti.c 		    (t->dev->flags & IFF_UP))
t                 101 net/ipv6/ip6_vti.c 			return t;
t                 107 net/ipv6/ip6_vti.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 108 net/ipv6/ip6_vti.c 		    (t->dev->flags & IFF_UP))
t                 109 net/ipv6/ip6_vti.c 			return t;
t                 114 net/ipv6/ip6_vti.c 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
t                 115 net/ipv6/ip6_vti.c 		    (t->dev->flags & IFF_UP))
t                 116 net/ipv6/ip6_vti.c 			return t;
t                 119 net/ipv6/ip6_vti.c 	t = rcu_dereference(ip6n->tnls_wc[0]);
t                 120 net/ipv6/ip6_vti.c 	if (t && (t->dev->flags & IFF_UP))
t                 121 net/ipv6/ip6_vti.c 		return t;
t                 152 net/ipv6/ip6_vti.c vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
t                 154 net/ipv6/ip6_vti.c 	struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);
t                 156 net/ipv6/ip6_vti.c 	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
t                 157 net/ipv6/ip6_vti.c 	rcu_assign_pointer(*tp, t);
t                 161 net/ipv6/ip6_vti.c vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
t                 166 net/ipv6/ip6_vti.c 	for (tp = vti6_tnl_bucket(ip6n, &t->parms);
t                 169 net/ipv6/ip6_vti.c 		if (t == iter) {
t                 170 net/ipv6/ip6_vti.c 			rcu_assign_pointer(*tp, t->next);
t                 183 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 193 net/ipv6/ip6_vti.c 	strcpy(t->parms.name, dev->name);
t                 196 net/ipv6/ip6_vti.c 	vti6_tnl_link(ip6n, t);
t                 207 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                 219 net/ipv6/ip6_vti.c 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
t                 225 net/ipv6/ip6_vti.c 	t = netdev_priv(dev);
t                 226 net/ipv6/ip6_vti.c 	t->parms = *p;
t                 227 net/ipv6/ip6_vti.c 	t->net = dev_net(dev);
t                 233 net/ipv6/ip6_vti.c 	return t;
t                 261 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                 265 net/ipv6/ip6_vti.c 	     (t = rtnl_dereference(*tp)) != NULL;
t                 266 net/ipv6/ip6_vti.c 	     tp = &t->next) {
t                 267 net/ipv6/ip6_vti.c 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
t                 268 net/ipv6/ip6_vti.c 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
t                 272 net/ipv6/ip6_vti.c 			return t;
t                 289 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 290 net/ipv6/ip6_vti.c 	struct vti6_net *ip6n = net_generic(t->net, vti6_net_id);
t                 295 net/ipv6/ip6_vti.c 		vti6_tnl_unlink(ip6n, t);
t                 301 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                 305 net/ipv6/ip6_vti.c 	t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
t                 306 net/ipv6/ip6_vti.c 	if (t) {
t                 307 net/ipv6/ip6_vti.c 		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
t                 318 net/ipv6/ip6_vti.c 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
t                 319 net/ipv6/ip6_vti.c 			t->dev->stats.rx_dropped++;
t                 326 net/ipv6/ip6_vti.c 		return xfrm6_rcv_tnl(skb, t);
t                 342 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
t                 346 net/ipv6/ip6_vti.c 	if (!t)
t                 349 net/ipv6/ip6_vti.c 	dev = t->dev;
t                 373 net/ipv6/ip6_vti.c 	skb->mark = be32_to_cpu(t->parms.i_key);
t                 380 net/ipv6/ip6_vti.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
t                 406 net/ipv6/ip6_vti.c vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
t                 408 net/ipv6/ip6_vti.c 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
t                 443 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 444 net/ipv6/ip6_vti.c 	struct net_device_stats *stats = &t->dev->stats;
t                 483 net/ipv6/ip6_vti.c 	dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
t                 491 net/ipv6/ip6_vti.c 	if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
t                 494 net/ipv6/ip6_vti.c 	if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
t                 503 net/ipv6/ip6_vti.c 				     t->parms.name);
t                 525 net/ipv6/ip6_vti.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
t                 529 net/ipv6/ip6_vti.c 	err = dst_output(t->net, skb->sk, skb);
t                 546 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 547 net/ipv6/ip6_vti.c 	struct net_device_stats *stats = &t->dev->stats;
t                 558 net/ipv6/ip6_vti.c 		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
t                 559 net/ipv6/ip6_vti.c 		    vti6_addr_conflict(t, ipv6_hdr(skb)))
t                 574 net/ipv6/ip6_vti.c 	fl.flowi_mark = be32_to_cpu(t->parms.o_key);
t                 595 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                 603 net/ipv6/ip6_vti.c 	t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
t                 604 net/ipv6/ip6_vti.c 	if (!t)
t                 607 net/ipv6/ip6_vti.c 	mark = be32_to_cpu(t->parms.o_key);
t                 645 net/ipv6/ip6_vti.c static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
t                 647 net/ipv6/ip6_vti.c 	struct net_device *dev = t->dev;
t                 648 net/ipv6/ip6_vti.c 	struct __ip6_tnl_parm *p = &t->parms;
t                 657 net/ipv6/ip6_vti.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
t                 672 net/ipv6/ip6_vti.c 		struct rt6_info *rt = rt6_lookup(t->net,
t                 682 net/ipv6/ip6_vti.c 		tdev = __dev_get_by_index(t->net, p->link);
t                 702 net/ipv6/ip6_vti.c vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
t                 705 net/ipv6/ip6_vti.c 	t->parms.laddr = p->laddr;
t                 706 net/ipv6/ip6_vti.c 	t->parms.raddr = p->raddr;
t                 707 net/ipv6/ip6_vti.c 	t->parms.link = p->link;
t                 708 net/ipv6/ip6_vti.c 	t->parms.i_key = p->i_key;
t                 709 net/ipv6/ip6_vti.c 	t->parms.o_key = p->o_key;
t                 710 net/ipv6/ip6_vti.c 	t->parms.proto = p->proto;
t                 711 net/ipv6/ip6_vti.c 	t->parms.fwmark = p->fwmark;
t                 712 net/ipv6/ip6_vti.c 	dst_cache_reset(&t->dst_cache);
t                 713 net/ipv6/ip6_vti.c 	vti6_link_config(t, keep_mtu);
t                 717 net/ipv6/ip6_vti.c static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
t                 720 net/ipv6/ip6_vti.c 	struct net *net = dev_net(t->dev);
t                 724 net/ipv6/ip6_vti.c 	vti6_tnl_unlink(ip6n, t);
t                 726 net/ipv6/ip6_vti.c 	err = vti6_tnl_change(t, p, keep_mtu);
t                 727 net/ipv6/ip6_vti.c 	vti6_tnl_link(ip6n, t);
t                 728 net/ipv6/ip6_vti.c 	netdev_state_change(t->dev);
t                 795 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = NULL;
t                 807 net/ipv6/ip6_vti.c 			t = vti6_locate(net, &p1, 0);
t                 811 net/ipv6/ip6_vti.c 		if (!t)
t                 812 net/ipv6/ip6_vti.c 			t = netdev_priv(dev);
t                 813 net/ipv6/ip6_vti.c 		vti6_parm_to_user(&p, &t->parms);
t                 829 net/ipv6/ip6_vti.c 		t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
t                 831 net/ipv6/ip6_vti.c 			if (t) {
t                 832 net/ipv6/ip6_vti.c 				if (t->dev != dev) {
t                 837 net/ipv6/ip6_vti.c 				t = netdev_priv(dev);
t                 839 net/ipv6/ip6_vti.c 			err = vti6_update(t, &p1, false);
t                 841 net/ipv6/ip6_vti.c 		if (t) {
t                 843 net/ipv6/ip6_vti.c 			vti6_parm_to_user(&p, &t->parms);
t                 861 net/ipv6/ip6_vti.c 			t = vti6_locate(net, &p1, 0);
t                 862 net/ipv6/ip6_vti.c 			if (!t)
t                 865 net/ipv6/ip6_vti.c 			if (t->dev == ip6n->fb_tnl_dev)
t                 867 net/ipv6/ip6_vti.c 			dev = t->dev;
t                 917 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 919 net/ipv6/ip6_vti.c 	t->dev = dev;
t                 920 net/ipv6/ip6_vti.c 	t->net = dev_net(dev);
t                 933 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 938 net/ipv6/ip6_vti.c 	vti6_link_config(t, true);
t                 950 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = netdev_priv(dev);
t                 954 net/ipv6/ip6_vti.c 	t->parms.proto = IPPROTO_IPV6;
t                 957 net/ipv6/ip6_vti.c 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
t                1025 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                1035 net/ipv6/ip6_vti.c 	t = vti6_locate(net, &p, 0);
t                1037 net/ipv6/ip6_vti.c 	if (t) {
t                1038 net/ipv6/ip6_vti.c 		if (t->dev != dev)
t                1041 net/ipv6/ip6_vti.c 		t = netdev_priv(dev);
t                1043 net/ipv6/ip6_vti.c 	return vti6_update(t, &p, tb && tb[IFLA_MTU]);
t                1110 net/ipv6/ip6_vti.c 	struct ip6_tnl *t;
t                1113 net/ipv6/ip6_vti.c 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
t                1114 net/ipv6/ip6_vti.c 		while (t) {
t                1115 net/ipv6/ip6_vti.c 			unregister_netdevice_queue(t->dev, list);
t                1116 net/ipv6/ip6_vti.c 			t = rtnl_dereference(t->next);
t                1120 net/ipv6/ip6_vti.c 	t = rtnl_dereference(ip6n->tnls_wc[0]);
t                1121 net/ipv6/ip6_vti.c 	if (t)
t                1122 net/ipv6/ip6_vti.c 		unregister_netdevice_queue(t->dev, list);
t                1128 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = NULL;
t                1153 net/ipv6/ip6_vti.c 	t = netdev_priv(ip6n->fb_tnl_dev);
t                1155 net/ipv6/ip6_vti.c 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
t                  96 net/ipv6/ip6mr.c static void ipmr_expire_process(struct timer_list *t);
t                 816 net/ipv6/ip6mr.c static void ipmr_expire_process(struct timer_list *t)
t                 818 net/ipv6/ip6mr.c 	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
t                  77 net/ipv6/ipcomp6.c 	struct xfrm_state *t = NULL;
t                  79 net/ipv6/ipcomp6.c 	t = xfrm_state_alloc(net);
t                  80 net/ipv6/ipcomp6.c 	if (!t)
t                  83 net/ipv6/ipcomp6.c 	t->id.proto = IPPROTO_IPV6;
t                  84 net/ipv6/ipcomp6.c 	t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
t                  85 net/ipv6/ipcomp6.c 	if (!t->id.spi)
t                  88 net/ipv6/ipcomp6.c 	memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr));
t                  89 net/ipv6/ipcomp6.c 	memcpy(&t->sel, &x->sel, sizeof(t->sel));
t                  90 net/ipv6/ipcomp6.c 	t->props.family = AF_INET6;
t                  91 net/ipv6/ipcomp6.c 	t->props.mode = x->props.mode;
t                  92 net/ipv6/ipcomp6.c 	memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
t                  93 net/ipv6/ipcomp6.c 	memcpy(&t->mark, &x->mark, sizeof(t->mark));
t                  95 net/ipv6/ipcomp6.c 	if (xfrm_init_state(t))
t                  98 net/ipv6/ipcomp6.c 	atomic_set(&t->tunnel_users, 1);
t                 101 net/ipv6/ipcomp6.c 	return t;
t                 104 net/ipv6/ipcomp6.c 	t->km.state = XFRM_STATE_DEAD;
t                 105 net/ipv6/ipcomp6.c 	xfrm_state_put(t);
t                 106 net/ipv6/ipcomp6.c 	t = NULL;
t                 114 net/ipv6/ipcomp6.c 	struct xfrm_state *t = NULL;
t                 120 net/ipv6/ipcomp6.c 		t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr,
t                 122 net/ipv6/ipcomp6.c 	if (!t) {
t                 123 net/ipv6/ipcomp6.c 		t = ipcomp6_tunnel_create(x);
t                 124 net/ipv6/ipcomp6.c 		if (!t) {
t                 128 net/ipv6/ipcomp6.c 		xfrm_state_insert(t);
t                 129 net/ipv6/ipcomp6.c 		xfrm_state_hold(t);
t                 131 net/ipv6/ipcomp6.c 	x->tunnel = t;
t                 132 net/ipv6/ipcomp6.c 	atomic_inc(&t->tunnel_users);
t                  74 net/ipv6/mcast.c static void igmp6_timer_handler(struct timer_list *t);
t                  76 net/ipv6/mcast.c static void mld_gq_timer_expire(struct timer_list *t);
t                  77 net/ipv6/mcast.c static void mld_ifc_timer_expire(struct timer_list *t);
t                2114 net/ipv6/mcast.c static void mld_dad_timer_expire(struct timer_list *t)
t                2116 net/ipv6/mcast.c 	struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
t                2464 net/ipv6/mcast.c static void mld_gq_timer_expire(struct timer_list *t)
t                2466 net/ipv6/mcast.c 	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
t                2473 net/ipv6/mcast.c static void mld_ifc_timer_expire(struct timer_list *t)
t                2475 net/ipv6/mcast.c 	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
t                2495 net/ipv6/mcast.c static void igmp6_timer_handler(struct timer_list *t)
t                2497 net/ipv6/mcast.c 	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
t                 187 net/ipv6/netfilter/ip6_tables.c 	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
t                 189 net/ipv6/netfilter/ip6_tables.c 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
t                 191 net/ipv6/netfilter/ip6_tables.c 		*chainname = t->target.data;
t                 197 net/ipv6/netfilter/ip6_tables.c 		    strcmp(t->target.u.kernel.target->name,
t                 199 net/ipv6/netfilter/ip6_tables.c 		    t->verdict < 0) {
t                 301 net/ipv6/netfilter/ip6_tables.c 		const struct xt_entry_target *t;
t                 324 net/ipv6/netfilter/ip6_tables.c 		t = ip6t_get_target_c(e);
t                 325 net/ipv6/netfilter/ip6_tables.c 		WARN_ON(!t->u.kernel.target);
t                 334 net/ipv6/netfilter/ip6_tables.c 		if (!t->u.kernel.target->target) {
t                 337 net/ipv6/netfilter/ip6_tables.c 			v = ((struct xt_standard_target *)t)->verdict;
t                 364 net/ipv6/netfilter/ip6_tables.c 		acpar.target   = t->u.kernel.target;
t                 365 net/ipv6/netfilter/ip6_tables.c 		acpar.targinfo = t->data;
t                 367 net/ipv6/netfilter/ip6_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
t                 405 net/ipv6/netfilter/ip6_tables.c 			const struct xt_standard_target *t
t                 416 net/ipv6/netfilter/ip6_tables.c 			     (strcmp(t->target.u.user.name,
t                 418 net/ipv6/netfilter/ip6_tables.c 			     t->verdict < 0) || visited) {
t                 444 net/ipv6/netfilter/ip6_tables.c 				int newpos = t->verdict;
t                 446 net/ipv6/netfilter/ip6_tables.c 				if (strcmp(t->target.u.user.name,
t                 518 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t = ip6t_get_target(e);
t                 523 net/ipv6/netfilter/ip6_tables.c 		.target    = t->u.kernel.target,
t                 524 net/ipv6/netfilter/ip6_tables.c 		.targinfo  = t->data,
t                 529 net/ipv6/netfilter/ip6_tables.c 	return xt_check_target(&par, t->u.target_size - sizeof(*t),
t                 539 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                 563 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
t                 564 net/ipv6/netfilter/ip6_tables.c 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t                 565 net/ipv6/netfilter/ip6_tables.c 					t->u.user.revision);
t                 570 net/ipv6/netfilter/ip6_tables.c 	t->u.kernel.target = target;
t                 577 net/ipv6/netfilter/ip6_tables.c 	module_put(t->u.kernel.target->me);
t                 592 net/ipv6/netfilter/ip6_tables.c 	const struct xt_entry_target *t;
t                 597 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target_c(e);
t                 598 net/ipv6/netfilter/ip6_tables.c 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
t                 600 net/ipv6/netfilter/ip6_tables.c 	verdict = ((struct xt_standard_target *)t)->verdict;
t                 657 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                 663 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
t                 666 net/ipv6/netfilter/ip6_tables.c 	par.target   = t->u.kernel.target;
t                 667 net/ipv6/netfilter/ip6_tables.c 	par.targinfo = t->data;
t                 757 net/ipv6/netfilter/ip6_tables.c get_counters(const struct xt_table_info *t,
t                 768 net/ipv6/netfilter/ip6_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 787 net/ipv6/netfilter/ip6_tables.c static void get_old_counters(const struct xt_table_info *t,
t                 795 net/ipv6/netfilter/ip6_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
t                 849 net/ipv6/netfilter/ip6_tables.c 		const struct xt_entry_target *t;
t                 875 net/ipv6/netfilter/ip6_tables.c 		t = ip6t_get_target_c(e);
t                 876 net/ipv6/netfilter/ip6_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
t                 911 net/ipv6/netfilter/ip6_tables.c 	const struct xt_entry_target *t;
t                 919 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target_c(e);
t                 920 net/ipv6/netfilter/ip6_tables.c 	off += xt_compat_target_offset(t->u.kernel.target);
t                 967 net/ipv6/netfilter/ip6_tables.c 	struct xt_table *t;
t                 981 net/ipv6/netfilter/ip6_tables.c 	t = xt_request_find_table_lock(net, AF_INET6, name);
t                 982 net/ipv6/netfilter/ip6_tables.c 	if (!IS_ERR(t)) {
t                 984 net/ipv6/netfilter/ip6_tables.c 		const struct xt_table_info *private = t->private;
t                 995 net/ipv6/netfilter/ip6_tables.c 		info.valid_hooks = t->valid_hooks;
t                1009 net/ipv6/netfilter/ip6_tables.c 		xt_table_unlock(t);
t                1010 net/ipv6/netfilter/ip6_tables.c 		module_put(t->me);
t                1012 net/ipv6/netfilter/ip6_tables.c 		ret = PTR_ERR(t);
t                1026 net/ipv6/netfilter/ip6_tables.c 	struct xt_table *t;
t                1037 net/ipv6/netfilter/ip6_tables.c 	t = xt_find_table_lock(net, AF_INET6, get.name);
t                1038 net/ipv6/netfilter/ip6_tables.c 	if (!IS_ERR(t)) {
t                1039 net/ipv6/netfilter/ip6_tables.c 		struct xt_table_info *private = t->private;
t                1042 net/ipv6/netfilter/ip6_tables.c 						   t, uptr->entrytable);
t                1046 net/ipv6/netfilter/ip6_tables.c 		module_put(t->me);
t                1047 net/ipv6/netfilter/ip6_tables.c 		xt_table_unlock(t);
t                1049 net/ipv6/netfilter/ip6_tables.c 		ret = PTR_ERR(t);
t                1060 net/ipv6/netfilter/ip6_tables.c 	struct xt_table *t;
t                1072 net/ipv6/netfilter/ip6_tables.c 	t = xt_request_find_table_lock(net, AF_INET6, name);
t                1073 net/ipv6/netfilter/ip6_tables.c 	if (IS_ERR(t)) {
t                1074 net/ipv6/netfilter/ip6_tables.c 		ret = PTR_ERR(t);
t                1079 net/ipv6/netfilter/ip6_tables.c 	if (valid_hooks != t->valid_hooks) {
t                1084 net/ipv6/netfilter/ip6_tables.c 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
t                1091 net/ipv6/netfilter/ip6_tables.c 		module_put(t->me);
t                1094 net/ipv6/netfilter/ip6_tables.c 		module_put(t->me);
t                1096 net/ipv6/netfilter/ip6_tables.c 	xt_table_unlock(t);
t                1114 net/ipv6/netfilter/ip6_tables.c 	module_put(t->me);
t                1115 net/ipv6/netfilter/ip6_tables.c 	xt_table_unlock(t);
t                1178 net/ipv6/netfilter/ip6_tables.c 	struct xt_table *t;
t                1187 net/ipv6/netfilter/ip6_tables.c 	t = xt_find_table_lock(net, AF_INET6, tmp.name);
t                1188 net/ipv6/netfilter/ip6_tables.c 	if (IS_ERR(t)) {
t                1189 net/ipv6/netfilter/ip6_tables.c 		ret = PTR_ERR(t);
t                1194 net/ipv6/netfilter/ip6_tables.c 	private = t->private;
t                1212 net/ipv6/netfilter/ip6_tables.c 	xt_table_unlock(t);
t                1213 net/ipv6/netfilter/ip6_tables.c 	module_put(t->me);
t                1238 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                1261 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
t                1262 net/ipv6/netfilter/ip6_tables.c 	ret = xt_compat_target_to_user(t, dstptr, size);
t                1291 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                1297 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
t                1298 net/ipv6/netfilter/ip6_tables.c 	module_put(t->u.kernel.target->me);
t                1309 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                1342 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
t                1343 net/ipv6/netfilter/ip6_tables.c 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t                1344 net/ipv6/netfilter/ip6_tables.c 					t->u.user.revision);
t                1349 net/ipv6/netfilter/ip6_tables.c 	t->u.kernel.target = target;
t                1360 net/ipv6/netfilter/ip6_tables.c 	module_put(t->u.kernel.target->me);
t                1375 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t;
t                1393 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
t                1394 net/ipv6/netfilter/ip6_tables.c 	xt_compat_target_from_user(t, dstptr, size);
t                1612 net/ipv6/netfilter/ip6_tables.c 	struct xt_table *t;
t                1626 net/ipv6/netfilter/ip6_tables.c 	t = xt_find_table_lock(net, AF_INET6, get.name);
t                1627 net/ipv6/netfilter/ip6_tables.c 	if (!IS_ERR(t)) {
t                1628 net/ipv6/netfilter/ip6_tables.c 		const struct xt_table_info *private = t->private;
t                1633 net/ipv6/netfilter/ip6_tables.c 							  t, uptr->entrytable);
t                1638 net/ipv6/netfilter/ip6_tables.c 		module_put(t->me);
t                1639 net/ipv6/netfilter/ip6_tables.c 		xt_table_unlock(t);
t                1641 net/ipv6/netfilter/ip6_tables.c 		ret = PTR_ERR(t);
t                 138 net/ipv6/netfilter/nf_conntrack_reasm.c static void nf_ct_frag6_expire(struct timer_list *t)
t                 140 net/ipv6/netfilter/nf_conntrack_reasm.c 	struct inet_frag_queue *frag = from_timer(frag, t, timer);
t                  71 net/ipv6/reassembly.c static void ip6_frag_expire(struct timer_list *t)
t                  73 net/ipv6/reassembly.c 	struct inet_frag_queue *frag = from_timer(frag, t, timer);
t                  96 net/ipv6/sit.c 	struct ip_tunnel *t;
t                 100 net/ipv6/sit.c 	for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
t                 101 net/ipv6/sit.c 		if (local == t->parms.iph.saddr &&
t                 102 net/ipv6/sit.c 		    remote == t->parms.iph.daddr &&
t                 103 net/ipv6/sit.c 		    (!dev || !t->parms.link || ifindex == t->parms.link ||
t                 104 net/ipv6/sit.c 		     sifindex == t->parms.link) &&
t                 105 net/ipv6/sit.c 		    (t->dev->flags & IFF_UP))
t                 106 net/ipv6/sit.c 			return t;
t                 108 net/ipv6/sit.c 	for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
t                 109 net/ipv6/sit.c 		if (remote == t->parms.iph.daddr &&
t                 110 net/ipv6/sit.c 		    (!dev || !t->parms.link || ifindex == t->parms.link ||
t                 111 net/ipv6/sit.c 		     sifindex == t->parms.link) &&
t                 112 net/ipv6/sit.c 		    (t->dev->flags & IFF_UP))
t                 113 net/ipv6/sit.c 			return t;
t                 115 net/ipv6/sit.c 	for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
t                 116 net/ipv6/sit.c 		if (local == t->parms.iph.saddr &&
t                 117 net/ipv6/sit.c 		    (!dev || !t->parms.link || ifindex == t->parms.link ||
t                 118 net/ipv6/sit.c 		     sifindex == t->parms.link) &&
t                 119 net/ipv6/sit.c 		    (t->dev->flags & IFF_UP))
t                 120 net/ipv6/sit.c 			return t;
t                 122 net/ipv6/sit.c 	t = rcu_dereference(sitn->tunnels_wc[0]);
t                 123 net/ipv6/sit.c 	if (t && (t->dev->flags & IFF_UP))
t                 124 net/ipv6/sit.c 		return t;
t                 148 net/ipv6/sit.c 		struct ip_tunnel *t)
t                 150 net/ipv6/sit.c 	return __ipip6_bucket(sitn, &t->parms);
t                 153 net/ipv6/sit.c static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
t                 158 net/ipv6/sit.c 	for (tp = ipip6_bucket(sitn, t);
t                 161 net/ipv6/sit.c 		if (t == iter) {
t                 162 net/ipv6/sit.c 			rcu_assign_pointer(*tp, t->next);
t                 168 net/ipv6/sit.c static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
t                 170 net/ipv6/sit.c 	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
t                 172 net/ipv6/sit.c 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
t                 173 net/ipv6/sit.c 	rcu_assign_pointer(*tp, t);
t                 179 net/ipv6/sit.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 182 net/ipv6/sit.c 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
t                 183 net/ipv6/sit.c 		t->ip6rd.relay_prefix = 0;
t                 184 net/ipv6/sit.c 		t->ip6rd.prefixlen = 16;
t                 185 net/ipv6/sit.c 		t->ip6rd.relay_prefixlen = 0;
t                 188 net/ipv6/sit.c 		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
t                 195 net/ipv6/sit.c 	struct ip_tunnel *t = netdev_priv(dev);
t                 200 net/ipv6/sit.c 	memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
t                 201 net/ipv6/sit.c 	memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
t                 203 net/ipv6/sit.c 	if ((__force u16)t->parms.i_flags & SIT_ISATAP)
t                 216 net/ipv6/sit.c 	ipip6_tunnel_link(sitn, t);
t                 228 net/ipv6/sit.c 	struct ip_tunnel *t, *nt;
t                 235 net/ipv6/sit.c 	    (t = rtnl_dereference(*tp)) != NULL;
t                 236 net/ipv6/sit.c 	     tp = &t->next) {
t                 237 net/ipv6/sit.c 		if (local == t->parms.iph.saddr &&
t                 238 net/ipv6/sit.c 		    remote == t->parms.iph.daddr &&
t                 239 net/ipv6/sit.c 		    parms->link == t->parms.link) {
t                 243 net/ipv6/sit.c 				return t;
t                 256 net/ipv6/sit.c 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
t                 283 net/ipv6/sit.c __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
t                 287 net/ipv6/sit.c 	for_each_prl_rcu(t->prl)
t                 294 net/ipv6/sit.c static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
t                 317 net/ipv6/sit.c 	ca = t->prl_count < cmax ? t->prl_count : cmax;
t                 333 net/ipv6/sit.c 	for_each_prl_rcu(t->prl) {
t                 358 net/ipv6/sit.c ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
t                 368 net/ipv6/sit.c 	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
t                 390 net/ipv6/sit.c 	p->next = t->prl;
t                 393 net/ipv6/sit.c 	t->prl_count++;
t                 394 net/ipv6/sit.c 	rcu_assign_pointer(t->prl, p);
t                 412 net/ipv6/sit.c ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
t                 421 net/ipv6/sit.c 		for (p = &t->prl;
t                 427 net/ipv6/sit.c 				t->prl_count--;
t                 433 net/ipv6/sit.c 		x = rtnl_dereference(t->prl);
t                 435 net/ipv6/sit.c 			t->prl_count = 0;
t                 437 net/ipv6/sit.c 			t->prl = NULL;
t                 445 net/ipv6/sit.c isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
t                 451 net/ipv6/sit.c 	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
t                 462 net/ipv6/sit.c 		    ipv6_chk_prefix(addr6, t->dev))
t                 492 net/ipv6/sit.c 	struct ip_tunnel *t;
t                 526 net/ipv6/sit.c 	t = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
t                 528 net/ipv6/sit.c 	if (!t)
t                 533 net/ipv6/sit.c 				 t->parms.link, iph->protocol);
t                 538 net/ipv6/sit.c 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link,
t                 549 net/ipv6/sit.c 	if (t->parms.iph.daddr == 0)
t                 552 net/ipv6/sit.c 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
t                 555 net/ipv6/sit.c 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t                 556 net/ipv6/sit.c 		t->err_count++;
t                 558 net/ipv6/sit.c 		t->err_count = 1;
t                 559 net/ipv6/sit.c 	t->err_time = jiffies;
t                1098 net/ipv6/sit.c static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
t                1101 net/ipv6/sit.c 	struct net *net = t->net;
t                1104 net/ipv6/sit.c 	ipip6_tunnel_unlink(sitn, t);
t                1106 net/ipv6/sit.c 	t->parms.iph.saddr = p->iph.saddr;
t                1107 net/ipv6/sit.c 	t->parms.iph.daddr = p->iph.daddr;
t                1108 net/ipv6/sit.c 	memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
t                1109 net/ipv6/sit.c 	memcpy(t->dev->broadcast, &p->iph.daddr, 4);
t                1110 net/ipv6/sit.c 	ipip6_tunnel_link(sitn, t);
t                1111 net/ipv6/sit.c 	t->parms.iph.ttl = p->iph.ttl;
t                1112 net/ipv6/sit.c 	t->parms.iph.tos = p->iph.tos;
t                1113 net/ipv6/sit.c 	t->parms.iph.frag_off = p->iph.frag_off;
t                1114 net/ipv6/sit.c 	if (t->parms.link != p->link || t->fwmark != fwmark) {
t                1115 net/ipv6/sit.c 		t->parms.link = p->link;
t                1116 net/ipv6/sit.c 		t->fwmark = fwmark;
t                1117 net/ipv6/sit.c 		ipip6_tunnel_bind_dev(t->dev);
t                1119 net/ipv6/sit.c 	dst_cache_reset(&t->dst_cache);
t                1120 net/ipv6/sit.c 	netdev_state_change(t->dev);
t                1124 net/ipv6/sit.c static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
t                1146 net/ipv6/sit.c 	t->ip6rd.prefix = prefix;
t                1147 net/ipv6/sit.c 	t->ip6rd.relay_prefix = relay_prefix;
t                1148 net/ipv6/sit.c 	t->ip6rd.prefixlen = ip6rd->prefixlen;
t                1149 net/ipv6/sit.c 	t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
t                1150 net/ipv6/sit.c 	dst_cache_reset(&t->dst_cache);
t                1151 net/ipv6/sit.c 	netdev_state_change(t->dev);
t                1172 net/ipv6/sit.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1173 net/ipv6/sit.c 	struct net *net = t->net;
t                1189 net/ipv6/sit.c 			t = ipip6_tunnel_locate(net, &p, 0);
t                1190 net/ipv6/sit.c 			if (!t)
t                1191 net/ipv6/sit.c 				t = netdev_priv(dev);
t                1196 net/ipv6/sit.c 			memcpy(&p, &t->parms, sizeof(p));
t                1202 net/ipv6/sit.c 			ip6rd.prefix = t->ip6rd.prefix;
t                1203 net/ipv6/sit.c 			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
t                1204 net/ipv6/sit.c 			ip6rd.prefixlen = t->ip6rd.prefixlen;
t                1205 net/ipv6/sit.c 			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
t                1233 net/ipv6/sit.c 		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
t                1236 net/ipv6/sit.c 			if (t) {
t                1237 net/ipv6/sit.c 				if (t->dev != dev) {
t                1247 net/ipv6/sit.c 				t = netdev_priv(dev);
t                1250 net/ipv6/sit.c 			ipip6_tunnel_update(t, &p, t->fwmark);
t                1253 net/ipv6/sit.c 		if (t) {
t                1255 net/ipv6/sit.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
t                1271 net/ipv6/sit.c 			t = ipip6_tunnel_locate(net, &p, 0);
t                1272 net/ipv6/sit.c 			if (!t)
t                1275 net/ipv6/sit.c 			if (t == netdev_priv(sitn->fb_tunnel_dev))
t                1277 net/ipv6/sit.c 			dev = t->dev;
t                1287 net/ipv6/sit.c 		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
t                1305 net/ipv6/sit.c 			err = ipip6_tunnel_del_prl(t, &prl);
t                1309 net/ipv6/sit.c 			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
t                1312 net/ipv6/sit.c 		dst_cache_reset(&t->dst_cache);
t                1330 net/ipv6/sit.c 			err = ipip6_tunnel_update_6rd(t, &ip6rd);
t                1613 net/ipv6/sit.c 	struct ip_tunnel *t = netdev_priv(dev);
t                1616 net/ipv6/sit.c 	struct net *net = t->net;
t                1621 net/ipv6/sit.c 	__u32 fwmark = t->fwmark;
t                1628 net/ipv6/sit.c 		err = ip_tunnel_encap_setup(t, &ipencap);
t                1639 net/ipv6/sit.c 	t = ipip6_tunnel_locate(net, &p, 0);
t                1641 net/ipv6/sit.c 	if (t) {
t                1642 net/ipv6/sit.c 		if (t->dev != dev)
t                1645 net/ipv6/sit.c 		t = netdev_priv(dev);
t                1647 net/ipv6/sit.c 	ipip6_tunnel_update(t, &p, fwmark);
t                1651 net/ipv6/sit.c 		return ipip6_tunnel_update_6rd(t, &ip6rd);
t                1824 net/ipv6/sit.c 			struct ip_tunnel *t;
t                1826 net/ipv6/sit.c 			t = rtnl_dereference(sitn->tunnels[prio][h]);
t                1827 net/ipv6/sit.c 			while (t) {
t                1831 net/ipv6/sit.c 				if (!net_eq(dev_net(t->dev), net))
t                1832 net/ipv6/sit.c 					unregister_netdevice_queue(t->dev,
t                1834 net/ipv6/sit.c 				t = rtnl_dereference(t->next);
t                1843 net/ipv6/sit.c 	struct ip_tunnel *t;
t                1875 net/ipv6/sit.c 	t = netdev_priv(sitn->fb_tunnel_dev);
t                1877 net/ipv6/sit.c 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
t                  29 net/ipv6/tunnel6.c 	struct xfrm6_tunnel *t;
t                  36 net/ipv6/tunnel6.c 	     (t = rcu_dereference_protected(*pprev,
t                  38 net/ipv6/tunnel6.c 	     pprev = &t->next) {
t                  39 net/ipv6/tunnel6.c 		if (t->priority > priority)
t                  41 net/ipv6/tunnel6.c 		if (t->priority == priority)
t                  60 net/ipv6/tunnel6.c 	struct xfrm6_tunnel *t;
t                  66 net/ipv6/tunnel6.c 	     (t = rcu_dereference_protected(*pprev,
t                  68 net/ipv6/tunnel6.c 	     pprev = &t->next) {
t                  69 net/ipv6/tunnel6.c 		if (t == handler) {
t                  26 net/ipv6/xfrm6_input.c 		  struct ip6_tnl *t)
t                  28 net/ipv6/xfrm6_input.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
t                  72 net/ipv6/xfrm6_input.c int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
t                  75 net/ipv6/xfrm6_input.c 			     0, t);
t                 189 net/ipv6/xfrm6_protocol.c 	struct xfrm6_protocol *t;
t                 204 net/ipv6/xfrm6_protocol.c 	     (t = rcu_dereference_protected(*pprev,
t                 206 net/ipv6/xfrm6_protocol.c 	     pprev = &t->next) {
t                 207 net/ipv6/xfrm6_protocol.c 		if (t->priority < priority)
t                 209 net/ipv6/xfrm6_protocol.c 		if (t->priority == priority)
t                 236 net/ipv6/xfrm6_protocol.c 	struct xfrm6_protocol *t;
t                 245 net/ipv6/xfrm6_protocol.c 	     (t = rcu_dereference_protected(*pprev,
t                 247 net/ipv6/xfrm6_protocol.c 	     pprev = &t->next) {
t                 248 net/ipv6/xfrm6_protocol.c 		if (t == handler) {
t                1940 net/key/af_key.c 	struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
t                1951 net/key/af_key.c 	t->id.proto = rq->sadb_x_ipsecrequest_proto;
t                1954 net/key/af_key.c 	t->mode = mode;
t                1956 net/key/af_key.c 		t->optional = 1;
t                1958 net/key/af_key.c 		t->reqid = rq->sadb_x_ipsecrequest_reqid;
t                1959 net/key/af_key.c 		if (t->reqid > IPSEC_MANUAL_REQID_MAX)
t                1960 net/key/af_key.c 			t->reqid = 0;
t                1961 net/key/af_key.c 		if (!t->reqid && !(t->reqid = gen_reqid(net)))
t                1966 net/key/af_key.c 	if (t->mode == XFRM_MODE_TUNNEL) {
t                1972 net/key/af_key.c 			&t->saddr, &t->id.daddr, &t->encap_family);
t                1976 net/key/af_key.c 		t->encap_family = xp->family;
t                1979 net/key/af_key.c 	t->allalgs = 1;
t                2021 net/key/af_key.c 	const struct xfrm_tmpl *t;
t                2027 net/key/af_key.c 		t = xp->xfrm_vec + i;
t                2028 net/key/af_key.c 		socklen += pfkey_sockaddr_len(t->encap_family);
t                2147 net/key/af_key.c 		const struct xfrm_tmpl *t = xp->xfrm_vec + i;
t                2153 net/key/af_key.c 		if (t->mode == XFRM_MODE_TUNNEL) {
t                2154 net/key/af_key.c 			socklen = pfkey_sockaddr_len(t->encap_family);
t                2163 net/key/af_key.c 		rq->sadb_x_ipsecrequest_proto = t->id.proto;
t                2164 net/key/af_key.c 		if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
t                2168 net/key/af_key.c 		if (t->reqid)
t                2170 net/key/af_key.c 		if (t->optional)
t                2172 net/key/af_key.c 		rq->sadb_x_ipsecrequest_reqid = t->reqid;
t                2174 net/key/af_key.c 		if (t->mode == XFRM_MODE_TUNNEL) {
t                2176 net/key/af_key.c 			pfkey_sockaddr_fill(&t->saddr, 0,
t                2178 net/key/af_key.c 					    t->encap_family);
t                2179 net/key/af_key.c 			pfkey_sockaddr_fill(&t->id.daddr, 0,
t                2181 net/key/af_key.c 					    t->encap_family);
t                2866 net/key/af_key.c static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
t                2871 net/key/af_key.c 	if (id >= sizeof(t->aalgos) * 8)
t                2874 net/key/af_key.c 	return (t->aalgos >> id) & 1;
t                2877 net/key/af_key.c static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
t                2882 net/key/af_key.c 	if (id >= sizeof(t->ealgos) * 8)
t                2885 net/key/af_key.c 	return (t->ealgos >> id) & 1;
t                2888 net/key/af_key.c static int count_ah_combs(const struct xfrm_tmpl *t)
t                2898 net/key/af_key.c 		if (aalg_tmpl_set(t, aalg) && aalg->available)
t                2904 net/key/af_key.c static int count_esp_combs(const struct xfrm_tmpl *t)
t                2916 net/key/af_key.c 		if (!(ealg_tmpl_set(t, ealg) && ealg->available))
t                2927 net/key/af_key.c 			if (aalg_tmpl_set(t, aalg) && aalg->available)
t                2934 net/key/af_key.c static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
t                2953 net/key/af_key.c 		if (aalg_tmpl_set(t, aalg) && aalg->available) {
t                2968 net/key/af_key.c static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
t                2987 net/key/af_key.c 		if (!(ealg_tmpl_set(t, ealg) && ealg->available))
t                2997 net/key/af_key.c 			if (!(aalg_tmpl_set(t, aalg) && aalg->available))
t                3132 net/key/af_key.c static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
t                3154 net/key/af_key.c 		size += count_ah_combs(t);
t                3156 net/key/af_key.c 		size += count_esp_combs(t);
t                3218 net/key/af_key.c 		dump_ah_combs(skb, t);
t                3220 net/key/af_key.c 		dump_esp_combs(skb, t);
t                  71 net/lapb/lapb_timer.c static void lapb_t2timer_expiry(struct timer_list *t)
t                  73 net/lapb/lapb_timer.c 	struct lapb_cb *lapb = from_timer(lapb, t, t2timer);
t                  81 net/lapb/lapb_timer.c static void lapb_t1timer_expiry(struct timer_list *t)
t                  83 net/lapb/lapb_timer.c 	struct lapb_cb *lapb = from_timer(lapb, t, t1timer);
t                1336 net/llc/llc_c_ac.c void llc_conn_pf_cycle_tmr_cb(struct timer_list *t)
t                1338 net/llc/llc_c_ac.c 	struct llc_sock *llc = from_timer(llc, t, pf_cycle_timer.timer);
t                1343 net/llc/llc_c_ac.c void llc_conn_busy_tmr_cb(struct timer_list *t)
t                1345 net/llc/llc_c_ac.c 	struct llc_sock *llc = from_timer(llc, t, busy_state_timer.timer);
t                1350 net/llc/llc_c_ac.c void llc_conn_ack_tmr_cb(struct timer_list *t)
t                1352 net/llc/llc_c_ac.c 	struct llc_sock *llc = from_timer(llc, t, ack_timer.timer);
t                1357 net/llc/llc_c_ac.c void llc_conn_rej_tmr_cb(struct timer_list *t)
t                1359 net/llc/llc_c_ac.c 	struct llc_sock *llc = from_timer(llc, t, rej_sent_timer.timer);
t                 152 net/mac80211/agg-rx.c static void sta_rx_agg_session_timer_expired(struct timer_list *t)
t                 154 net/mac80211/agg-rx.c 	struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
t                 172 net/mac80211/agg-rx.c static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
t                 174 net/mac80211/agg-rx.c 	struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer);
t                 431 net/mac80211/agg-tx.c static void sta_addba_resp_timer_expired(struct timer_list *t)
t                 433 net/mac80211/agg-tx.c 	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
t                 536 net/mac80211/agg-tx.c static void sta_tx_agg_session_timer_expired(struct timer_list *t)
t                 538 net/mac80211/agg-tx.c 	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
t                 588 net/mac80211/debugfs_sta.c #define PFLAG(t, n, a, b)						\
t                 590 net/mac80211/debugfs_sta.c 		if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a)		\
t                 594 net/mac80211/debugfs_sta.c #define PFLAG_RANGE(t, i, n, s, m, off, fmt)				\
t                 596 net/mac80211/debugfs_sta.c 		u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK;	\
t                 601 net/mac80211/debugfs_sta.c #define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b)		\
t                 603 net/mac80211/debugfs_sta.c 		if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) {	\
t                 607 net/mac80211/debugfs_sta.c 		PFLAG_RANGE(t, i, n, s, m, off, fmt);			\
t                1721 net/mac80211/ibss.c static void ieee80211_ibss_timer(struct timer_list *t)
t                1724 net/mac80211/ibss.c 		from_timer(sdata, t, u.ibss.timer);
t                2006 net/mac80211/ieee80211_i.h void ieee80211_dynamic_ps_timer(struct timer_list *t);
t                 258 net/mac80211/led.c static void tpt_trig_timer(struct timer_list *t)
t                 260 net/mac80211/led.c 	struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
t                  38 net/mac80211/mesh.c static void ieee80211_mesh_housekeeping_timer(struct timer_list *t)
t                  41 net/mac80211/mesh.c 		from_timer(sdata, t, u.mesh.housekeeping_timer);
t                 590 net/mac80211/mesh.c static void ieee80211_mesh_path_timer(struct timer_list *t)
t                 593 net/mac80211/mesh.c 		from_timer(sdata, t, u.mesh.mesh_path_timer);
t                 598 net/mac80211/mesh.c static void ieee80211_mesh_path_root_timer(struct timer_list *t)
t                 601 net/mac80211/mesh.c 		from_timer(sdata, t, u.mesh.mesh_path_root_timer);
t                 291 net/mac80211/mesh.h void mesh_plink_timer(struct timer_list *t);
t                 312 net/mac80211/mesh.h void mesh_path_timer(struct timer_list *t);
t                1219 net/mac80211/mesh_hwmp.c void mesh_path_timer(struct timer_list *t)
t                1221 net/mac80211/mesh_hwmp.c 	struct mesh_path *mpath = from_timer(mpath, t, timer);
t                  20 net/mac80211/mesh_plink.c #define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
t                  21 net/mac80211/mesh_plink.c 				jiffies + msecs_to_jiffies(t)))
t                 628 net/mac80211/mesh_plink.c void mesh_plink_timer(struct timer_list *t)
t                 630 net/mac80211/mesh_plink.c 	struct mesh_sta *mesh = from_timer(mesh, t, plink_timer);
t                1248 net/mac80211/mlme.c static void ieee80211_chswitch_timer(struct timer_list *t)
t                1251 net/mac80211/mlme.c 		from_timer(sdata, t, u.mgd.chswitch_timer);
t                1807 net/mac80211/mlme.c void ieee80211_dynamic_ps_timer(struct timer_list *t)
t                1809 net/mac80211/mlme.c 	struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
t                4149 net/mac80211/mlme.c static void ieee80211_sta_timer(struct timer_list *t)
t                4152 net/mac80211/mlme.c 		from_timer(sdata, t, u.mgd.timer);
t                4444 net/mac80211/mlme.c static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
t                4447 net/mac80211/mlme.c 		from_timer(sdata, t, u.mgd.bcn_mon_timer);
t                4458 net/mac80211/mlme.c static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
t                4461 net/mac80211/mlme.c 		from_timer(sdata, t, u.mgd.conn_mon_timer);
t                 150 net/mac80211/ocb.c static void ieee80211_ocb_housekeeping_timer(struct timer_list *t)
t                 153 net/mac80211/ocb.c 		from_timer(sdata, t, u.ocb.housekeeping_timer);
t                1120 net/mac80211/sta_info.c static void sta_info_cleanup(struct timer_list *t)
t                1122 net/mac80211/sta_info.c 	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
t                 164 net/mac802154/ieee802154_i.h 			 struct ieee802154_llsec_table **t);
t                 203 net/mac802154/mib.c 			 struct ieee802154_llsec_table **t)
t                 209 net/mac802154/mib.c 	*t = &sdata->sec.table;
t                  87 net/ncsi/ncsi-manage.c static void ncsi_channel_monitor(struct timer_list *t)
t                  89 net/ncsi/ncsi-manage.c 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
t                 423 net/ncsi/ncsi-manage.c static void ncsi_request_timeout(struct timer_list *t)
t                 425 net/ncsi/ncsi-manage.c 	struct ncsi_request *nr = from_timer(nr, t, timer);
t                  36 net/netfilter/ipset/ip_set_bitmap_gen.h mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
t                 265 net/netfilter/ipset/ip_set_bitmap_gen.h mtype_gc(struct timer_list *t)
t                 267 net/netfilter/ipset/ip_set_bitmap_gen.h 	struct mtype *map = from_timer(map, t, gc);
t                 121 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	u32 t = ext->timeout;
t                 124 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		if (t == set->timeout)
t                 126 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			t = *timeout;
t                 127 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ip_set_timeout_set(timeout, t);
t                 135 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			ip_set_timeout_set(timeout, t);
t                 137 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			*timeout = t;
t                 402 net/netfilter/ipset/ip_set_hash_gen.h mtype_ahash_memsize(const struct htype *h, const struct htable *t)
t                 404 net/netfilter/ipset/ip_set_hash_gen.h 	return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
t                 426 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                 430 net/netfilter/ipset/ip_set_hash_gen.h 	t = ipset_dereference_nfnl(h->table);
t                 431 net/netfilter/ipset/ip_set_hash_gen.h 	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
t                 432 net/netfilter/ipset/ip_set_hash_gen.h 		spin_lock_bh(&t->hregion[r].lock);
t                 433 net/netfilter/ipset/ip_set_hash_gen.h 		for (i = ahash_bucket_start(r, t->htable_bits);
t                 434 net/netfilter/ipset/ip_set_hash_gen.h 		     i < ahash_bucket_end(r, t->htable_bits); i++) {
t                 435 net/netfilter/ipset/ip_set_hash_gen.h 			n = __ipset_dereference(hbucket(t, i));
t                 441 net/netfilter/ipset/ip_set_hash_gen.h 			rcu_assign_pointer(hbucket(t, i), NULL);
t                 444 net/netfilter/ipset/ip_set_hash_gen.h 		t->hregion[r].ext_size = 0;
t                 445 net/netfilter/ipset/ip_set_hash_gen.h 		t->hregion[r].elements = 0;
t                 446 net/netfilter/ipset/ip_set_hash_gen.h 		spin_unlock_bh(&t->hregion[r].lock);
t                 455 net/netfilter/ipset/ip_set_hash_gen.h mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
t                 460 net/netfilter/ipset/ip_set_hash_gen.h 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
t                 461 net/netfilter/ipset/ip_set_hash_gen.h 		n = __ipset_dereference(hbucket(t, i));
t                 470 net/netfilter/ipset/ip_set_hash_gen.h 	ip_set_free(t->hregion);
t                 471 net/netfilter/ipset/ip_set_hash_gen.h 	ip_set_free(t);
t                 513 net/netfilter/ipset/ip_set_hash_gen.h mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
t                 522 net/netfilter/ipset/ip_set_hash_gen.h 	u8 htable_bits = t->htable_bits;
t                 524 net/netfilter/ipset/ip_set_hash_gen.h 	spin_lock_bh(&t->hregion[r].lock);
t                 527 net/netfilter/ipset/ip_set_hash_gen.h 		n = __ipset_dereference(hbucket(t, i));
t                 547 net/netfilter/ipset/ip_set_hash_gen.h 			t->hregion[r].elements--;
t                 553 net/netfilter/ipset/ip_set_hash_gen.h 				t->hregion[r].ext_size -=
t                 555 net/netfilter/ipset/ip_set_hash_gen.h 				rcu_assign_pointer(hbucket(t, i), NULL);
t                 576 net/netfilter/ipset/ip_set_hash_gen.h 			t->hregion[r].ext_size -=
t                 578 net/netfilter/ipset/ip_set_hash_gen.h 			rcu_assign_pointer(hbucket(t, i), tmp);
t                 582 net/netfilter/ipset/ip_set_hash_gen.h 	spin_unlock_bh(&t->hregion[r].lock);
t                 591 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                 600 net/netfilter/ipset/ip_set_hash_gen.h 	t = ipset_dereference_set(h->table, set);
t                 601 net/netfilter/ipset/ip_set_hash_gen.h 	atomic_inc(&t->uref);
t                 602 net/netfilter/ipset/ip_set_hash_gen.h 	numof_locks = ahash_numof_locks(t->htable_bits);
t                 612 net/netfilter/ipset/ip_set_hash_gen.h 	mtype_gc_do(set, h, t, r);
t                 614 net/netfilter/ipset/ip_set_hash_gen.h 	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
t                 615 net/netfilter/ipset/ip_set_hash_gen.h 		pr_debug("Table destroy after resize by expire: %p\n", t);
t                 616 net/netfilter/ipset/ip_set_hash_gen.h 		mtype_ahash_destroy(set, t, false);
t                 645 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t, *orig;
t                 678 net/netfilter/ipset/ip_set_hash_gen.h 	t = ip_set_alloc(htable_size(htable_bits));
t                 679 net/netfilter/ipset/ip_set_hash_gen.h 	if (!t) {
t                 683 net/netfilter/ipset/ip_set_hash_gen.h 	t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
t                 684 net/netfilter/ipset/ip_set_hash_gen.h 	if (!t->hregion) {
t                 685 net/netfilter/ipset/ip_set_hash_gen.h 		kfree(t);
t                 689 net/netfilter/ipset/ip_set_hash_gen.h 	t->htable_bits = htable_bits;
t                 690 net/netfilter/ipset/ip_set_hash_gen.h 	t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
t                 692 net/netfilter/ipset/ip_set_hash_gen.h 		spin_lock_init(&t->hregion[i].lock);
t                 726 net/netfilter/ipset/ip_set_hash_gen.h 				m = __ipset_dereference(hbucket(t, key));
t                 737 net/netfilter/ipset/ip_set_hash_gen.h 					t->hregion[nr].ext_size +=
t                 740 net/netfilter/ipset/ip_set_hash_gen.h 					RCU_INIT_POINTER(hbucket(t, key), m);
t                 759 net/netfilter/ipset/ip_set_hash_gen.h 					t->hregion[nr].ext_size +=
t                 764 net/netfilter/ipset/ip_set_hash_gen.h 					RCU_INIT_POINTER(hbucket(t, key), ht);
t                 769 net/netfilter/ipset/ip_set_hash_gen.h 				t->hregion[nr].elements++;
t                 779 net/netfilter/ipset/ip_set_hash_gen.h 	rcu_assign_pointer(h->table, t);
t                 785 net/netfilter/ipset/ip_set_hash_gen.h 		 orig->htable_bits, orig, t->htable_bits, t);
t                 816 net/netfilter/ipset/ip_set_hash_gen.h 	mtype_ahash_destroy(set, t, false);
t                 827 net/netfilter/ipset/ip_set_hash_gen.h 	const struct htable *t;
t                 832 net/netfilter/ipset/ip_set_hash_gen.h 	t = rcu_dereference_bh(h->table);
t                 833 net/netfilter/ipset/ip_set_hash_gen.h 	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
t                 834 net/netfilter/ipset/ip_set_hash_gen.h 		for (i = ahash_bucket_start(r, t->htable_bits);
t                 835 net/netfilter/ipset/ip_set_hash_gen.h 		     i < ahash_bucket_end(r, t->htable_bits); i++) {
t                 836 net/netfilter/ipset/ip_set_hash_gen.h 			n = rcu_dereference_bh(hbucket(t, i));
t                 847 net/netfilter/ipset/ip_set_hash_gen.h 		*ext_size += t->hregion[r].ext_size;
t                 859 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                 869 net/netfilter/ipset/ip_set_hash_gen.h 	t = rcu_dereference_bh(h->table);
t                 870 net/netfilter/ipset/ip_set_hash_gen.h 	key = HKEY(value, h->initval, t->htable_bits);
t                 871 net/netfilter/ipset/ip_set_hash_gen.h 	r = ahash_region(key, t->htable_bits);
t                 872 net/netfilter/ipset/ip_set_hash_gen.h 	atomic_inc(&t->uref);
t                 873 net/netfilter/ipset/ip_set_hash_gen.h 	elements = t->hregion[r].elements;
t                 874 net/netfilter/ipset/ip_set_hash_gen.h 	maxelem = t->maxelem;
t                 879 net/netfilter/ipset/ip_set_hash_gen.h 			mtype_gc_do(set, h, t, r);
t                 884 net/netfilter/ipset/ip_set_hash_gen.h 		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
t                 885 net/netfilter/ipset/ip_set_hash_gen.h 			elements += t->hregion[e].elements;
t                 891 net/netfilter/ipset/ip_set_hash_gen.h 	spin_lock_bh(&t->hregion[r].lock);
t                 892 net/netfilter/ipset/ip_set_hash_gen.h 	n = rcu_dereference_bh(hbucket(t, key));
t                 904 net/netfilter/ipset/ip_set_hash_gen.h 		t->hregion[r].ext_size +=
t                 945 net/netfilter/ipset/ip_set_hash_gen.h 			t->hregion[r].elements--;
t                 971 net/netfilter/ipset/ip_set_hash_gen.h 		t->hregion[r].ext_size +=
t                 979 net/netfilter/ipset/ip_set_hash_gen.h 	t->hregion[r].elements++;
t                1001 net/netfilter/ipset/ip_set_hash_gen.h 		rcu_assign_pointer(hbucket(t, key), n);
t                1007 net/netfilter/ipset/ip_set_hash_gen.h 	spin_unlock_bh(&t->hregion[r].lock);
t                1008 net/netfilter/ipset/ip_set_hash_gen.h 	if (atomic_read(&t->ref) && ext->target) {
t                1033 net/netfilter/ipset/ip_set_hash_gen.h 	spin_unlock_bh(&t->hregion[r].lock);
t                1035 net/netfilter/ipset/ip_set_hash_gen.h 	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
t                1036 net/netfilter/ipset/ip_set_hash_gen.h 		pr_debug("Table destroy after resize by add: %p\n", t);
t                1037 net/netfilter/ipset/ip_set_hash_gen.h 		mtype_ahash_destroy(set, t, false);
t                1049 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                1062 net/netfilter/ipset/ip_set_hash_gen.h 	t = rcu_dereference_bh(h->table);
t                1063 net/netfilter/ipset/ip_set_hash_gen.h 	key = HKEY(value, h->initval, t->htable_bits);
t                1064 net/netfilter/ipset/ip_set_hash_gen.h 	r = ahash_region(key, t->htable_bits);
t                1065 net/netfilter/ipset/ip_set_hash_gen.h 	atomic_inc(&t->uref);
t                1068 net/netfilter/ipset/ip_set_hash_gen.h 	spin_lock_bh(&t->hregion[r].lock);
t                1069 net/netfilter/ipset/ip_set_hash_gen.h 	n = rcu_dereference_bh(hbucket(t, key));
t                1088 net/netfilter/ipset/ip_set_hash_gen.h 		t->hregion[r].elements--;
t                1096 net/netfilter/ipset/ip_set_hash_gen.h 		if (atomic_read(&t->ref) && ext->target) {
t                1114 net/netfilter/ipset/ip_set_hash_gen.h 			t->hregion[r].ext_size -= ext_size(n->size, dsize);
t                1115 net/netfilter/ipset/ip_set_hash_gen.h 			rcu_assign_pointer(hbucket(t, key), NULL);
t                1133 net/netfilter/ipset/ip_set_hash_gen.h 			t->hregion[r].ext_size -=
t                1135 net/netfilter/ipset/ip_set_hash_gen.h 			rcu_assign_pointer(hbucket(t, key), tmp);
t                1142 net/netfilter/ipset/ip_set_hash_gen.h 	spin_unlock_bh(&t->hregion[r].lock);
t                1148 net/netfilter/ipset/ip_set_hash_gen.h 	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
t                1149 net/netfilter/ipset/ip_set_hash_gen.h 		pr_debug("Table destroy after resize by del: %p\n", t);
t                1150 net/netfilter/ipset/ip_set_hash_gen.h 		mtype_ahash_destroy(set, t, false);
t                1175 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t = rcu_dereference_bh(h->table);
t                1198 net/netfilter/ipset/ip_set_hash_gen.h 		key = HKEY(d, h->initval, t->htable_bits);
t                1199 net/netfilter/ipset/ip_set_hash_gen.h 		n = rcu_dereference_bh(hbucket(t, key));
t                1230 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                1238 net/netfilter/ipset/ip_set_hash_gen.h 	t = rcu_dereference_bh(h->table);
t                1252 net/netfilter/ipset/ip_set_hash_gen.h 	key = HKEY(d, h->initval, t->htable_bits);
t                1253 net/netfilter/ipset/ip_set_hash_gen.h 	n = rcu_dereference_bh(hbucket(t, key));
t                1278 net/netfilter/ipset/ip_set_hash_gen.h 	const struct htable *t;
t                1286 net/netfilter/ipset/ip_set_hash_gen.h 	t = rcu_dereference_bh(h->table);
t                1288 net/netfilter/ipset/ip_set_hash_gen.h 	memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
t                1289 net/netfilter/ipset/ip_set_hash_gen.h 	htable_bits = t->htable_bits;
t                1326 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                1330 net/netfilter/ipset/ip_set_hash_gen.h 		t = ipset_dereference_bh_nfnl(h->table);
t                1331 net/netfilter/ipset/ip_set_hash_gen.h 		atomic_inc(&t->uref);
t                1332 net/netfilter/ipset/ip_set_hash_gen.h 		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
t                1335 net/netfilter/ipset/ip_set_hash_gen.h 		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
t                1336 net/netfilter/ipset/ip_set_hash_gen.h 		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
t                1338 net/netfilter/ipset/ip_set_hash_gen.h 				 " by dump: %p\n", t);
t                1339 net/netfilter/ipset/ip_set_hash_gen.h 			mtype_ahash_destroy(set, t, false);
t                1350 net/netfilter/ipset/ip_set_hash_gen.h 	const struct htable *t;
t                1364 net/netfilter/ipset/ip_set_hash_gen.h 	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
t                1367 net/netfilter/ipset/ip_set_hash_gen.h 	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
t                1371 net/netfilter/ipset/ip_set_hash_gen.h 		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
t                1373 net/netfilter/ipset/ip_set_hash_gen.h 			 cb->args[IPSET_CB_ARG0], t, n);
t                1464 net/netfilter/ipset/ip_set_hash_gen.h 	struct htable *t;
t                1529 net/netfilter/ipset/ip_set_hash_gen.h 	t = ip_set_alloc(hsize);
t                1530 net/netfilter/ipset/ip_set_hash_gen.h 	if (!t) {
t                1534 net/netfilter/ipset/ip_set_hash_gen.h 	t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
t                1535 net/netfilter/ipset/ip_set_hash_gen.h 	if (!t->hregion) {
t                1536 net/netfilter/ipset/ip_set_hash_gen.h 		kfree(t);
t                1542 net/netfilter/ipset/ip_set_hash_gen.h 		spin_lock_init(&t->hregion[i].lock);
t                1552 net/netfilter/ipset/ip_set_hash_gen.h 	t->htable_bits = hbits;
t                1553 net/netfilter/ipset/ip_set_hash_gen.h 	t->maxelem = h->maxelem / ahash_numof_locks(hbits);
t                1554 net/netfilter/ipset/ip_set_hash_gen.h 	RCU_INIT_POINTER(h->table, t);
t                1586 net/netfilter/ipset/ip_set_hash_gen.h 		 set->name, jhash_size(t->htable_bits),
t                1587 net/netfilter/ipset/ip_set_hash_gen.h 		 t->htable_bits, h->maxelem, set->data, t);
t                 564 net/netfilter/ipset/ip_set_list_set.c list_set_gc(struct timer_list *t)
t                 566 net/netfilter/ipset/ip_set_list_set.c 	struct list_set *map = from_timer(map, t, gc);
t                 578 net/netfilter/ipset/ip_set_list_set.c list_set_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
t                 102 net/netfilter/ipvs/ip_vs_conn.c static void ip_vs_conn_expire(struct timer_list *t);
t                 458 net/netfilter/ipvs/ip_vs_conn.c 	unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
t                 460 net/netfilter/ipvs/ip_vs_conn.c 	mod_timer(&cp->timer, jiffies+t);
t                 810 net/netfilter/ipvs/ip_vs_conn.c static void ip_vs_conn_expire(struct timer_list *t)
t                 812 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_conn *cp = from_timer(cp, t, timer);
t                1233 net/netfilter/ipvs/ip_vs_ctl.c static void ip_vs_dest_trash_expire(struct timer_list *t)
t                1235 net/netfilter/ipvs/ip_vs_ctl.c 	struct netns_ipvs *ipvs = from_timer(ipvs, t, dest_trash_timer);
t                2864 net/netfilter/ipvs/ip_vs_ctl.c 		struct ip_vs_timeout_user t;
t                2866 net/netfilter/ipvs/ip_vs_ctl.c 		__ip_vs_get_timeouts(ipvs, &t);
t                2867 net/netfilter/ipvs/ip_vs_ctl.c 		if (copy_to_user(user, &t, sizeof(t)) != 0)
t                3582 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_timeout_user t;
t                3584 net/netfilter/ipvs/ip_vs_ctl.c 	__ip_vs_get_timeouts(ipvs, &t);
t                3587 net/netfilter/ipvs/ip_vs_ctl.c 		t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
t                3590 net/netfilter/ipvs/ip_vs_ctl.c 		t.tcp_fin_timeout =
t                3594 net/netfilter/ipvs/ip_vs_ctl.c 		t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
t                3596 net/netfilter/ipvs/ip_vs_ctl.c 	return ip_vs_set_timeout(ipvs, &t);
t                3808 net/netfilter/ipvs/ip_vs_ctl.c 		struct ip_vs_timeout_user t;
t                3810 net/netfilter/ipvs/ip_vs_ctl.c 		__ip_vs_get_timeouts(ipvs, &t);
t                3813 net/netfilter/ipvs/ip_vs_ctl.c 				t.tcp_timeout) ||
t                3815 net/netfilter/ipvs/ip_vs_ctl.c 				t.tcp_fin_timeout))
t                3819 net/netfilter/ipvs/ip_vs_ctl.c 		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
t                  96 net/netfilter/ipvs/ip_vs_est.c static void estimation_timer(struct timer_list *t)
t                 101 net/netfilter/ipvs/ip_vs_est.c 	struct netns_ipvs *ipvs = from_timer(ipvs, t, est_timer);
t                 294 net/netfilter/ipvs/ip_vs_lblc.c static void ip_vs_lblc_check_expire(struct timer_list *t)
t                 296 net/netfilter/ipvs/ip_vs_lblc.c 	struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer);
t                 458 net/netfilter/ipvs/ip_vs_lblcr.c static void ip_vs_lblcr_check_expire(struct timer_list *t)
t                 460 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer);
t                1840 net/netfilter/nf_conntrack_core.c 			       struct nf_conntrack_tuple *t)
t                1845 net/netfilter/nf_conntrack_core.c 	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
t                1846 net/netfilter/nf_conntrack_core.c 	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
t                  69 net/netfilter/nf_conntrack_expect.c static void nf_ct_expectation_timed_out(struct timer_list *t)
t                  71 net/netfilter/nf_conntrack_expect.c 	struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
t                  23 net/netfilter/nf_conntrack_extend.c 	struct nf_ct_ext_type *t;
t                  27 net/netfilter/nf_conntrack_extend.c 		t = rcu_dereference(nf_ct_ext_types[i]);
t                  33 net/netfilter/nf_conntrack_extend.c 		if (t && t->destroy)
t                  34 net/netfilter/nf_conntrack_extend.c 			t->destroy(ct);
t                  44 net/netfilter/nf_conntrack_extend.c 	struct nf_ct_ext_type *t;
t                  60 net/netfilter/nf_conntrack_extend.c 	t = rcu_dereference(nf_ct_ext_types[id]);
t                  61 net/netfilter/nf_conntrack_extend.c 	if (!t) {
t                  66 net/netfilter/nf_conntrack_extend.c 	newoff = ALIGN(oldlen, t->align);
t                  67 net/netfilter/nf_conntrack_extend.c 	newlen = newoff + t->len;
t                 997 net/netfilter/nf_conntrack_netlink.c 				struct nf_conntrack_tuple *t)
t                1002 net/netfilter/nf_conntrack_netlink.c 	t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
t                1003 net/netfilter/nf_conntrack_netlink.c 	t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
t                1009 net/netfilter/nf_conntrack_netlink.c 				struct nf_conntrack_tuple *t)
t                1014 net/netfilter/nf_conntrack_netlink.c 	t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
t                1015 net/netfilter/nf_conntrack_netlink.c 	t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
t                 150 net/netfilter/nf_conntrack_pptp.c 				  const struct nf_conntrack_tuple *t)
t                 158 net/netfilter/nf_conntrack_pptp.c 	nf_ct_dump_tuple(t);
t                 161 net/netfilter/nf_conntrack_pptp.c 	h = nf_conntrack_find_get(net, zone, t);
t                 171 net/netfilter/nf_conntrack_pptp.c 		exp = nf_ct_expect_find_get(net, zone, t);
t                 187 net/netfilter/nf_conntrack_pptp.c 	struct nf_conntrack_tuple t;
t                 192 net/netfilter/nf_conntrack_pptp.c 	memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
t                 193 net/netfilter/nf_conntrack_pptp.c 	t.dst.protonum = IPPROTO_GRE;
t                 194 net/netfilter/nf_conntrack_pptp.c 	t.src.u.gre.key = ct_pptp_info->pns_call_id;
t                 195 net/netfilter/nf_conntrack_pptp.c 	t.dst.u.gre.key = ct_pptp_info->pac_call_id;
t                 196 net/netfilter/nf_conntrack_pptp.c 	if (!destroy_sibling_or_exp(net, ct, &t))
t                 200 net/netfilter/nf_conntrack_pptp.c 	memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
t                 201 net/netfilter/nf_conntrack_pptp.c 	t.dst.protonum = IPPROTO_GRE;
t                 202 net/netfilter/nf_conntrack_pptp.c 	t.src.u.gre.key = ct_pptp_info->pac_call_id;
t                 203 net/netfilter/nf_conntrack_pptp.c 	t.dst.u.gre.key = ct_pptp_info->pns_call_id;
t                 204 net/netfilter/nf_conntrack_pptp.c 	if (!destroy_sibling_or_exp(net, ct, &t))
t                  72 net/netfilter/nf_conntrack_proto_gre.c 				const struct nf_conntrack_tuple *t)
t                  74 net/netfilter/nf_conntrack_proto_gre.c 	return km->tuple.src.l3num == t->src.l3num &&
t                  75 net/netfilter/nf_conntrack_proto_gre.c 	       !memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) &&
t                  76 net/netfilter/nf_conntrack_proto_gre.c 	       !memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) &&
t                  77 net/netfilter/nf_conntrack_proto_gre.c 	       km->tuple.dst.protonum == t->dst.protonum &&
t                  78 net/netfilter/nf_conntrack_proto_gre.c 	       km->tuple.dst.u.all == t->dst.u.all;
t                  82 net/netfilter/nf_conntrack_proto_gre.c static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
t                  89 net/netfilter/nf_conntrack_proto_gre.c 		if (gre_key_cmpfn(km, t)) {
t                  96 net/netfilter/nf_conntrack_proto_gre.c 	nf_ct_dump_tuple(t);
t                 103 net/netfilter/nf_conntrack_proto_gre.c 			 struct nf_conntrack_tuple *t)
t                 114 net/netfilter/nf_conntrack_proto_gre.c 			if (gre_key_cmpfn(km, t) && km == *kmp)
t                 125 net/netfilter/nf_conntrack_proto_gre.c 	memcpy(&km->tuple, t, sizeof(*t));
t                 259 net/netfilter/nf_conntrack_proto_icmp.c 				const struct nf_conntrack_tuple *t)
t                 261 net/netfilter/nf_conntrack_proto_icmp.c 	if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
t                 262 net/netfilter/nf_conntrack_proto_icmp.c 	    nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
t                 263 net/netfilter/nf_conntrack_proto_icmp.c 	    nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
t                 177 net/netfilter/nf_conntrack_proto_icmpv6.c 				  const struct nf_conntrack_tuple *t)
t                 179 net/netfilter/nf_conntrack_proto_icmpv6.c 	if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
t                 180 net/netfilter/nf_conntrack_proto_icmpv6.c 	    nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
t                 181 net/netfilter/nf_conntrack_proto_icmpv6.c 	    nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
t                1013 net/netfilter/nf_conntrack_sip.c 	const struct sdp_media_type *t;
t                1017 net/netfilter/nf_conntrack_sip.c 		t = &sdp_media_types[i];
t                1018 net/netfilter/nf_conntrack_sip.c 		if (matchlen < t->len ||
t                1019 net/netfilter/nf_conntrack_sip.c 		    strncmp(dptr + matchoff, t->name, t->len))
t                1021 net/netfilter/nf_conntrack_sip.c 		return t;
t                1041 net/netfilter/nf_conntrack_sip.c 	const struct sdp_media_type *t;
t                1071 net/netfilter/nf_conntrack_sip.c 		t = sdp_media_type(*dptr, mediaoff, medialen);
t                1072 net/netfilter/nf_conntrack_sip.c 		if (!t) {
t                1076 net/netfilter/nf_conntrack_sip.c 		mediaoff += t->len;
t                1077 net/netfilter/nf_conntrack_sip.c 		medialen -= t->len;
t                1103 net/netfilter/nf_conntrack_sip.c 					    &rtp_addr, htons(port), t->class,
t                  61 net/netfilter/nf_nat_core.c 	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
t                  65 net/netfilter/nf_nat_core.c 		fl4->daddr = t->dst.u3.ip;
t                  66 net/netfilter/nf_nat_core.c 		if (t->dst.protonum == IPPROTO_TCP ||
t                  67 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDP ||
t                  68 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDPLITE ||
t                  69 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_DCCP ||
t                  70 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_SCTP)
t                  71 net/netfilter/nf_nat_core.c 			fl4->fl4_dport = t->dst.u.all;
t                  77 net/netfilter/nf_nat_core.c 		fl4->saddr = t->src.u3.ip;
t                  78 net/netfilter/nf_nat_core.c 		if (t->dst.protonum == IPPROTO_TCP ||
t                  79 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDP ||
t                  80 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDPLITE ||
t                  81 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_DCCP ||
t                  82 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_SCTP)
t                  83 net/netfilter/nf_nat_core.c 			fl4->fl4_sport = t->src.u.all;
t                  94 net/netfilter/nf_nat_core.c 	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
t                  98 net/netfilter/nf_nat_core.c 		fl6->daddr = t->dst.u3.in6;
t                  99 net/netfilter/nf_nat_core.c 		if (t->dst.protonum == IPPROTO_TCP ||
t                 100 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDP ||
t                 101 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDPLITE ||
t                 102 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_DCCP ||
t                 103 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_SCTP)
t                 104 net/netfilter/nf_nat_core.c 			fl6->fl6_dport = t->dst.u.all;
t                 110 net/netfilter/nf_nat_core.c 		fl6->saddr = t->src.u3.in6;
t                 111 net/netfilter/nf_nat_core.c 		if (t->dst.protonum == IPPROTO_TCP ||
t                 112 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDP ||
t                 113 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_UDPLITE ||
t                 114 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_DCCP ||
t                 115 net/netfilter/nf_nat_core.c 		    t->dst.protonum == IPPROTO_SCTP)
t                 116 net/netfilter/nf_nat_core.c 			fl6->fl6_sport = t->src.u.all;
t                 220 net/netfilter/nf_nat_core.c static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
t                 223 net/netfilter/nf_nat_core.c 	if (t->src.l3num == NFPROTO_IPV4)
t                 224 net/netfilter/nf_nat_core.c 		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
t                 225 net/netfilter/nf_nat_core.c 		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
t                 227 net/netfilter/nf_nat_core.c 	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
t                 228 net/netfilter/nf_nat_core.c 	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
t                 286 net/netfilter/nf_nat_core.c 	const struct nf_conntrack_tuple *t;
t                 288 net/netfilter/nf_nat_core.c 	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
t                 289 net/netfilter/nf_nat_core.c 	return (t->dst.protonum == tuple->dst.protonum &&
t                 290 net/netfilter/nf_nat_core.c 		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
t                 291 net/netfilter/nf_nat_core.c 		t->src.u.all == tuple->src.u.all);
t                  35 net/netfilter/nf_nat_proto.c 			   const struct nf_conntrack_tuple *t,
t                 447 net/netfilter/nf_nat_proto.c 				    const struct nf_conntrack_tuple *t,
t                 455 net/netfilter/nf_nat_proto.c 		newip = t->src.u3.ip;
t                 458 net/netfilter/nf_nat_proto.c 		newip = t->dst.u3.ip;
t                 465 net/netfilter/nf_nat_proto.c 				    const struct nf_conntrack_tuple *t,
t                 474 net/netfilter/nf_nat_proto.c 		newip = &t->src.u3.in6;
t                 477 net/netfilter/nf_nat_proto.c 		newip = &t->dst.u3.in6;
t                 486 net/netfilter/nf_nat_proto.c 			   const struct nf_conntrack_tuple *t,
t                 489 net/netfilter/nf_nat_proto.c 	switch (t->src.l3num) {
t                 491 net/netfilter/nf_nat_proto.c 		nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype);
t                 494 net/netfilter/nf_nat_proto.c 		nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype);
t                 536 net/netfilter/nfnetlink_cttimeout.c static void ctnl_timeout_put(struct nf_ct_timeout *t)
t                 539 net/netfilter/nfnetlink_cttimeout.c 		container_of(t, struct ctnl_timeout, timeout);
t                 151 net/netfilter/nfnetlink_log.c static void nfulnl_timer(struct timer_list *t);
t                 377 net/netfilter/nfnetlink_log.c nfulnl_timer(struct timer_list *t)
t                 379 net/netfilter/nfnetlink_log.c 	struct nfulnl_instance *inst = from_timer(inst, t, timer);
t                 177 net/netfilter/nft_compat.c static void target_compat_from_user(struct xt_target *t, void *in, void *out)
t                 181 net/netfilter/nft_compat.c 	memcpy(out, in, t->targetsize);
t                 182 net/netfilter/nft_compat.c 	pad = XT_ALIGN(t->targetsize) - t->targetsize;
t                 184 net/netfilter/nft_compat.c 		memset(out + t->targetsize, 0, pad);
t                 228 net/netfilter/x_tables.c 	struct xt_target *t;
t                 235 net/netfilter/x_tables.c 	list_for_each_entry(t, &xt[af].target, list) {
t                 236 net/netfilter/x_tables.c 		if (strcmp(t->name, name) == 0) {
t                 237 net/netfilter/x_tables.c 			if (t->revision == revision) {
t                 238 net/netfilter/x_tables.c 				if (try_module_get(t->me)) {
t                 240 net/netfilter/x_tables.c 					return t;
t                 320 net/netfilter/x_tables.c int xt_target_to_user(const struct xt_entry_target *t,
t                 323 net/netfilter/x_tables.c 	return XT_OBJ_TO_USER(u, t, target, 0) ||
t                 324 net/netfilter/x_tables.c 	       XT_DATA_TO_USER(u, t, target);
t                 350 net/netfilter/x_tables.c 	const struct xt_target *t;
t                 353 net/netfilter/x_tables.c 	list_for_each_entry(t, &xt[af].target, list) {
t                 354 net/netfilter/x_tables.c 		if (strcmp(t->name, name) == 0) {
t                 355 net/netfilter/x_tables.c 			if (t->revision > *bestp)
t                 356 net/netfilter/x_tables.c 				*bestp = t->revision;
t                 357 net/netfilter/x_tables.c 			if (t->revision == revision)
t                 792 net/netfilter/x_tables.c 	struct compat_xt_entry_target t;
t                 797 net/netfilter/x_tables.c 	struct compat_xt_entry_target t;
t                 806 net/netfilter/x_tables.c 	const struct compat_xt_entry_target *t;
t                 812 net/netfilter/x_tables.c 	if (target_offset + sizeof(*t) > next_offset)
t                 815 net/netfilter/x_tables.c 	t = (void *)(e + target_offset);
t                 816 net/netfilter/x_tables.c 	if (t->u.target_size < sizeof(*t))
t                 819 net/netfilter/x_tables.c 	if (target_offset + t->u.target_size > next_offset)
t                 822 net/netfilter/x_tables.c 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
t                 823 net/netfilter/x_tables.c 		const struct compat_xt_standard_target *st = (const void *)t;
t                 830 net/netfilter/x_tables.c 	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
t                 831 net/netfilter/x_tables.c 		const struct compat_xt_error_target *et = (const void *)t;
t                 833 net/netfilter/x_tables.c 		if (!error_tg_ok(t->u.target_size, sizeof(*et),
t                 899 net/netfilter/x_tables.c 	const struct xt_entry_target *t;
t                 906 net/netfilter/x_tables.c 	if (target_offset + sizeof(*t) > next_offset)
t                 909 net/netfilter/x_tables.c 	t = (void *)(e + target_offset);
t                 910 net/netfilter/x_tables.c 	if (t->u.target_size < sizeof(*t))
t                 913 net/netfilter/x_tables.c 	if (target_offset + t->u.target_size > next_offset)
t                 916 net/netfilter/x_tables.c 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
t                 917 net/netfilter/x_tables.c 		const struct xt_standard_target *st = (const void *)t;
t                 924 net/netfilter/x_tables.c 	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
t                 925 net/netfilter/x_tables.c 		const struct xt_error_target *et = (const void *)t;
t                 927 net/netfilter/x_tables.c 		if (!error_tg_ok(t->u.target_size, sizeof(*et),
t                1112 net/netfilter/x_tables.c void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
t                1115 net/netfilter/x_tables.c 	const struct xt_target *target = t->u.kernel.target;
t                1116 net/netfilter/x_tables.c 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
t                1119 net/netfilter/x_tables.c 	char name[sizeof(t->u.user.name)];
t                1121 net/netfilter/x_tables.c 	t = *dstptr;
t                1122 net/netfilter/x_tables.c 	memcpy(t, ct, sizeof(*ct));
t                1124 net/netfilter/x_tables.c 		target->compat_from_user(t->data, ct->data);
t                1126 net/netfilter/x_tables.c 		memcpy(t->data, ct->data, tsize - sizeof(*ct));
t                1129 net/netfilter/x_tables.c 		memset(t->data + target->targetsize, 0, pad);
t                1132 net/netfilter/x_tables.c 	t->u.user.target_size = tsize;
t                1135 net/netfilter/x_tables.c 	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
t                1142 net/netfilter/x_tables.c int xt_compat_target_to_user(const struct xt_entry_target *t,
t                1145 net/netfilter/x_tables.c 	const struct xt_target *target = t->u.kernel.target;
t                1148 net/netfilter/x_tables.c 	u_int16_t tsize = t->u.user.target_size - off;
t                1150 net/netfilter/x_tables.c 	if (XT_OBJ_TO_USER(ct, t, target, tsize))
t                1154 net/netfilter/x_tables.c 		if (target->compat_to_user((void __user *)ct->data, t->data))
t                1157 net/netfilter/x_tables.c 		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
t                1204 net/netfilter/x_tables.c 	struct xt_table *t, *found = NULL;
t                1207 net/netfilter/x_tables.c 	list_for_each_entry(t, &net->xt.tables[af], list)
t                1208 net/netfilter/x_tables.c 		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
t                1209 net/netfilter/x_tables.c 			return t;
t                1215 net/netfilter/x_tables.c 	list_for_each_entry(t, &init_net.xt.tables[af], list) {
t                1218 net/netfilter/x_tables.c 		if (strcmp(t->name, name))
t                1220 net/netfilter/x_tables.c 		if (!try_module_get(t->me))
t                1223 net/netfilter/x_tables.c 		err = t->table_init(net);
t                1225 net/netfilter/x_tables.c 			module_put(t->me);
t                1229 net/netfilter/x_tables.c 		found = t;
t                1239 net/netfilter/x_tables.c 	list_for_each_entry(t, &net->xt.tables[af], list)
t                1240 net/netfilter/x_tables.c 		if (strcmp(t->name, name) == 0)
t                1241 net/netfilter/x_tables.c 			return t;
t                1253 net/netfilter/x_tables.c 	struct xt_table *t = xt_find_table_lock(net, af, name);
t                1256 net/netfilter/x_tables.c 	if (IS_ERR(t)) {
t                1260 net/netfilter/x_tables.c 		t = xt_find_table_lock(net, af, name);
t                1264 net/netfilter/x_tables.c 	return t;
t                1431 net/netfilter/x_tables.c 	struct xt_table *t, *table;
t                1442 net/netfilter/x_tables.c 	list_for_each_entry(t, &net->xt.tables[table->af], list) {
t                1443 net/netfilter/x_tables.c 		if (strcmp(t->name, table->name) == 0) {
t                  77 net/netfilter/xt_HMARK.c hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
t                  92 net/netfilter/xt_HMARK.c 	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
t                  94 net/netfilter/xt_HMARK.c 	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
t                 100 net/netfilter/xt_HMARK.c 	t->proto = nf_ct_protonum(ct);
t                 101 net/netfilter/xt_HMARK.c 	if (t->proto != IPPROTO_ICMP) {
t                 102 net/netfilter/xt_HMARK.c 		t->uports.b16.src = otuple->src.u.all;
t                 103 net/netfilter/xt_HMARK.c 		t->uports.b16.dst = rtuple->src.u.all;
t                 104 net/netfilter/xt_HMARK.c 		hmark_swap_ports(&t->uports, info);
t                 116 net/netfilter/xt_HMARK.c hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
t                 119 net/netfilter/xt_HMARK.c 	u32 src = ntohl(t->src);
t                 120 net/netfilter/xt_HMARK.c 	u32 dst = ntohl(t->dst);
t                 125 net/netfilter/xt_HMARK.c 	hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
t                 126 net/netfilter/xt_HMARK.c 	hash = hash ^ (t->proto & info->proto_mask);
t                 133 net/netfilter/xt_HMARK.c 		      struct hmark_tuple *t, const struct xt_hmark_info *info)
t                 137 net/netfilter/xt_HMARK.c 	protoff = proto_ports_offset(t->proto);
t                 142 net/netfilter/xt_HMARK.c 	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
t                 145 net/netfilter/xt_HMARK.c 	hmark_swap_ports(&t->uports, info);
t                 165 net/netfilter/xt_HMARK.c hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
t                 193 net/netfilter/xt_HMARK.c 	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
t                 194 net/netfilter/xt_HMARK.c 	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
t                 199 net/netfilter/xt_HMARK.c 	t->proto = nexthdr;
t                 200 net/netfilter/xt_HMARK.c 	if (t->proto == IPPROTO_ICMPV6)
t                 206 net/netfilter/xt_HMARK.c 	hmark_set_tuple_ports(skb, nhoff, t, info);
t                 214 net/netfilter/xt_HMARK.c 	struct hmark_tuple t;
t                 216 net/netfilter/xt_HMARK.c 	memset(&t, 0, sizeof(struct hmark_tuple));
t                 219 net/netfilter/xt_HMARK.c 		if (hmark_ct_set_htuple(skb, &t, info) < 0)
t                 222 net/netfilter/xt_HMARK.c 		if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
t                 226 net/netfilter/xt_HMARK.c 	skb->mark = hmark_hash(&t, info);
t                 254 net/netfilter/xt_HMARK.c hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
t                 270 net/netfilter/xt_HMARK.c 	t->src = ip->saddr & info->src_mask.ip;
t                 271 net/netfilter/xt_HMARK.c 	t->dst = ip->daddr & info->dst_mask.ip;
t                 276 net/netfilter/xt_HMARK.c 	t->proto = ip->protocol;
t                 279 net/netfilter/xt_HMARK.c 	if (t->proto == IPPROTO_ICMP)
t                 286 net/netfilter/xt_HMARK.c 	hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
t                 295 net/netfilter/xt_HMARK.c 	struct hmark_tuple t;
t                 297 net/netfilter/xt_HMARK.c 	memset(&t, 0, sizeof(struct hmark_tuple));
t                 300 net/netfilter/xt_HMARK.c 		if (hmark_ct_set_htuple(skb, &t, info) < 0)
t                 303 net/netfilter/xt_HMARK.c 		if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
t                 307 net/netfilter/xt_HMARK.c 	skb->mark = hmark_hash(&t, info);
t                  89 net/netfilter/xt_IDLETIMER.c static void idletimer_tg_expired(struct timer_list *t)
t                  91 net/netfilter/xt_IDLETIMER.c 	struct idletimer_tg *timer = from_timer(timer, t, timer);
t                  74 net/netfilter/xt_LED.c static void led_timeout_callback(struct timer_list *t)
t                  76 net/netfilter/xt_LED.c 	struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
t                 144 net/netfilter/xt_recent.c static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
t                 149 net/netfilter/xt_recent.c 	t->entries--;
t                 155 net/netfilter/xt_recent.c static void recent_entry_reap(struct recent_table *t, unsigned long time)
t                 162 net/netfilter/xt_recent.c 	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
t                 168 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
t                 172 net/netfilter/xt_recent.c recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
t                 176 net/netfilter/xt_recent.c 	unsigned int nstamps_max = t->nstamps_max_mask;
t                 178 net/netfilter/xt_recent.c 	if (t->entries >= ip_list_tot) {
t                 179 net/netfilter/xt_recent.c 		e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
t                 180 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
t                 194 net/netfilter/xt_recent.c 		list_add_tail(&e->list, &t->iphash[recent_entry_hash4(addr)]);
t                 196 net/netfilter/xt_recent.c 		list_add_tail(&e->list, &t->iphash[recent_entry_hash6(addr)]);
t                 197 net/netfilter/xt_recent.c 	list_add_tail(&e->lru_list, &t->lru_list);
t                 198 net/netfilter/xt_recent.c 	t->entries++;
t                 202 net/netfilter/xt_recent.c static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
t                 204 net/netfilter/xt_recent.c 	e->index &= t->nstamps_max_mask;
t                 208 net/netfilter/xt_recent.c 	list_move_tail(&e->lru_list, &t->lru_list);
t                 214 net/netfilter/xt_recent.c 	struct recent_table *t;
t                 216 net/netfilter/xt_recent.c 	list_for_each_entry(t, &recent_net->tables, list)
t                 217 net/netfilter/xt_recent.c 		if (!strcmp(t->name, name))
t                 218 net/netfilter/xt_recent.c 			return t;
t                 222 net/netfilter/xt_recent.c static void recent_table_flush(struct recent_table *t)
t                 228 net/netfilter/xt_recent.c 		list_for_each_entry_safe(e, next, &t->iphash[i], list)
t                 229 net/netfilter/xt_recent.c 			recent_entry_remove(t, e);
t                 238 net/netfilter/xt_recent.c 	struct recent_table *t;
t                 270 net/netfilter/xt_recent.c 	t = recent_table_lookup(recent_net, info->name);
t                 272 net/netfilter/xt_recent.c 	nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
t                 274 net/netfilter/xt_recent.c 	e = recent_entry_lookup(t, &addr_mask, xt_family(par),
t                 279 net/netfilter/xt_recent.c 		e = recent_entry_init(t, &addr_mask, xt_family(par), ttl);
t                 289 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
t                 306 net/netfilter/xt_recent.c 			recent_entry_reap(t, time);
t                 311 net/netfilter/xt_recent.c 		recent_entry_update(t, e);
t                 328 net/netfilter/xt_recent.c 	struct recent_table *t;
t                 372 net/netfilter/xt_recent.c 	t = recent_table_lookup(recent_net, info->name);
t                 373 net/netfilter/xt_recent.c 	if (t != NULL) {
t                 374 net/netfilter/xt_recent.c 		if (nstamp_mask > t->nstamps_max_mask) {
t                 376 net/netfilter/xt_recent.c 			recent_table_flush(t);
t                 377 net/netfilter/xt_recent.c 			t->nstamps_max_mask = nstamp_mask;
t                 381 net/netfilter/xt_recent.c 		t->refcnt++;
t                 386 net/netfilter/xt_recent.c 	t = kvzalloc(struct_size(t, iphash, ip_list_hash_size), GFP_KERNEL);
t                 387 net/netfilter/xt_recent.c 	if (t == NULL) {
t                 391 net/netfilter/xt_recent.c 	t->refcnt = 1;
t                 392 net/netfilter/xt_recent.c 	t->nstamps_max_mask = nstamp_mask;
t                 394 net/netfilter/xt_recent.c 	memcpy(&t->mask, &info->mask, sizeof(t->mask));
t                 395 net/netfilter/xt_recent.c 	strcpy(t->name, info->name);
t                 396 net/netfilter/xt_recent.c 	INIT_LIST_HEAD(&t->lru_list);
t                 398 net/netfilter/xt_recent.c 		INIT_LIST_HEAD(&t->iphash[i]);
t                 403 net/netfilter/xt_recent.c 		recent_table_free(t);
t                 407 net/netfilter/xt_recent.c 	pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
t                 408 net/netfilter/xt_recent.c 		  &recent_mt_fops, t);
t                 410 net/netfilter/xt_recent.c 		recent_table_free(t);
t                 417 net/netfilter/xt_recent.c 	list_add_tail(&t->list, &recent_net->tables);
t                 447 net/netfilter/xt_recent.c 	struct recent_table *t;
t                 450 net/netfilter/xt_recent.c 	t = recent_table_lookup(recent_net, info->name);
t                 451 net/netfilter/xt_recent.c 	if (--t->refcnt == 0) {
t                 453 net/netfilter/xt_recent.c 		list_del(&t->list);
t                 457 net/netfilter/xt_recent.c 			remove_proc_entry(t->name, recent_net->xt_recent);
t                 459 net/netfilter/xt_recent.c 		recent_table_flush(t);
t                 460 net/netfilter/xt_recent.c 		recent_table_free(t);
t                 475 net/netfilter/xt_recent.c 	const struct recent_table *t = st->table;
t                 482 net/netfilter/xt_recent.c 		list_for_each_entry(e, &t->iphash[st->bucket], list)
t                 491 net/netfilter/xt_recent.c 	const struct recent_table *t = st->table;
t                 496 net/netfilter/xt_recent.c 	while (head == &t->iphash[st->bucket]) {
t                 499 net/netfilter/xt_recent.c 		head = t->iphash[st->bucket].next;
t                 514 net/netfilter/xt_recent.c 	const struct recent_table *t = st->table;
t                 517 net/netfilter/xt_recent.c 	i = (e->index - 1) & t->nstamps_max_mask;
t                 554 net/netfilter/xt_recent.c 	struct recent_table *t = PDE_DATA(file_inode(file));
t                 575 net/netfilter/xt_recent.c 		recent_table_flush(t);
t                 603 net/netfilter/xt_recent.c 	e = recent_entry_lookup(t, &addr, family, 0);
t                 606 net/netfilter/xt_recent.c 			recent_entry_init(t, &addr, family, 0);
t                 609 net/netfilter/xt_recent.c 			recent_entry_update(t, e);
t                 611 net/netfilter/xt_recent.c 			recent_entry_remove(t, e);
t                 641 net/netfilter/xt_recent.c 	struct recent_table *t;
t                 648 net/netfilter/xt_recent.c 	list_for_each_entry(t, &recent_net->tables, list)
t                 649 net/netfilter/xt_recent.c 	        remove_proc_entry(t->name, recent_net->xt_recent);
t                  38 net/netfilter/xt_set.c #define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo)	\
t                  44 net/netfilter/xt_set.c 	.ext.timeout = t,				\
t                 219 net/netrom/af_netrom.c static void nr_destroy_timer(struct timer_list *t)
t                 221 net/netrom/af_netrom.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 513 net/netrom/nr_route.c 	struct nr_node  *t;
t                 520 net/netrom/nr_route.c 			nr_node_for_each_safe(t, node2t, &nr_node_list) {
t                 521 net/netrom/nr_route.c 				nr_node_lock(t);
t                 522 net/netrom/nr_route.c 				for (i = 0; i < t->count; i++) {
t                 523 net/netrom/nr_route.c 					if (t->routes[i].neighbour == s) {
t                 524 net/netrom/nr_route.c 						t->count--;
t                 528 net/netrom/nr_route.c 							t->routes[0] = t->routes[1];
t                 531 net/netrom/nr_route.c 							t->routes[1] = t->routes[2];
t                 538 net/netrom/nr_route.c 				if (t->count <= 0)
t                 539 net/netrom/nr_route.c 					nr_remove_node_locked(t);
t                 540 net/netrom/nr_route.c 				nr_node_unlock(t);
t                 957 net/netrom/nr_route.c 	struct nr_node  *t = NULL;
t                 962 net/netrom/nr_route.c 	nr_node_for_each_safe(t, nodet, &nr_node_list) {
t                 963 net/netrom/nr_route.c 		nr_node_lock(t);
t                 964 net/netrom/nr_route.c 		nr_remove_node_locked(t);
t                 965 net/netrom/nr_route.c 		nr_node_unlock(t);
t                 112 net/netrom/nr_timer.c static void nr_heartbeat_expiry(struct timer_list *t)
t                 114 net/netrom/nr_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 151 net/netrom/nr_timer.c static void nr_t2timer_expiry(struct timer_list *t)
t                 153 net/netrom/nr_timer.c 	struct nr_sock *nr = from_timer(nr, t, t2timer);
t                 164 net/netrom/nr_timer.c static void nr_t4timer_expiry(struct timer_list *t)
t                 166 net/netrom/nr_timer.c 	struct nr_sock *nr = from_timer(nr, t, t4timer);
t                 174 net/netrom/nr_timer.c static void nr_idletimer_expiry(struct timer_list *t)
t                 176 net/netrom/nr_timer.c 	struct nr_sock *nr = from_timer(nr, t, idletimer);
t                 202 net/netrom/nr_timer.c static void nr_t1timer_expiry(struct timer_list *t)
t                 204 net/netrom/nr_timer.c 	struct nr_sock *nr = from_timer(nr, t, t1timer);
t                1006 net/nfc/core.c static void nfc_check_pres_timeout(struct timer_list *t)
t                1008 net/nfc/core.c 	struct nfc_dev *dev = from_timer(dev, t, check_pres_timer);
t                 442 net/nfc/hci/core.c static void nfc_hci_cmd_timeout(struct timer_list *t)
t                 444 net/nfc/hci/core.c 	struct nfc_hci_dev *hdev = from_timer(hdev, t, cmd_timer);
t                 572 net/nfc/hci/llc_shdlc.c static void llc_shdlc_connect_timeout(struct timer_list *t)
t                 574 net/nfc/hci/llc_shdlc.c 	struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);
t                 581 net/nfc/hci/llc_shdlc.c static void llc_shdlc_t1_timeout(struct timer_list *t)
t                 583 net/nfc/hci/llc_shdlc.c 	struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);
t                 590 net/nfc/hci/llc_shdlc.c static void llc_shdlc_t2_timeout(struct timer_list *t)
t                 592 net/nfc/hci/llc_shdlc.c 	struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);
t                 233 net/nfc/llcp_core.c static void nfc_llcp_symm_timer(struct timer_list *t)
t                 235 net/nfc/llcp_core.c 	struct nfc_llcp_local *local = from_timer(local, t, link_timer);
t                 276 net/nfc/llcp_core.c static void nfc_llcp_sdreq_timer(struct timer_list *t)
t                 278 net/nfc/llcp_core.c 	struct nfc_llcp_local *local = from_timer(local, t, sdreq_timer);
t                 582 net/nfc/nci/core.c static void nci_cmd_timer(struct timer_list *t)
t                 584 net/nfc/nci/core.c 	struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);
t                 591 net/nfc/nci/core.c static void nci_data_timer(struct timer_list *t)
t                 593 net/nfc/nci/core.c 	struct nci_dev *ndev = from_timer(ndev, t, data_timer);
t                  34 net/nfc/nci/spi.c 	struct spi_transfer t;
t                  36 net/nfc/nci/spi.c 	memset(&t, 0, sizeof(struct spi_transfer));
t                  39 net/nfc/nci/spi.c 		t.tx_buf = skb->data;
t                  40 net/nfc/nci/spi.c 		t.len = skb->len;
t                  43 net/nfc/nci/spi.c 		t.tx_buf = &t;
t                  44 net/nfc/nci/spi.c 		t.len = 0;
t                  46 net/nfc/nci/spi.c 	t.cs_change = cs_change;
t                  47 net/nfc/nci/spi.c 	t.delay_usecs = nspi->xfer_udelay;
t                  48 net/nfc/nci/spi.c 	t.speed_hz = nspi->xfer_speed_hz;
t                  51 net/nfc/nci/spi.c 	spi_message_add_tail(&t, &m);
t                 635 net/packet/af_packet.c static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
t                 638 net/packet/af_packet.c 		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
t                 574 net/rds/recv.c 		struct rds_cmsg_rx_trace t;
t                 577 net/rds/recv.c 		memset(&t, 0, sizeof(t));
t                 579 net/rds/recv.c 		t.rx_traces =  rs->rs_rx_traces;
t                 582 net/rds/recv.c 			t.rx_trace_pos[i] = j;
t                 583 net/rds/recv.c 			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
t                 588 net/rds/recv.c 			       sizeof(t), &t);
t                 296 net/rose/af_rose.c static void rose_destroy_timer(struct timer_list *t)
t                 298 net/rose/af_rose.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                  75 net/rose/rose_link.c static void rose_ftimer_expiry(struct timer_list *t)
t                  79 net/rose/rose_link.c static void rose_t0timer_expiry(struct timer_list *t)
t                  81 net/rose/rose_link.c 	struct rose_neigh *neigh = from_timer(neigh, t, t0timer);
t                 480 net/rose/rose_route.c 	struct rose_node  *t, *rose_node;
t                 496 net/rose/rose_route.c 			t         = rose_node;
t                 499 net/rose/rose_route.c 			for (i = 0; i < t->count; i++) {
t                 500 net/rose/rose_route.c 				if (t->neighbour[i] != s)
t                 503 net/rose/rose_route.c 				t->count--;
t                 507 net/rose/rose_route.c 					t->neighbour[0] = t->neighbour[1];
t                 510 net/rose/rose_route.c 					t->neighbour[1] = t->neighbour[2];
t                 516 net/rose/rose_route.c 			if (t->count <= 0)
t                 517 net/rose/rose_route.c 				rose_remove_node(t);
t                 555 net/rose/rose_route.c 	struct rose_node  *t, *rose_node;
t                 564 net/rose/rose_route.c 		t         = rose_node;
t                 566 net/rose/rose_route.c 		if (!t->loopback)
t                 567 net/rose/rose_route.c 			rose_remove_node(t);
t                1305 net/rose/rose_route.c 	struct rose_node  *t, *rose_node  = rose_node_list;
t                1316 net/rose/rose_route.c 		t         = rose_node;
t                1319 net/rose/rose_route.c 		rose_remove_node(t);
t                  28 net/rose/rose_timer.c static void rose_heartbeat_expiry(struct timer_list *t);
t                 119 net/rose/rose_timer.c static void rose_heartbeat_expiry(struct timer_list *t)
t                 121 net/rose/rose_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 157 net/rose/rose_timer.c static void rose_timer_expiry(struct timer_list *t)
t                 159 net/rose/rose_timer.c 	struct rose_sock *rose = from_timer(rose, t, timer);
t                 186 net/rose/rose_timer.c static void rose_idletimer_expiry(struct timer_list *t)
t                 188 net/rose/rose_timer.c 	struct rose_sock *rose = from_timer(rose, t, idletimer);
t                 301 net/rxrpc/call_event.c 	unsigned long now, next, t;
t                 329 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_rx_by);
t                 330 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 335 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_req_by);
t                 337 net/rxrpc/call_event.c 	    time_after_eq(now, t)) {
t                 342 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_term_by);
t                 343 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 348 net/rxrpc/call_event.c 	t = READ_ONCE(call->ack_at);
t                 349 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 351 net/rxrpc/call_event.c 		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
t                 355 net/rxrpc/call_event.c 	t = READ_ONCE(call->ack_lost_at);
t                 356 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 358 net/rxrpc/call_event.c 		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
t                 362 net/rxrpc/call_event.c 	t = READ_ONCE(call->keepalive_at);
t                 363 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 365 net/rxrpc/call_event.c 		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
t                 371 net/rxrpc/call_event.c 	t = READ_ONCE(call->ping_at);
t                 372 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 374 net/rxrpc/call_event.c 		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
t                 378 net/rxrpc/call_event.c 	t = READ_ONCE(call->resend_at);
t                 379 net/rxrpc/call_event.c 	if (time_after_eq(now, t)) {
t                 381 net/rxrpc/call_event.c 		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
t                 427 net/rxrpc/call_event.c #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
t                  44 net/rxrpc/call_object.c static void rxrpc_call_timer_expired(struct timer_list *t)
t                  46 net/rxrpc/call_object.c 	struct rxrpc_call *call = from_timer(call, t, timer);
t                1042 net/sched/act_api.c 	struct tcamsg *t;
t                1047 net/sched/act_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
t                1050 net/sched/act_api.c 	t = nlmsg_data(nlh);
t                1051 net/sched/act_api.c 	t->tca_family = AF_UNSPEC;
t                1052 net/sched/act_api.c 	t->tca__pad1 = 0;
t                1053 net/sched/act_api.c 	t->tca__pad2 = 0;
t                1143 net/sched/act_api.c 	struct tcamsg *t;
t                1171 net/sched/act_api.c 			sizeof(*t), 0);
t                1176 net/sched/act_api.c 	t = nlmsg_data(nlh);
t                1177 net/sched/act_api.c 	t->tca_family = AF_UNSPEC;
t                1178 net/sched/act_api.c 	t->tca__pad1 = 0;
t                1179 net/sched/act_api.c 	t->tca__pad2 = 0;
t                1463 net/sched/act_api.c 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
t                1498 net/sched/act_api.c 			cb->nlh->nlmsg_type, sizeof(*t), 0);
t                1505 net/sched/act_api.c 	t = nlmsg_data(nlh);
t                1506 net/sched/act_api.c 	t->tca_family = AF_UNSPEC;
t                1507 net/sched/act_api.c 	t->tca__pad1 = 0;
t                1508 net/sched/act_api.c 	t->tca__pad2 = 0;
t                 179 net/sched/act_connmark.c 	struct tcf_t t;
t                 187 net/sched/act_connmark.c 	tcf_tm_dump(&t, &ci->tcf_tm);
t                 188 net/sched/act_connmark.c 	if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
t                 643 net/sched/act_csum.c 	struct tcf_t t;
t                 654 net/sched/act_csum.c 	tcf_tm_dump(&t, &p->tcf_tm);
t                 655 net/sched/act_csum.c 	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
t                 841 net/sched/act_ct.c 	struct tcf_t t;
t                 885 net/sched/act_ct.c 	tcf_tm_dump(&t, &c->tcf_tm);
t                 886 net/sched/act_ct.c 	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
t                 293 net/sched/act_ctinfo.c 	struct tcf_t t;
t                 299 net/sched/act_ctinfo.c 	tcf_tm_dump(&t, &ci->tcf_tm);
t                 300 net/sched/act_ctinfo.c 	if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
t                 202 net/sched/act_gact.c 	struct tcf_t t;
t                 220 net/sched/act_gact.c 	tcf_tm_dump(&t, &gact->tcf_tm);
t                 221 net/sched/act_gact.c 	if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
t                 629 net/sched/act_ife.c 	struct tcf_t t;
t                 640 net/sched/act_ife.c 	tcf_tm_dump(&t, &ife->tcf_tm);
t                 641 net/sched/act_ife.c 	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
t                  33 net/sched/act_ipt.c static int ipt_init_target(struct net *net, struct xt_entry_target *t,
t                  41 net/sched/act_ipt.c 	target = xt_request_find_target(AF_INET, t->u.user.name,
t                  42 net/sched/act_ipt.c 					t->u.user.revision);
t                  46 net/sched/act_ipt.c 	t->u.kernel.target = target;
t                  52 net/sched/act_ipt.c 	par.targinfo  = t->data;
t                  56 net/sched/act_ipt.c 	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
t                  58 net/sched/act_ipt.c 		module_put(t->u.kernel.target->me);
t                  64 net/sched/act_ipt.c static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
t                  67 net/sched/act_ipt.c 		.target   = t->u.kernel.target,
t                  68 net/sched/act_ipt.c 		.targinfo = t->data,
t                 103 net/sched/act_ipt.c 	struct xt_entry_target *td, *t;
t                 172 net/sched/act_ipt.c 	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
t                 173 net/sched/act_ipt.c 	if (unlikely(!t))
t                 176 net/sched/act_ipt.c 	err = ipt_init_target(net, t, tname, hook);
t                 189 net/sched/act_ipt.c 	ipt->tcfi_t     = t;
t                 197 net/sched/act_ipt.c 	kfree(t);
t                 280 net/sched/act_ipt.c 	struct xt_entry_target *t;
t                 290 net/sched/act_ipt.c 	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
t                 291 net/sched/act_ipt.c 	if (unlikely(!t))
t                 296 net/sched/act_ipt.c 	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
t                 298 net/sched/act_ipt.c 	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
t                 310 net/sched/act_ipt.c 	kfree(t);
t                 316 net/sched/act_ipt.c 	kfree(t);
t                 339 net/sched/act_mirred.c 	struct tcf_t t;
t                 351 net/sched/act_mirred.c 	tcf_tm_dump(&t, &m->tcf_tm);
t                 352 net/sched/act_mirred.c 	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
t                 308 net/sched/act_mpls.c 	struct tcf_t t;
t                 336 net/sched/act_mpls.c 	tcf_tm_dump(&t, &m->tcf_tm);
t                 338 net/sched/act_mpls.c 	if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD))
t                 272 net/sched/act_nat.c 	struct tcf_t t;
t                 284 net/sched/act_nat.c 	tcf_tm_dump(&t, &p->tcf_tm);
t                 285 net/sched/act_nat.c 	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
t                 417 net/sched/act_pedit.c 	struct tcf_t t;
t                 450 net/sched/act_pedit.c 	tcf_tm_dump(&t, &p->tcf_tm);
t                 451 net/sched/act_pedit.c 	if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
t                 315 net/sched/act_police.c 	struct tcf_t t;
t                 348 net/sched/act_police.c 	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
t                 349 net/sched/act_police.c 	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
t                 350 net/sched/act_police.c 	t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
t                 351 net/sched/act_police.c 	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
t                 352 net/sched/act_police.c 	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
t                 209 net/sched/act_sample.c 	struct tcf_t t;
t                 216 net/sched/act_sample.c 	tcf_tm_dump(&t, &s->tcf_tm);
t                 217 net/sched/act_sample.c 	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
t                 180 net/sched/act_simple.c 	struct tcf_t t;
t                 188 net/sched/act_simple.c 	tcf_tm_dump(&t, &d->tcf_tm);
t                 189 net/sched/act_simple.c 	if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
t                 240 net/sched/act_skbedit.c 	struct tcf_t t;
t                 270 net/sched/act_skbedit.c 	tcf_tm_dump(&t, &d->tcf_tm);
t                 271 net/sched/act_skbedit.c 	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
t                 225 net/sched/act_skbmod.c 	struct tcf_t t;
t                 244 net/sched/act_skbmod.c 	tcf_tm_dump(&t, &d->tcf_tm);
t                 245 net/sched/act_skbmod.c 	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
t                  27 net/sched/act_tunnel_key.c 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                  31 net/sched/act_tunnel_key.c 	params = rcu_dereference_bh(t->params);
t                  33 net/sched/act_tunnel_key.c 	tcf_lastuse_update(&t->tcf_tm);
t                  34 net/sched/act_tunnel_key.c 	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
t                  35 net/sched/act_tunnel_key.c 	action = READ_ONCE(t->tcf_action);
t                 224 net/sched/act_tunnel_key.c 	struct tcf_tunnel_key *t;
t                 374 net/sched/act_tunnel_key.c 	t = to_tunnel_key(*a);
t                 386 net/sched/act_tunnel_key.c 	spin_lock_bh(&t->tcf_lock);
t                 388 net/sched/act_tunnel_key.c 	rcu_swap_protected(t->params, params_new,
t                 389 net/sched/act_tunnel_key.c 			   lockdep_is_held(&t->tcf_lock));
t                 390 net/sched/act_tunnel_key.c 	spin_unlock_bh(&t->tcf_lock);
t                 418 net/sched/act_tunnel_key.c 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                 421 net/sched/act_tunnel_key.c 	params = rcu_dereference_protected(t->params, 1);
t                 516 net/sched/act_tunnel_key.c 	struct tcf_tunnel_key *t = to_tunnel_key(a);
t                 519 net/sched/act_tunnel_key.c 		.index    = t->tcf_index,
t                 520 net/sched/act_tunnel_key.c 		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
t                 521 net/sched/act_tunnel_key.c 		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
t                 525 net/sched/act_tunnel_key.c 	spin_lock_bh(&t->tcf_lock);
t                 526 net/sched/act_tunnel_key.c 	params = rcu_dereference_protected(t->params,
t                 527 net/sched/act_tunnel_key.c 					   lockdep_is_held(&t->tcf_lock));
t                 528 net/sched/act_tunnel_key.c 	opt.action   = t->tcf_action;
t                 559 net/sched/act_tunnel_key.c 	tcf_tm_dump(&tm, &t->tcf_tm);
t                 563 net/sched/act_tunnel_key.c 	spin_unlock_bh(&t->tcf_lock);
t                 568 net/sched/act_tunnel_key.c 	spin_unlock_bh(&t->tcf_lock);
t                 263 net/sched/act_vlan.c 	struct tcf_t t;
t                 281 net/sched/act_vlan.c 	tcf_tm_dump(&t, &v->tcf_tm);
t                 282 net/sched/act_vlan.c 	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
t                 111 net/sched/cls_api.c 	const struct tcf_proto_ops *t, *res = NULL;
t                 115 net/sched/cls_api.c 		list_for_each_entry(t, &tcf_proto_base, head) {
t                 116 net/sched/cls_api.c 			if (strcmp(kind, t->kind) == 0) {
t                 117 net/sched/cls_api.c 				if (try_module_get(t->owner))
t                 118 net/sched/cls_api.c 					res = t;
t                 161 net/sched/cls_api.c 	struct tcf_proto_ops *t;
t                 165 net/sched/cls_api.c 	list_for_each_entry(t, &tcf_proto_base, head)
t                 166 net/sched/cls_api.c 		if (!strcmp(ops->kind, t->kind))
t                 181 net/sched/cls_api.c 	struct tcf_proto_ops *t;
t                 191 net/sched/cls_api.c 	list_for_each_entry(t, &tcf_proto_base, head) {
t                 192 net/sched/cls_api.c 		if (t == ops) {
t                 193 net/sched/cls_api.c 			list_del(&t->head);
t                1925 net/sched/cls_api.c 	struct tcmsg *t;
t                1948 net/sched/cls_api.c 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
t                1953 net/sched/cls_api.c 	t = nlmsg_data(n);
t                1954 net/sched/cls_api.c 	protocol = TC_H_MIN(t->tcm_info);
t                1955 net/sched/cls_api.c 	prio = TC_H_MAJ(t->tcm_info);
t                1957 net/sched/cls_api.c 	parent = t->tcm_parent;
t                1977 net/sched/cls_api.c 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
t                1998 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
t                2002 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
t                2083 net/sched/cls_api.c 	fh = tp->ops->get(tp, t->tcm_handle);
t                2104 net/sched/cls_api.c 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
t                2152 net/sched/cls_api.c 	struct tcmsg *t;
t                2170 net/sched/cls_api.c 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
t                2175 net/sched/cls_api.c 	t = nlmsg_data(n);
t                2176 net/sched/cls_api.c 	protocol = TC_H_MIN(t->tcm_info);
t                2177 net/sched/cls_api.c 	prio = TC_H_MAJ(t->tcm_info);
t                2178 net/sched/cls_api.c 	parent = t->tcm_parent;
t                2180 net/sched/cls_api.c 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
t                2187 net/sched/cls_api.c 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
t                2207 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
t                2211 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
t                2257 net/sched/cls_api.c 	} else if (t->tcm_handle == 0) {
t                2270 net/sched/cls_api.c 	fh = tp->ops->get(tp, t->tcm_handle);
t                2312 net/sched/cls_api.c 	struct tcmsg *t;
t                2327 net/sched/cls_api.c 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
t                2332 net/sched/cls_api.c 	t = nlmsg_data(n);
t                2333 net/sched/cls_api.c 	protocol = TC_H_MIN(t->tcm_info);
t                2334 net/sched/cls_api.c 	prio = TC_H_MAJ(t->tcm_info);
t                2335 net/sched/cls_api.c 	parent = t->tcm_parent;
t                2344 net/sched/cls_api.c 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
t                2363 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
t                2367 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
t                2401 net/sched/cls_api.c 	fh = tp->ops->get(tp, t->tcm_handle);
t                2764 net/sched/cls_api.c 	struct tcmsg *t;
t                2778 net/sched/cls_api.c 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
t                2783 net/sched/cls_api.c 	t = nlmsg_data(n);
t                2784 net/sched/cls_api.c 	parent = t->tcm_parent;
t                2788 net/sched/cls_api.c 			       t->tcm_ifindex, t->tcm_block_index, extack);
t                 280 net/sched/cls_basic.c 		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 290 net/sched/cls_basic.c 	t->tcm_handle = f->handle;
t                 171 net/sched/cls_cgroup.c 			   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 176 net/sched/cls_cgroup.c 	t->tcm_handle = head->handle;
t                 344 net/sched/cls_flow.c static void flow_perturbation(struct timer_list *t)
t                 346 net/sched/cls_flow.c 	struct flow_filter *f = from_timer(f, t, perturb_timer);
t                 619 net/sched/cls_flow.c 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 627 net/sched/cls_flow.c 	t->tcm_handle = f->handle;
t                2434 net/sched/cls_flower.c 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                2444 net/sched/cls_flower.c 	t->tcm_handle = f->handle;
t                 376 net/sched/cls_fw.c 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 385 net/sched/cls_fw.c 	t->tcm_handle = f->id;
t                 345 net/sched/cls_matchall.c 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 358 net/sched/cls_matchall.c 	t->tcm_handle = head->handle;
t                 596 net/sched/cls_route.c 		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 605 net/sched/cls_route.c 	t->tcm_handle = f->handle;
t                 691 net/sched/cls_rsvp.h 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 702 net/sched/cls_rsvp.h 	t->tcm_handle = f->handle;
t                 631 net/sched/cls_tcindex.c 			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                 638 net/sched/cls_tcindex.c 		 tp, fh, skb, t, p, r);
t                 646 net/sched/cls_tcindex.c 		t->tcm_handle = ~0; /* whatever ... */
t                 655 net/sched/cls_tcindex.c 			t->tcm_handle = r - p->perfect;
t                 661 net/sched/cls_tcindex.c 			t->tcm_handle = 0;
t                 662 net/sched/cls_tcindex.c 			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
t                 665 net/sched/cls_tcindex.c 				     !t->tcm_handle && f;
t                 668 net/sched/cls_tcindex.c 						t->tcm_handle = f->key;
t                 672 net/sched/cls_tcindex.c 		pr_debug("handle = %d\n", t->tcm_handle);
t                1272 net/sched/cls_u32.c 		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
t                1281 net/sched/cls_u32.c 	t->tcm_handle = n->handle;
t                2194 net/sched/sch_api.c 	int t, s_t;
t                2203 net/sched/sch_api.c 	t = 0;
t                2205 net/sched/sch_api.c 	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
t                2211 net/sched/sch_api.c 				&t, s_t) < 0)
t                2215 net/sched/sch_api.c 	cb->args[0] = t;
t                 146 net/sched/sch_cake.c 	u16 t:3, b:10;
t                 438 net/sched/sch_cake.c static ktime_t cobalt_control(ktime_t t,
t                 442 net/sched/sch_cake.c 	return ktime_add_ns(t, reciprocal_scale(interval,
t                1364 net/sched/sch_cake.c 	q->tins[ii.t].overflow_idx[ii.b] = j;
t                1365 net/sched/sch_cake.c 	q->tins[jj.t].overflow_idx[jj.b] = i;
t                1372 net/sched/sch_cake.c 	return q->tins[ii.t].backlogs[ii.b];
t                1481 net/sched/sch_cake.c 	tin = qq.t;
t                1590 net/sched/sch_cake.c static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
t                1624 net/sched/sch_cake.c 	*t = cake_select_tin(sch, skb);
t                1625 net/sched/sch_cake.c 	return cake_hash(*t, skb, flow_mode, flow, host) + 1;
t                2510 net/sched/sch_cake.c 		u64 t = q->rate_bps * q->interval;
t                2512 net/sched/sch_cake.c 		do_div(t, USEC_PER_SEC / 4);
t                2513 net/sched/sch_cake.c 		q->buffer_limit = max_t(u32, t, 4U << 20);
t                2718 net/sched/sch_cake.c 			q->overflow_heap[k].t = i;
t                 418 net/sched/sch_generic.c static void dev_watchdog(struct timer_list *t)
t                 420 net/sched/sch_generic.c 	struct net_device *dev = from_timer(dev, t, watchdog_timer);
t                 168 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
t                 172 net/sched/sch_gred.c 	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
t                 173 net/sched/sch_gred.c 		dp = t->def;
t                 175 net/sched/sch_gred.c 		q = t->tab[dp];
t                 194 net/sched/sch_gred.c 	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
t                 197 net/sched/sch_gred.c 		for (i = 0; i < t->DPs; i++) {
t                 198 net/sched/sch_gred.c 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
t                 199 net/sched/sch_gred.c 			    !red_is_idling(&t->tab[i]->vars))
t                 200 net/sched/sch_gred.c 				qavg += t->tab[i]->vars.qavg;
t                 208 net/sched/sch_gred.c 	if (gred_wred_mode(t))
t                 209 net/sched/sch_gred.c 		gred_load_wred_set(t, q);
t                 213 net/sched/sch_gred.c 				     gred_backlog(t, q, sch));
t                 218 net/sched/sch_gred.c 	if (gred_wred_mode(t))
t                 219 net/sched/sch_gred.c 		gred_store_wred_set(t, q);
t                 246 net/sched/sch_gred.c 	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
t                 263 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
t                 271 net/sched/sch_gred.c 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
t                 277 net/sched/sch_gred.c 			if (gred_wred_mode(t)) {
t                 279 net/sched/sch_gred.c 					red_start_of_idle_period(&t->wred_set);
t                 295 net/sched/sch_gred.c 	struct gred_sched *t = qdisc_priv(sch);
t                 299 net/sched/sch_gred.c 	for (i = 0; i < t->DPs; i++) {
t                 300 net/sched/sch_gred.c 		struct gred_sched_data *q = t->tab[i];
t                 323 net/sched/sch_netem.c 	long t;
t                 335 net/sched/sch_netem.c 	t = dist->table[rnd % dist->size];
t                 336 net/sched/sch_netem.c 	x = (sigma % NETEM_DIST_SCALE) * t;
t                 342 net/sched/sch_netem.c 	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
t                 443 net/sched/sch_pie.c static void pie_timer(struct timer_list *t)
t                 445 net/sched/sch_pie.c 	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
t                 261 net/sched/sch_red.c static inline void red_adaptative_timer(struct timer_list *t)
t                 263 net/sched/sch_red.c 	struct red_sched_data *q = from_timer(q, t, adapt_timer);
t                 605 net/sched/sch_sfq.c static void sfq_perturbation(struct timer_list *t)
t                 607 net/sched/sch_sfq.c 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
t                 748 net/sctp/associola.c 	struct sctp_transport *t;
t                 752 net/sctp/associola.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                 754 net/sctp/associola.c 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
t                 755 net/sctp/associola.c 			return t;
t                 766 net/sctp/associola.c 	struct sctp_transport	*t;
t                 768 net/sctp/associola.c 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
t                 771 net/sctp/associola.c 		if (t != primary)
t                 772 net/sctp/associola.c 			sctp_assoc_rm_peer(asoc, t);
t                1426 net/sctp/associola.c 	struct sctp_transport *t;
t                1433 net/sctp/associola.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
t                1434 net/sctp/associola.c 		if (t->pmtu_pending && t->dst) {
t                1435 net/sctp/associola.c 			sctp_transport_update_pmtu(t,
t                1436 net/sctp/associola.c 						   atomic_read(&t->mtu_info));
t                1437 net/sctp/associola.c 			t->pmtu_pending = 0;
t                1439 net/sctp/associola.c 		if (!pmtu || (t->pathmtu < pmtu))
t                1440 net/sctp/associola.c 			pmtu = t->pathmtu;
t                 266 net/sctp/endpointola.c 	struct sctp_transport *t;
t                 277 net/sctp/endpointola.c 	t = sctp_epaddr_lookup_transport(ep, paddr);
t                 278 net/sctp/endpointola.c 	if (!t)
t                 281 net/sctp/endpointola.c 	*transport = t;
t                 282 net/sctp/endpointola.c 	asoc = t->asoc;
t                 293 net/sctp/input.c 	struct sctp_transport *t = chunk->transport;
t                 351 net/sctp/input.c 		sctp_transport_put(t);
t                 363 net/sctp/input.c 	struct sctp_transport *t = chunk->transport;
t                 374 net/sctp/input.c 			sctp_transport_hold(t);
t                 386 net/sctp/input.c 			   struct sctp_transport *t, __u32 pmtu)
t                 388 net/sctp/input.c 	if (!t || (t->pathmtu <= pmtu))
t                 392 net/sctp/input.c 		atomic_set(&t->mtu_info, pmtu);
t                 394 net/sctp/input.c 		t->pmtu_pending = 1;
t                 398 net/sctp/input.c 	if (!(t->param_flags & SPP_PMTUD_ENABLE))
t                 409 net/sctp/input.c 	if (!sctp_transport_update_pmtu(t, pmtu))
t                 416 net/sctp/input.c 	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
t                 419 net/sctp/input.c void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
t                 424 net/sctp/input.c 	if (sock_owned_by_user(sk) || !t)
t                 426 net/sctp/input.c 	dst = sctp_transport_dst_check(t);
t                 444 net/sctp/input.c 			   struct sctp_transport *t)
t                 447 net/sctp/input.c 		if (timer_pending(&t->proto_unreach_timer))
t                 450 net/sctp/input.c 			if (!mod_timer(&t->proto_unreach_timer,
t                 460 net/sctp/input.c 		if (del_timer(&t->proto_unreach_timer))
t                 465 net/sctp/input.c 			   asoc->state, asoc->ep, asoc, t,
t                 550 net/sctp/input.c void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
t                 553 net/sctp/input.c 	sctp_transport_put(t);
t                 876 net/sctp/input.c 	struct sctp_transport *t = (struct sctp_transport *)ptr;
t                 880 net/sctp/input.c 	if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
t                 882 net/sctp/input.c 	if (!sctp_transport_hold(t))
t                 885 net/sctp/input.c 	if (!net_eq(t->asoc->base.net, x->net))
t                 887 net/sctp/input.c 	if (x->lport != htons(t->asoc->base.bind_addr.port))
t                 892 net/sctp/input.c 	sctp_transport_put(t);
t                 898 net/sctp/input.c 	const struct sctp_transport *t = data;
t                 900 net/sctp/input.c 	return sctp_hashfn(t->asoc->base.net,
t                 901 net/sctp/input.c 			   htons(t->asoc->base.bind_addr.port),
t                 902 net/sctp/input.c 			   &t->ipaddr, seed);
t                 930 net/sctp/input.c int sctp_hash_transport(struct sctp_transport *t)
t                 937 net/sctp/input.c 	if (t->asoc->temp)
t                 940 net/sctp/input.c 	arg.net   = sock_net(t->asoc->base.sk);
t                 941 net/sctp/input.c 	arg.paddr = &t->ipaddr;
t                 942 net/sctp/input.c 	arg.lport = htons(t->asoc->base.bind_addr.port);
t                 949 net/sctp/input.c 		if (transport->asoc->ep == t->asoc->ep) {
t                 956 net/sctp/input.c 				  &t->node, sctp_hash_params);
t                 963 net/sctp/input.c void sctp_unhash_transport(struct sctp_transport *t)
t                 965 net/sctp/input.c 	if (t->asoc->temp)
t                 968 net/sctp/input.c 	rhltable_remove(&sctp_transport_hashtable, &t->node,
t                 979 net/sctp/input.c 	struct sctp_transport *t;
t                 989 net/sctp/input.c 	rhl_for_each_entry_rcu(t, tmp, list, node) {
t                 990 net/sctp/input.c 		if (!sctp_transport_hold(t))
t                 993 net/sctp/input.c 		if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
t                 994 net/sctp/input.c 					 laddr, sctp_sk(t->asoc->base.sk)))
t                 995 net/sctp/input.c 			return t;
t                 996 net/sctp/input.c 		sctp_transport_put(t);
t                1009 net/sctp/input.c 	struct sctp_transport *t;
t                1019 net/sctp/input.c 	rhl_for_each_entry_rcu(t, tmp, list, node)
t                1020 net/sctp/input.c 		if (ep == t->asoc->ep)
t                1021 net/sctp/input.c 			return t;
t                1033 net/sctp/input.c 	struct sctp_transport *t;
t                1036 net/sctp/input.c 	t = sctp_addrs_lookup_transport(net, local, peer);
t                1037 net/sctp/input.c 	if (!t)
t                1040 net/sctp/input.c 	asoc = t->asoc;
t                1041 net/sctp/input.c 	*pt = t;
t                 226 net/sctp/ipv6.c static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
t                 229 net/sctp/ipv6.c 	struct sctp_association *asoc = t->asoc;
t                 236 net/sctp/ipv6.c 	union sctp_addr *daddr = &t->ipaddr;
t                 250 net/sctp/ipv6.c 	if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK)
t                 251 net/sctp/ipv6.c 		fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK);
t                 281 net/sctp/ipv6.c 		t->dst = dst;
t                 307 net/sctp/ipv6.c 				t->dst = dst;
t                 347 net/sctp/ipv6.c 			t->dst = dst;
t                 362 net/sctp/ipv6.c 		t->dst = dst;
t                 372 net/sctp/ipv6.c 		t->dst_cookie = rt6_get_cookie(rt);
t                 377 net/sctp/ipv6.c 		t->dst = NULL;
t                 395 net/sctp/ipv6.c 			      struct sctp_transport *t,
t                 399 net/sctp/ipv6.c 	union sctp_addr *saddr = &t->saddr;
t                 401 net/sctp/ipv6.c 	pr_debug("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
t                 403 net/sctp/ipv6.c 	if (t->dst) {
t                1139 net/sctp/outqueue.c 	struct sctp_transport *t;
t                1143 net/sctp/outqueue.c 		t = list_entry(ltransport, struct sctp_transport, send_ready);
t                1144 net/sctp/outqueue.c 		packet = &t->packet;
t                1152 net/sctp/outqueue.c 		sctp_transport_burst_reset(t);
t                 407 net/sctp/protocol.c static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
t                 410 net/sctp/protocol.c 	struct sctp_association *asoc = t->asoc;
t                 417 net/sctp/protocol.c 	union sctp_addr *daddr = &t->ipaddr;
t                 421 net/sctp/protocol.c 	if (t->dscp & SCTP_DSCP_SET_MASK)
t                 422 net/sctp/protocol.c 		tos = t->dscp & SCTP_DSCP_VAL_MASK;
t                 444 net/sctp/protocol.c 		t->dst = dst;
t                 511 net/sctp/protocol.c 				t->dst = dst;
t                 521 net/sctp/protocol.c 		t->dst = dst;
t                 533 net/sctp/protocol.c 		t->dst = NULL;
t                 542 net/sctp/protocol.c 			      struct sctp_transport *t,
t                 545 net/sctp/protocol.c 	union sctp_addr *saddr = &t->saddr;
t                 546 net/sctp/protocol.c 	struct rtable *rt = (struct rtable *)t->dst;
t                 618 net/sctp/protocol.c static void sctp_addr_wq_timeout_handler(struct timer_list *t)
t                 620 net/sctp/protocol.c 	struct net *net = from_timer(net, t, sctp.addr_wq_timer);
t                2501 net/sctp/sm_make_chunk.c 	struct sctp_transport *t;
t                2638 net/sctp/sm_make_chunk.c 		t = sctp_assoc_lookup_paddr(asoc, &addr);
t                2639 net/sctp/sm_make_chunk.c 		if (!t)
t                2642 net/sctp/sm_make_chunk.c 		sctp_assoc_set_primary(asoc, t);
t                 231 net/sctp/sm_sideeffect.c void sctp_generate_t3_rtx_event(struct timer_list *t)
t                 234 net/sctp/sm_sideeffect.c 		from_timer(transport, t, T3_rtx_timer);
t                 308 net/sctp/sm_sideeffect.c static void sctp_generate_t1_cookie_event(struct timer_list *t)
t                 311 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
t                 316 net/sctp/sm_sideeffect.c static void sctp_generate_t1_init_event(struct timer_list *t)
t                 319 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);
t                 324 net/sctp/sm_sideeffect.c static void sctp_generate_t2_shutdown_event(struct timer_list *t)
t                 327 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);
t                 332 net/sctp/sm_sideeffect.c static void sctp_generate_t4_rto_event(struct timer_list *t)
t                 335 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);
t                 340 net/sctp/sm_sideeffect.c static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
t                 343 net/sctp/sm_sideeffect.c 		from_timer(asoc, t,
t                 351 net/sctp/sm_sideeffect.c static void sctp_generate_autoclose_event(struct timer_list *t)
t                 354 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);
t                 362 net/sctp/sm_sideeffect.c void sctp_generate_heartbeat_event(struct timer_list *t)
t                 364 net/sctp/sm_sideeffect.c 	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
t                 407 net/sctp/sm_sideeffect.c void sctp_generate_proto_unreach_event(struct timer_list *t)
t                 410 net/sctp/sm_sideeffect.c 		from_timer(transport, t, proto_unreach_timer);
t                 442 net/sctp/sm_sideeffect.c void sctp_generate_reconf_event(struct timer_list *t)
t                 445 net/sctp/sm_sideeffect.c 		from_timer(transport, t, reconf_timer);
t                 475 net/sctp/sm_sideeffect.c static void sctp_generate_sack_event(struct timer_list *t)
t                 478 net/sctp/sm_sideeffect.c 		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);
t                 679 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                 685 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
t                 686 net/sctp/sm_sideeffect.c 		sctp_transport_reset_hb_timer(t);
t                 692 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                 696 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                 698 net/sctp/sm_sideeffect.c 		if (del_timer(&t->hb_timer))
t                 699 net/sctp/sm_sideeffect.c 			sctp_transport_put(t);
t                 707 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                 709 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                 711 net/sctp/sm_sideeffect.c 		if (del_timer(&t->T3_rtx_timer))
t                 712 net/sctp/sm_sideeffect.c 			sctp_transport_put(t);
t                 720 net/sctp/sm_sideeffect.c 				  struct sctp_transport *t,
t                 730 net/sctp/sm_sideeffect.c 	t->error_count = 0;
t                 740 net/sctp/sm_sideeffect.c 	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t                 741 net/sctp/sm_sideeffect.c 		t->asoc->overall_error_count = 0;
t                 746 net/sctp/sm_sideeffect.c 	t->hb_sent = 0;
t                 751 net/sctp/sm_sideeffect.c 	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
t                 753 net/sctp/sm_sideeffect.c 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
t                 757 net/sctp/sm_sideeffect.c 	if (t->state == SCTP_PF)
t                 758 net/sctp/sm_sideeffect.c 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
t                 764 net/sctp/sm_sideeffect.c 	if (t->dst)
t                 765 net/sctp/sm_sideeffect.c 		sctp_transport_dst_confirm(t);
t                 774 net/sctp/sm_sideeffect.c 	if (t->rto_pending == 0)
t                 775 net/sctp/sm_sideeffect.c 		t->rto_pending = 1;
t                 778 net/sctp/sm_sideeffect.c 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
t                 781 net/sctp/sm_sideeffect.c 	sctp_transport_reset_hb_timer(t);
t                 784 net/sctp/sm_sideeffect.c 		sctp_transport_immediate_rtx(t);
t                 815 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                 818 net/sctp/sm_sideeffect.c 		t = chunk->transport;
t                 820 net/sctp/sm_sideeffect.c 		t = sctp_assoc_choose_alter_transport(asoc,
t                 822 net/sctp/sm_sideeffect.c 		chunk->transport = t;
t                 824 net/sctp/sm_sideeffect.c 	asoc->shutdown_last_sent_to = t;
t                 825 net/sctp/sm_sideeffect.c 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
t                 942 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                 944 net/sctp/sm_sideeffect.c 	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
t                 945 net/sctp/sm_sideeffect.c 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
t                 946 net/sctp/sm_sideeffect.c 	chunk->transport = t;
t                1005 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                1010 net/sctp/sm_sideeffect.c 		t = list_entry(pos, struct sctp_transport, transports);
t                1011 net/sctp/sm_sideeffect.c 		if (!sctp_cmp_addr_exact(&t->ipaddr,
t                1013 net/sctp/sm_sideeffect.c 			sctp_assoc_rm_peer(asoc, t);
t                1069 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                1071 net/sctp/sm_sideeffect.c 	t = asoc->init_last_sent_to;
t                1074 net/sctp/sm_sideeffect.c 	if (t->init_sent_count > (asoc->init_cycle + 1)) {
t                1267 net/sctp/sm_sideeffect.c 	struct sctp_transport *t;
t                1553 net/sctp/sm_sideeffect.c 			t = sctp_assoc_choose_alter_transport(asoc,
t                1555 net/sctp/sm_sideeffect.c 			asoc->init_last_sent_to = t;
t                1556 net/sctp/sm_sideeffect.c 			chunk->transport = t;
t                1557 net/sctp/sm_sideeffect.c 			t->init_sent_count++;
t                1559 net/sctp/sm_sideeffect.c 			sctp_assoc_set_primary(asoc, t);
t                1591 net/sctp/sm_sideeffect.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                1593 net/sctp/sm_sideeffect.c 				sctp_retransmit_mark(&asoc->outqueue, t,
t                1618 net/sctp/sm_sideeffect.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                1620 net/sctp/sm_sideeffect.c 				t->init_sent_count = 0;
t                1640 net/sctp/sm_sideeffect.c 			t = cmd->obj.transport;
t                1641 net/sctp/sm_sideeffect.c 			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
t                1645 net/sctp/sm_sideeffect.c 			t = cmd->obj.transport;
t                1647 net/sctp/sm_sideeffect.c 						     t, 1);
t                1648 net/sctp/sm_sideeffect.c 			t->hb_sent = 1;
t                1652 net/sctp/sm_sideeffect.c 			t = cmd->obj.transport;
t                1653 net/sctp/sm_sideeffect.c 			sctp_cmd_transport_on(commands, asoc, t, chunk);
t                1661 net/sctp/sm_sideeffect.c 			t = cmd->obj.transport;
t                1662 net/sctp/sm_sideeffect.c 			sctp_transport_reset_hb_timer(t);
t                1698 net/sctp/sm_sideeffect.c 			t = cmd->obj.transport;
t                1699 net/sctp/sm_sideeffect.c 			t->rto_pending = 1;
t                1729 net/sctp/sm_sideeffect.c 			t = asoc->peer.retran_path;
t                1733 net/sctp/sm_sideeffect.c 			asoc->peer.retran_path = t;
t                 171 net/sctp/socket.c 	struct sctp_transport *t;
t                 173 net/sctp/socket.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
t                 174 net/sctp/socket.c 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
t                1133 net/sctp/socket.c 	struct sctp_transport *t;
t                1140 net/sctp/socket.c 	old = sctp_endpoint_lookup_assoc(ep, daddr, &t);
t                1148 net/sctp/socket.c 	t = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
t                1149 net/sctp/socket.c 	if (!t)
t                2581 net/sctp/socket.c 			struct sctp_transport *t;
t                2583 net/sctp/socket.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                2585 net/sctp/socket.c 				if (t->ipaddr.sa.sa_family != AF_INET6)
t                2587 net/sctp/socket.c 				t->flowlabel = params->spp_ipv6_flowlabel &
t                2589 net/sctp/socket.c 				t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
t                2606 net/sctp/socket.c 			struct sctp_transport *t;
t                2608 net/sctp/socket.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
t                2610 net/sctp/socket.c 				t->dscp = params->spp_dscp &
t                2612 net/sctp/socket.c 				t->dscp |= SCTP_DSCP_SET_MASK;
t                5313 net/sctp/socket.c 	struct sctp_transport *t;
t                5315 net/sctp/socket.c 	t = rhashtable_walk_next(iter);
t                5316 net/sctp/socket.c 	for (; t; t = rhashtable_walk_next(iter)) {
t                5317 net/sctp/socket.c 		if (IS_ERR(t)) {
t                5318 net/sctp/socket.c 			if (PTR_ERR(t) == -EAGAIN)
t                5323 net/sctp/socket.c 		if (!sctp_transport_hold(t))
t                5326 net/sctp/socket.c 		if (net_eq(sock_net(t->asoc->base.sk), net) &&
t                5327 net/sctp/socket.c 		    t->asoc->peer.primary_path == t)
t                5330 net/sctp/socket.c 		sctp_transport_put(t);
t                5333 net/sctp/socket.c 	return t;
t                5340 net/sctp/socket.c 	struct sctp_transport *t;
t                5345 net/sctp/socket.c 	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
t                5348 net/sctp/socket.c 		sctp_transport_put(t);
t                5351 net/sctp/socket.c 	return t;
t                 570 net/sctp/stream.c 			struct sctp_transport *t;
t                 572 net/sctp/stream.c 			t = asoc->strreset_chunk->transport;
t                 573 net/sctp/stream.c 			if (del_timer(&t->reconf_timer))
t                 574 net/sctp/stream.c 				sctp_transport_put(t);
t                 819 net/sctp/stream.c 			struct sctp_transport *t;
t                 821 net/sctp/stream.c 			t = asoc->strreset_chunk->transport;
t                 822 net/sctp/stream.c 			if (del_timer(&t->reconf_timer))
t                 823 net/sctp/stream.c 				sctp_transport_put(t);
t                 915 net/sctp/stream.c 	struct sctp_transport *t;
t                1070 net/sctp/stream.c 		t = asoc->strreset_chunk->transport;
t                1071 net/sctp/stream.c 		if (del_timer(&t->reconf_timer))
t                1072 net/sctp/stream.c 			sctp_transport_put(t);
t                 246 net/sctp/transport.c bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
t                 248 net/sctp/transport.c 	struct dst_entry *dst = sctp_transport_dst_check(t);
t                 249 net/sctp/transport.c 	struct sock *sk = t->asoc->base.sk;
t                 265 net/sctp/transport.c 		pf->to_sk_daddr(&t->ipaddr, sk);
t                 269 net/sctp/transport.c 		dst = sctp_transport_dst_check(t);
t                 273 net/sctp/transport.c 		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
t                 274 net/sctp/transport.c 		dst = t->dst;
t                 280 net/sctp/transport.c 		change = t->pathmtu != pmtu;
t                 282 net/sctp/transport.c 	t->pathmtu = pmtu;
t                 597 net/sctp/transport.c void sctp_transport_burst_limited(struct sctp_transport *t)
t                 599 net/sctp/transport.c 	struct sctp_association *asoc = t->asoc;
t                 600 net/sctp/transport.c 	u32 old_cwnd = t->cwnd;
t                 603 net/sctp/transport.c 	if (t->burst_limited || asoc->max_burst == 0)
t                 606 net/sctp/transport.c 	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
t                 608 net/sctp/transport.c 		t->cwnd = max_burst_bytes;
t                 609 net/sctp/transport.c 		t->burst_limited = old_cwnd;
t                 616 net/sctp/transport.c void sctp_transport_burst_reset(struct sctp_transport *t)
t                 618 net/sctp/transport.c 	if (t->burst_limited) {
t                 619 net/sctp/transport.c 		t->cwnd = t->burst_limited;
t                 620 net/sctp/transport.c 		t->burst_limited = 0;
t                 638 net/sctp/transport.c void sctp_transport_reset(struct sctp_transport *t)
t                 640 net/sctp/transport.c 	struct sctp_association *asoc = t->asoc;
t                 647 net/sctp/transport.c 	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
t                 648 net/sctp/transport.c 	t->burst_limited = 0;
t                 649 net/sctp/transport.c 	t->ssthresh = asoc->peer.i.a_rwnd;
t                 650 net/sctp/transport.c 	t->rto = asoc->rto_initial;
t                 651 net/sctp/transport.c 	sctp_max_rto(asoc, t);
t                 652 net/sctp/transport.c 	t->rtt = 0;
t                 653 net/sctp/transport.c 	t->srtt = 0;
t                 654 net/sctp/transport.c 	t->rttvar = 0;
t                 657 net/sctp/transport.c 	t->partial_bytes_acked = 0;
t                 658 net/sctp/transport.c 	t->flight_size = 0;
t                 659 net/sctp/transport.c 	t->error_count = 0;
t                 660 net/sctp/transport.c 	t->rto_pending = 0;
t                 661 net/sctp/transport.c 	t->hb_sent = 0;
t                 664 net/sctp/transport.c 	t->cacc.changeover_active = 0;
t                 665 net/sctp/transport.c 	t->cacc.cycling_changeover = 0;
t                 666 net/sctp/transport.c 	t->cacc.next_tsn_at_change = 0;
t                 667 net/sctp/transport.c 	t->cacc.cacc_saw_newack = 0;
t                 671 net/sctp/transport.c void sctp_transport_immediate_rtx(struct sctp_transport *t)
t                 674 net/sctp/transport.c 	if (del_timer(&t->T3_rtx_timer))
t                 675 net/sctp/transport.c 		sctp_transport_put(t);
t                 677 net/sctp/transport.c 	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
t                 678 net/sctp/transport.c 	if (!timer_pending(&t->T3_rtx_timer)) {
t                 679 net/sctp/transport.c 		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
t                 680 net/sctp/transport.c 			sctp_transport_hold(t);
t                 685 net/sctp/transport.c void sctp_transport_dst_release(struct sctp_transport *t)
t                 687 net/sctp/transport.c 	dst_release(t->dst);
t                 688 net/sctp/transport.c 	t->dst = NULL;
t                 689 net/sctp/transport.c 	t->dst_pending_confirm = 0;
t                 693 net/sctp/transport.c void sctp_transport_dst_confirm(struct sctp_transport *t)
t                 695 net/sctp/transport.c 	t->dst_pending_confirm = 1;
t                 237 net/smc/smc.h  	__be32 t;
t                 239 net/smc/smc.h  	t = cpu_to_be32(host);
t                 240 net/smc/smc.h  	memcpy(net, ((u8 *)&t) + 1, 3);
t                 246 net/smc/smc.h  	__be32 t = 0;
t                 248 net/smc/smc.h  	memcpy(((u8 *)&t) + 1, net, 3);
t                 249 net/smc/smc.h  	return be32_to_cpu(t);
t                  50 net/sunrpc/clnt.c #define dprint_status(t)					\
t                  51 net/sunrpc/clnt.c 	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
t                  52 net/sunrpc/clnt.c 			__func__, t->tk_status)
t                 142 net/sunrpc/sched.c 	struct rpc_task *t;
t                 144 net/sunrpc/sched.c 	list_for_each_entry(t, q, u.tk_wait.list) {
t                 145 net/sunrpc/sched.c 		if (t->tk_owner == task->tk_owner) {
t                 147 net/sunrpc/sched.c 					&t->u.tk_wait.links);
t                 165 net/sunrpc/sched.c 	struct rpc_task *t;
t                 172 net/sunrpc/sched.c 		t = list_first_entry(&task->u.tk_wait.links,
t                 176 net/sunrpc/sched.c 		q = t->u.tk_wait.list.next;
t                 177 net/sunrpc/sched.c 		list_add_tail(&t->u.tk_wait.list, q);
t                  32 net/sunrpc/svc_xprt.c static void svc_age_temp_xprts(struct timer_list *t);
t                 928 net/sunrpc/svc_xprt.c static void svc_age_temp_xprts(struct timer_list *t)
t                 930 net/sunrpc/svc_xprt.c 	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
t                1008 net/sunrpc/svcsock.c 	int t = 0;
t                1010 net/sunrpc/svcsock.c 	while (t < len) {
t                1014 net/sunrpc/svcsock.c 		t += PAGE_SIZE;
t                 100 net/sunrpc/xprt.c 	struct xprt_class *t;
t                 105 net/sunrpc/xprt.c 	list_for_each_entry(t, &xprt_list, list) {
t                 107 net/sunrpc/xprt.c 		if (t->ident == transport->ident)
t                 132 net/sunrpc/xprt.c 	struct xprt_class *t;
t                 137 net/sunrpc/xprt.c 	list_for_each_entry(t, &xprt_list, list) {
t                 138 net/sunrpc/xprt.c 		if (t == transport) {
t                 164 net/sunrpc/xprt.c 	struct xprt_class *t;
t                 169 net/sunrpc/xprt.c 	list_for_each_entry(t, &xprt_list, list) {
t                 170 net/sunrpc/xprt.c 		if (strcmp(t->name, transport_name) == 0) {
t                 766 net/sunrpc/xprt.c xprt_init_autodisconnect(struct timer_list *t)
t                 768 net/sunrpc/xprt.c 	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
t                1888 net/sunrpc/xprt.c 	struct xprt_class *t;
t                1891 net/sunrpc/xprt.c 	list_for_each_entry(t, &xprt_list, list) {
t                1892 net/sunrpc/xprt.c 		if (t->ident == args->ident) {
t                1902 net/sunrpc/xprt.c 	xprt = t->setup(args);
t                 287 net/tipc/discover.c static void tipc_disc_timeout(struct timer_list *t)
t                 289 net/tipc/discover.c 	struct tipc_discoverer *d = from_timer(d, t, timer);
t                 590 net/tipc/monitor.c static void mon_timeout(struct timer_list *t)
t                 592 net/tipc/monitor.c 	struct tipc_monitor *mon = from_timer(mon, t, timer);
t                 162 net/tipc/node.c static void tipc_node_timeout(struct timer_list *t);
t                 632 net/tipc/node.c static void tipc_node_timeout(struct timer_list *t)
t                 634 net/tipc/node.c 	struct tipc_node *n = from_timer(n, t, timer);
t                 129 net/tipc/socket.c static void tipc_sk_timeout(struct timer_list *t);
t                2675 net/tipc/socket.c static void tipc_sk_timeout(struct timer_list *t)
t                2677 net/tipc/socket.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 103 net/tipc/subscr.c static void tipc_sub_timeout(struct timer_list *t)
t                 105 net/tipc/subscr.c 	struct tipc_subscription *sub = from_timer(sub, t, timer);
t                 284 net/tipc/trace.h 	TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
t                 286 net/tipc/trace.h 	TP_ARGS(r, f, t, tq),
t                 300 net/tipc/trace.h 		__entry->to = t;
t                 312 net/tipc/trace.h 	TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
t                 313 net/tipc/trace.h 	TP_ARGS(r, f, t, tq)
t                 317 net/tipc/trace.h 	TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
t                 318 net/tipc/trace.h 	TP_ARGS(r, f, t, tq),
t                1925 net/vmw_vsock/af_vsock.c int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
t                1941 net/vmw_vsock/af_vsock.c 	transport = t;
t                  34 net/vmw_vsock/virtio_transport_common.c 	const struct vsock_transport *t = vsock_core_get_transport();
t                  36 net/vmw_vsock/virtio_transport_common.c 	return container_of(t, struct virtio_transport, transport);
t                 701 net/vmw_vsock/virtio_transport_common.c 	const struct virtio_transport *t;
t                 721 net/vmw_vsock/virtio_transport_common.c 	t = virtio_transport_get_ops();
t                 722 net/vmw_vsock/virtio_transport_common.c 	if (!t) {
t                 727 net/vmw_vsock/virtio_transport_common.c 	return t->send_pkt(reply);
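virtio_transport_get_ops() above goes from the generic vsock_transport pointer back to the enclosing virtio_transport with container_of(). The same idiom can be shown standalone; everything below (the macro restatement and the struct names) is illustrative rather than taken from the kernel headers:

    #include <stddef.h>
    #include <stdio.h>

    /* userspace restatement of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct transport_ops { int (*send)(void *pkt); };

    struct my_transport {
        int features;
        struct transport_ops ops;    /* embedded member */
    };

    static struct my_transport *to_my_transport(struct transport_ops *ops)
    {
        return container_of(ops, struct my_transport, ops);
    }

    int main(void)
    {
        struct my_transport tp = { .features = 42 };

        printf("%d\n", to_my_transport(&tp.ops)->features);    /* prints 42 */
        return 0;
    }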
t                  48 net/wireless/lib80211.c static void lib80211_crypt_deinit_handler(struct timer_list *t);
t                 120 net/wireless/lib80211.c static void lib80211_crypt_deinit_handler(struct timer_list *t)
t                 122 net/wireless/lib80211.c 	struct lib80211_crypt_info *info = from_timer(info, t,
t                 209 net/wireless/lib80211_crypt_tkip.c 	u16 t = Sbox[Hi8(v)];
t                 210 net/wireless/lib80211_crypt_tkip.c 	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
t                 370 net/x25/af_x25.c static void x25_destroy_timer(struct timer_list *t)
t                 372 net/x25/af_x25.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                  47 net/x25/x25_link.c static void x25_t20timer_expiry(struct timer_list *t)
t                  49 net/x25/x25_link.c 	struct x25_neigh *nb = from_timer(nb, t, t20timer);
t                  24 net/x25/x25_timer.c static void x25_heartbeat_expiry(struct timer_list *t);
t                  25 net/x25/x25_timer.c static void x25_timer_expiry(struct timer_list *t);
t                  90 net/x25/x25_timer.c static void x25_heartbeat_expiry(struct timer_list *t)
t                  92 net/x25/x25_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
t                 157 net/x25/x25_timer.c static void x25_timer_expiry(struct timer_list *t)
t                 159 net/x25/x25_timer.c 	struct x25_sock *x25 = from_timer(x25, t, timer);
t                 163 net/xfrm/xfrm_policy.c static void xfrm_policy_queue_process(struct timer_list *t);
t                 310 net/xfrm/xfrm_policy.c static void xfrm_policy_timer(struct timer_list *t)
t                 312 net/xfrm/xfrm_policy.c 	struct xfrm_policy *xp = from_timer(xp, t, timer);
t                1103 net/xfrm/xfrm_policy.c 	struct xfrm_pol_inexact_bin *bin, *t;
t                1107 net/xfrm/xfrm_policy.c 	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
t                2749 net/xfrm/xfrm_policy.c static void xfrm_policy_queue_process(struct timer_list *t)
t                2754 net/xfrm/xfrm_policy.c 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
t                4070 net/xfrm/xfrm_policy.c 	struct xfrm_pol_inexact_bin *b, *t;
t                4098 net/xfrm/xfrm_policy.c 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
t                4284 net/xfrm/xfrm_policy.c static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
t                4288 net/xfrm/xfrm_policy.c 	if (t->mode == m->mode && t->id.proto == m->proto &&
t                4289 net/xfrm/xfrm_policy.c 	    (m->reqid == 0 || t->reqid == m->reqid)) {
t                4290 net/xfrm/xfrm_policy.c 		switch (t->mode) {
t                4293 net/xfrm/xfrm_policy.c 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
t                4295 net/xfrm/xfrm_policy.c 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
t                 178 net/xfrm/xfrm_state.c int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
t                 609 net/xfrm/xfrm_state.c static void xfrm_replay_timer_handler(struct timer_list *t);
t                2102 net/xfrm/xfrm_state.c static void xfrm_replay_timer_handler(struct timer_list *t)
t                2104 net/xfrm/xfrm_state.c 	struct xfrm_state *x = from_timer(x, t, rtimer);
t                2159 net/xfrm/xfrm_state.c int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
t                2166 net/xfrm/xfrm_state.c 		acqret = km->acquire(x, t, pol);
t                2399 net/xfrm/xfrm_state.c 		struct xfrm_state *t = x->tunnel;
t                2401 net/xfrm/xfrm_state.c 		if (atomic_read(&t->tunnel_users) == 2)
t                2402 net/xfrm/xfrm_state.c 			xfrm_state_delete(t);
t                2403 net/xfrm/xfrm_state.c 		atomic_dec(&t->tunnel_users);
t                2404 net/xfrm/xfrm_state.c 		xfrm_state_put_sync(t);
t                1473 net/xfrm/xfrm_user.c 		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
t                1475 net/xfrm/xfrm_user.c 		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
t                1476 net/xfrm/xfrm_user.c 		memcpy(&t->saddr, &ut->saddr,
t                1478 net/xfrm/xfrm_user.c 		t->reqid = ut->reqid;
t                1479 net/xfrm/xfrm_user.c 		t->mode = ut->mode;
t                1480 net/xfrm/xfrm_user.c 		t->share = ut->share;
t                1481 net/xfrm/xfrm_user.c 		t->optional = ut->optional;
t                1482 net/xfrm/xfrm_user.c 		t->aalgos = ut->aalgos;
t                1483 net/xfrm/xfrm_user.c 		t->ealgos = ut->ealgos;
t                1484 net/xfrm/xfrm_user.c 		t->calgos = ut->calgos;
t                1486 net/xfrm/xfrm_user.c 		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
t                1487 net/xfrm/xfrm_user.c 		t->encap_family = ut->family;
t                2296 net/xfrm/xfrm_user.c 		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
t                2297 net/xfrm/xfrm_user.c 		memcpy(&x->id, &t->id, sizeof(x->id));
t                2298 net/xfrm/xfrm_user.c 		x->props.mode = t->mode;
t                2299 net/xfrm/xfrm_user.c 		x->props.reqid = t->reqid;
t                2301 net/xfrm/xfrm_user.c 		t->aalgos = ua->aalgos;
t                2302 net/xfrm/xfrm_user.c 		t->ealgos = ua->ealgos;
t                2303 net/xfrm/xfrm_user.c 		t->calgos = ua->calgos;
t                2304 net/xfrm/xfrm_user.c 		err = km_query(x, t, xp);
t                  24 samples/bpf/map_perf_test_user.c #define TEST_BIT(t) (1U << (t))
t                  73 samples/bpf/map_perf_test_user.c static int check_test_flags(enum test_type t)
t                  75 samples/bpf/map_perf_test_user.c 	return test_flags & TEST_BIT(t);
t                  35 samples/bpf/tracex2_user.c 	struct task t;
t                  71 samples/bpf/xdp_monitor_user.c 	struct timespec t;
t                  74 samples/bpf/xdp_monitor_user.c 	res = clock_gettime(CLOCK_MONOTONIC, &t);
t                  79 samples/bpf/xdp_monitor_user.c 	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
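The xdp_*_user.c samples read a monotonic nanosecond timestamp with the same few lines each time; a standalone version of that helper, with NANOSEC_PER_SEC written out and the same error handling as the samples:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define NANOSEC_PER_SEC 1000000000ULL

    static uint64_t gettime(void)
    {
        struct timespec t;

        if (clock_gettime(CLOCK_MONOTONIC, &t) < 0) {
            perror("clock_gettime");
            exit(EXIT_FAILURE);
        }
        return (uint64_t)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
    }

The per-period rates printed later in those files are then just the counter delta divided by the elapsed time in seconds.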
t                 300 samples/bpf/xdp_monitor_user.c 	double t = 0, pps = 0;
t                 317 samples/bpf/xdp_monitor_user.c 		t = calc_period_u64(rec, prev);
t                 323 samples/bpf/xdp_monitor_user.c 			pps = calc_pps_u64(r, p, t);
t                 329 samples/bpf/xdp_monitor_user.c 		pps = calc_pps_u64(&rec->total, &prev->total, t);
t                 342 samples/bpf/xdp_monitor_user.c 		t = calc_period_u64(rec, prev);
t                 348 samples/bpf/xdp_monitor_user.c 			pps = calc_pps_u64(r, p, t);
t                 353 samples/bpf/xdp_monitor_user.c 		pps = calc_pps_u64(&rec->total, &prev->total, t);
t                 369 samples/bpf/xdp_monitor_user.c 		t = calc_period(rec, prev);
t                 374 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
t                 375 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
t                 376 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
t                 385 samples/bpf/xdp_monitor_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 387 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(&rec->total, &prev->total, t);
t                 388 samples/bpf/xdp_monitor_user.c 			info = calc_info(&rec->total, &prev->total, t);
t                 408 samples/bpf/xdp_monitor_user.c 		t = calc_period(rec, prev);
t                 413 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
t                 414 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
t                 415 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
t                 422 samples/bpf/xdp_monitor_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 423 samples/bpf/xdp_monitor_user.c 		drop = calc_drop(&rec->total, &prev->total, t);
t                 424 samples/bpf/xdp_monitor_user.c 		info = calc_info(&rec->total, &prev->total, t);
t                 441 samples/bpf/xdp_monitor_user.c 		t = calc_period(rec, prev);
t                 446 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
t                 447 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
t                 448 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
t                 449 samples/bpf/xdp_monitor_user.c 			err  = calc_err(r, p, t);
t                 460 samples/bpf/xdp_monitor_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 461 samples/bpf/xdp_monitor_user.c 		drop = calc_drop(&rec->total, &prev->total, t);
t                 462 samples/bpf/xdp_monitor_user.c 		info = calc_info(&rec->total, &prev->total, t);
t                 463 samples/bpf/xdp_monitor_user.c 		err  = calc_err(&rec->total, &prev->total, t);
t                 146 samples/bpf/xdp_redirect_cpu_user.c 	struct timespec t;
t                 149 samples/bpf/xdp_redirect_cpu_user.c 	res = clock_gettime(CLOCK_MONOTONIC, &t);
t                 154 samples/bpf/xdp_redirect_cpu_user.c 	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
t                 316 samples/bpf/xdp_redirect_cpu_user.c 	double t;
t                 332 samples/bpf/xdp_redirect_cpu_user.c 		t = calc_period(rec, prev);
t                 337 samples/bpf/xdp_redirect_cpu_user.c 			pps = calc_pps(r, p, t);
t                 338 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
t                 339 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
t                 346 samples/bpf/xdp_redirect_cpu_user.c 		pps  = calc_pps(&rec->total, &prev->total, t);
t                 347 samples/bpf/xdp_redirect_cpu_user.c 		drop = calc_drop_pps(&rec->total, &prev->total, t);
t                 348 samples/bpf/xdp_redirect_cpu_user.c 		err  = calc_errs_pps(&rec->total, &prev->total, t);
t                 360 samples/bpf/xdp_redirect_cpu_user.c 		t = calc_period(rec, prev);
t                 365 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
t                 366 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
t                 367 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
t                 376 samples/bpf/xdp_redirect_cpu_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 378 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(&rec->total, &prev->total, t);
t                 379 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(&rec->total, &prev->total, t);
t                 397 samples/bpf/xdp_redirect_cpu_user.c 		t = calc_period(rec, prev);
t                 402 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
t                 403 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
t                 404 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
t                 411 samples/bpf/xdp_redirect_cpu_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 412 samples/bpf/xdp_redirect_cpu_user.c 		drop = calc_drop_pps(&rec->total, &prev->total, t);
t                 413 samples/bpf/xdp_redirect_cpu_user.c 		err  = calc_errs_pps(&rec->total, &prev->total, t);
t                 426 samples/bpf/xdp_redirect_cpu_user.c 		t = calc_period(rec, prev);
t                 431 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
t                 432 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
t                 436 samples/bpf/xdp_redirect_cpu_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 437 samples/bpf/xdp_redirect_cpu_user.c 		drop = calc_drop_pps(&rec->total, &prev->total, t);
t                 448 samples/bpf/xdp_redirect_cpu_user.c 		t = calc_period(rec, prev);
t                 453 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
t                 454 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
t                 458 samples/bpf/xdp_redirect_cpu_user.c 		pps = calc_pps(&rec->total, &prev->total, t);
t                 459 samples/bpf/xdp_redirect_cpu_user.c 		drop = calc_drop_pps(&rec->total, &prev->total, t);
t                 171 samples/bpf/xdp_rxq_info_user.c 	struct timespec t;
t                 174 samples/bpf/xdp_rxq_info_user.c 	res = clock_gettime(CLOCK_MONOTONIC, &t);
t                 179 samples/bpf/xdp_rxq_info_user.c 	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
t                 349 samples/bpf/xdp_rxq_info_user.c 	double t;
t                 368 samples/bpf/xdp_rxq_info_user.c 		t = calc_period(rec, prev);
t                 373 samples/bpf/xdp_rxq_info_user.c 			pps = calc_pps     (r, p, t);
t                 374 samples/bpf/xdp_rxq_info_user.c 			err = calc_errs_pps(r, p, t);
t                 381 samples/bpf/xdp_rxq_info_user.c 		pps  = calc_pps     (&rec->total, &prev->total, t);
t                 382 samples/bpf/xdp_rxq_info_user.c 		err  = calc_errs_pps(&rec->total, &prev->total, t);
t                 402 samples/bpf/xdp_rxq_info_user.c 		t = calc_period(rec, prev);
t                 407 samples/bpf/xdp_rxq_info_user.c 			pps = calc_pps     (r, p, t);
t                 408 samples/bpf/xdp_rxq_info_user.c 			err = calc_errs_pps(r, p, t);
t                 419 samples/bpf/xdp_rxq_info_user.c 		pps  = calc_pps     (&rec->total, &prev->total, t);
t                 420 samples/bpf/xdp_rxq_info_user.c 		err  = calc_errs_pps(&rec->total, &prev->total, t);
t                1763 samples/mic/mpssd/mpssd.c 	time_t t;
t                1772 samples/mic/mpssd/mpssd.c 	time(&t);
t                1773 samples/mic/mpssd/mpssd.c 	ts1 = ctime_r(&t, ts);
t                 749 scripts/asn1_compiler.c 	unsigned nr, t, n;
t                 774 scripts/asn1_compiler.c 	t = 0;
t                 775 scripts/asn1_compiler.c 	types[t].flags |= TYPE_BEGIN;
t                 779 scripts/asn1_compiler.c 			types[t].name = &token_list[n];
t                 780 scripts/asn1_compiler.c 			type_index[t] = &types[t];
t                 781 scripts/asn1_compiler.c 			t++;
t                 784 scripts/asn1_compiler.c 	types[t].name = &token_list[n + 1];
t                 785 scripts/asn1_compiler.c 	types[t].flags |= TYPE_STOP_MARKER;
t                1213 scripts/asn1_compiler.c 	const struct type *t = e->type_def;
t                1215 scripts/asn1_compiler.c 	const char *tname = t && t->name ? t->name->content : ".";
t                  93 scripts/dtc/dtc.h #define for_each_marker_of_type(m, t) \
t                  95 scripts/dtc/dtc.h 		if ((m)->type == (t))
t                  59 scripts/dtc/fdtdump.c 	const char *p, *s, *t;
t                 136 scripts/dtc/fdtdump.c 		t = p;
t                 141 scripts/dtc/fdtdump.c 		print_data(t, sz);
t                  14 scripts/dtc/include-prefixes/dt-bindings/usb/pd.h #define PDO_TYPE(t)	((t) << PDO_TYPE_SHIFT)
t                  67 scripts/dtc/include-prefixes/dt-bindings/usb/pd.h #define PDO_APDO_TYPE(t)	((t) << PDO_APDO_TYPE_SHIFT)
t                 414 scripts/gcc-plugins/gcc-common.h static inline bool tree_fits_shwi_p(const_tree t)
t                 416 scripts/gcc-plugins/gcc-common.h 	if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
t                 419 scripts/gcc-plugins/gcc-common.h 	if (TREE_INT_CST_HIGH(t) == 0 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) >= 0)
t                 422 scripts/gcc-plugins/gcc-common.h 	if (TREE_INT_CST_HIGH(t) == -1 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) < 0 && !TYPE_UNSIGNED(TREE_TYPE(t)))
t                 428 scripts/gcc-plugins/gcc-common.h static inline bool tree_fits_uhwi_p(const_tree t)
t                 430 scripts/gcc-plugins/gcc-common.h 	if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
t                 433 scripts/gcc-plugins/gcc-common.h 	return TREE_INT_CST_HIGH(t) == 0;
t                 436 scripts/gcc-plugins/gcc-common.h static inline HOST_WIDE_INT tree_to_shwi(const_tree t)
t                 438 scripts/gcc-plugins/gcc-common.h 	gcc_assert(tree_fits_shwi_p(t));
t                 439 scripts/gcc-plugins/gcc-common.h 	return TREE_INT_CST_LOW(t);
t                 442 scripts/gcc-plugins/gcc-common.h static inline unsigned HOST_WIDE_INT tree_to_uhwi(const_tree t)
t                 444 scripts/gcc-plugins/gcc-common.h 	gcc_assert(tree_fits_uhwi_p(t));
t                 445 scripts/gcc-plugins/gcc-common.h 	return TREE_INT_CST_LOW(t);
t                 956 scripts/gcc-plugins/gcc-common.h static inline void debug_tree(const_tree t)
t                 958 scripts/gcc-plugins/gcc-common.h 	debug_tree(CONST_CAST_TREE(t));
t                 966 scripts/gcc-plugins/gcc-common.h #define debug_tree(t) debug_tree(CONST_CAST_TREE(t))
t                 139 scripts/genksyms/genksyms.c static enum symbol_type map_to_ns(enum symbol_type t)
t                 141 scripts/genksyms/genksyms.c 	switch (t) {
t                 151 scripts/genksyms/genksyms.c 	return t;
t                 195 scripts/recordmcount.h 	uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
t                 198 scripts/recordmcount.h 	shstr->sh_size = _w(t);
t                 200 scripts/recordmcount.h 	t += sb.st_size;
t                 201 scripts/recordmcount.h 	t += (_align & -t);  /* word-byte align */
t                 202 scripts/recordmcount.h 	new_e_shoff = t;
t                 213 scripts/recordmcount.h 	if (ulseek(t, SEEK_SET) < 0)
t                 215 scripts/recordmcount.h 	t += sizeof(Elf_Shdr) * old_shnum;
t                 221 scripts/recordmcount.h 	t += 2*sizeof(mcsec);
t                 227 scripts/recordmcount.h 	mcsec.sh_offset = _w(t);
t                 242 scripts/recordmcount.h 	mcsec.sh_offset = _w((void *)mlocp - (void *)mloc0 + t);
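The recordmcount.h line t += (_align & -t) is the usual mask-based round-up: with _align equal to alignment minus one (a power of two minus one), it advances t to the next aligned offset, or leaves it unchanged if already aligned. A standalone check of that arithmetic (align_up is an illustrative name):

    #include <assert.h>
    #include <stdint.h>

    /* round t up to a multiple of (mask + 1); mask must be 2^n - 1 */
    static uint64_t align_up(uint64_t t, uint64_t mask)
    {
        return t + (mask & -t);
    }

    int main(void)
    {
        assert(align_up(12, 3) == 12);    /* already 4-byte aligned */
        assert(align_up(13, 3) == 16);    /* rounded up */
        assert(align_up(1, 7) == 8);      /* 8-byte alignment */
        return 0;
    }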
t                 333 scripts/recordmcount.h 	unsigned t;
t                 337 scripts/recordmcount.h 	for (t = nrel; t; --t) {
t                 380 scripts/recordmcount.h 	unsigned t;
t                 385 scripts/recordmcount.h 	for (t = nrel; t; --t) {
t                 443 scripts/recordmcount.h 	unsigned t;
t                 445 scripts/recordmcount.h 	for (symp = sym0, t = nsym; t; --t, ++symp) {
t                1190 scripts/unifdef.c strlcmp(const char *s, const char *t, size_t n)
t                1192 scripts/unifdef.c 	while (n-- && *t != '\0')
t                1193 scripts/unifdef.c 		if (*s != *t)
t                1194 scripts/unifdef.c 			return ((unsigned char)*s - (unsigned char)*t);
t                1196 scripts/unifdef.c 			++s, ++t;
t                  77 security/apparmor/apparmorfs.c 	char *t = target;
t                  85 security/apparmor/apparmorfs.c 				*(t)++ = '.';
t                  87 security/apparmor/apparmorfs.c 				*(t)++ = '_';
t                  89 security/apparmor/apparmorfs.c 				*(t)++ = *name;
t                  92 security/apparmor/apparmorfs.c 		*t = 0;
t                 104 security/apparmor/apparmorfs.c 	return t - target;
t                 822 security/apparmor/apparmorfs.c 	struct multi_transaction *t;
t                 824 security/apparmor/apparmorfs.c 	t = container_of(kref, struct multi_transaction, count);
t                 825 security/apparmor/apparmorfs.c 	free_page((unsigned long) t);
t                 829 security/apparmor/apparmorfs.c get_multi_transaction(struct multi_transaction *t)
t                 831 security/apparmor/apparmorfs.c 	if  (t)
t                 832 security/apparmor/apparmorfs.c 		kref_get(&(t->count));
t                 834 security/apparmor/apparmorfs.c 	return t;
t                 837 security/apparmor/apparmorfs.c static void put_multi_transaction(struct multi_transaction *t)
t                 839 security/apparmor/apparmorfs.c 	if (t)
t                 840 security/apparmor/apparmorfs.c 		kref_put(&(t->count), multi_transaction_kref);
t                 863 security/apparmor/apparmorfs.c 	struct multi_transaction *t;
t                 868 security/apparmor/apparmorfs.c 	t = (struct multi_transaction *)get_zeroed_page(GFP_KERNEL);
t                 869 security/apparmor/apparmorfs.c 	if (!t)
t                 871 security/apparmor/apparmorfs.c 	kref_init(&t->count);
t                 872 security/apparmor/apparmorfs.c 	if (copy_from_user(t->data, buf, size))
t                 875 security/apparmor/apparmorfs.c 	return t;
t                 881 security/apparmor/apparmorfs.c 	struct multi_transaction *t;
t                 885 security/apparmor/apparmorfs.c 	t = get_multi_transaction(file->private_data);
t                 887 security/apparmor/apparmorfs.c 	if (!t)
t                 890 security/apparmor/apparmorfs.c 	ret = simple_read_from_buffer(buf, size, pos, t->data, t->size);
t                 891 security/apparmor/apparmorfs.c 	put_multi_transaction(t);
t                 936 security/apparmor/apparmorfs.c 	struct multi_transaction *t;
t                 942 security/apparmor/apparmorfs.c 	t = multi_transaction_new(file, ubuf, count);
t                 943 security/apparmor/apparmorfs.c 	if (IS_ERR(t))
t                 944 security/apparmor/apparmorfs.c 		return PTR_ERR(t);
t                 947 security/apparmor/apparmorfs.c 	    !memcmp(t->data, QUERY_CMD_PROFILE, QUERY_CMD_PROFILE_LEN)) {
t                 948 security/apparmor/apparmorfs.c 		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t                 949 security/apparmor/apparmorfs.c 				  t->data + QUERY_CMD_PROFILE_LEN,
t                 952 security/apparmor/apparmorfs.c 		   !memcmp(t->data, QUERY_CMD_LABEL, QUERY_CMD_LABEL_LEN)) {
t                 953 security/apparmor/apparmorfs.c 		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t                 954 security/apparmor/apparmorfs.c 				  t->data + QUERY_CMD_LABEL_LEN,
t                 957 security/apparmor/apparmorfs.c 		   !memcmp(t->data, QUERY_CMD_LABELALL,
t                 959 security/apparmor/apparmorfs.c 		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t                 960 security/apparmor/apparmorfs.c 				  t->data + QUERY_CMD_LABELALL_LEN,
t                 963 security/apparmor/apparmorfs.c 		   !memcmp(t->data, QUERY_CMD_DATA, QUERY_CMD_DATA_LEN)) {
t                 964 security/apparmor/apparmorfs.c 		len = query_data(t->data, MULTI_TRANSACTION_LIMIT,
t                 965 security/apparmor/apparmorfs.c 				 t->data + QUERY_CMD_DATA_LEN,
t                 971 security/apparmor/apparmorfs.c 		put_multi_transaction(t);
t                 975 security/apparmor/apparmorfs.c 	multi_transaction_set(file, t, len);
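The apparmorfs multi_transaction entries follow the standard kref lifecycle: kref_init() when the page is allocated, kref_get()/kref_put() around each user, and a release callback that frees the page once the count drops to zero. A stripped-down kernel-style sketch of that lifecycle (my_transaction and the helper names are illustrative):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/gfp.h>

    struct my_transaction {
        struct kref count;
        char data[];        /* payload fills the rest of the page */
    };

    static void my_transaction_release(struct kref *kref)
    {
        struct my_transaction *t =
            container_of(kref, struct my_transaction, count);

        free_page((unsigned long)t);
    }

    static struct my_transaction *my_transaction_new(void)
    {
        struct my_transaction *t =
            (struct my_transaction *)get_zeroed_page(GFP_KERNEL);

        if (t)
            kref_init(&t->count);
        return t;
    }

    static void my_transaction_put(struct my_transaction *t)
    {
        if (t)
            kref_put(&t->count, my_transaction_release);
    }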
t                  39 security/keys/request_key.c 	struct task_struct *t = current;
t                  41 security/keys/request_key.c 	key_put(t->cached_requested_key);
t                  42 security/keys/request_key.c 	t->cached_requested_key = key_get(key);
t                  43 security/keys/request_key.c 	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
t                1159 security/selinux/ss/policydb.c static void type_set_init(struct type_set *t)
t                1161 security/selinux/ss/policydb.c 	ebitmap_init(&t->types);
t                1162 security/selinux/ss/policydb.c 	ebitmap_init(&t->negset);
t                1165 security/selinux/ss/policydb.c static int type_set_read(struct type_set *t, void *fp)
t                1170 security/selinux/ss/policydb.c 	if (ebitmap_read(&t->types, fp))
t                1172 security/selinux/ss/policydb.c 	if (ebitmap_read(&t->negset, fp))
t                1178 security/selinux/ss/policydb.c 	t->flags = le32_to_cpu(buf[0]);
t                2788 security/selinux/ss/policydb.c static int type_set_write(struct type_set *t, void *fp)
t                2793 security/selinux/ss/policydb.c 	if (ebitmap_write(&t->types, fp))
t                2795 security/selinux/ss/policydb.c 	if (ebitmap_write(&t->negset, fp))
t                2798 security/selinux/ss/policydb.c 	buf[0] = cpu_to_le32(t->flags);
t                 725 security/selinux/ss/services.c 	char *o = NULL, *n = NULL, *t = NULL;
t                 732 security/selinux/ss/services.c 	if (context_struct_to_string(p, tcontext, &t, &tlen))
t                 737 security/selinux/ss/services.c 		  o, n, t, sym_name(p, SYM_CLASSES, tclass-1));
t                 741 security/selinux/ss/services.c 	kfree(t);
t                1583 security/selinux/ss/services.c 	char *s = NULL, *t = NULL, *n = NULL;
t                1589 security/selinux/ss/services.c 	if (context_struct_to_string(policydb, tcontext, &t, &tlen))
t                1599 security/selinux/ss/services.c 			 s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
t                1603 security/selinux/ss/services.c 	kfree(t);
t                 396 security/smack/smack.h 						const struct task_struct *t)
t                 403 security/smack/smack.h 	cred = __task_cred(t);
t                 463 security/smack/smack.h 					 struct task_struct *t)
t                 465 security/smack/smack.h 	a->a.u.tsk = t;
t                 495 security/smack/smack.h 					 struct task_struct *t)
t                  33 sound/core/hrtimer.c 	struct snd_timer *t = stime->timer;
t                  38 sound/core/hrtimer.c 	spin_lock(&t->lock);
t                  39 sound/core/hrtimer.c 	if (!t->running)
t                  42 sound/core/hrtimer.c 	ticks = t->sticks;
t                  43 sound/core/hrtimer.c 	spin_unlock(&t->lock);
t                  52 sound/core/hrtimer.c 	spin_lock(&t->lock);
t                  53 sound/core/hrtimer.c 	if (t->running) {
t                  54 sound/core/hrtimer.c 		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
t                  60 sound/core/hrtimer.c 	spin_unlock(&t->lock);
t                  64 sound/core/hrtimer.c static int snd_hrtimer_open(struct snd_timer *t)
t                  72 sound/core/hrtimer.c 	stime->timer = t;
t                  74 sound/core/hrtimer.c 	t->private_data = stime;
t                  78 sound/core/hrtimer.c static int snd_hrtimer_close(struct snd_timer *t)
t                  80 sound/core/hrtimer.c 	struct snd_hrtimer *stime = t->private_data;
t                  83 sound/core/hrtimer.c 		spin_lock_irq(&t->lock);
t                  84 sound/core/hrtimer.c 		t->running = 0; /* just to be sure */
t                  86 sound/core/hrtimer.c 		spin_unlock_irq(&t->lock);
t                  90 sound/core/hrtimer.c 		t->private_data = NULL;
t                  95 sound/core/hrtimer.c static int snd_hrtimer_start(struct snd_timer *t)
t                  97 sound/core/hrtimer.c 	struct snd_hrtimer *stime = t->private_data;
t                 101 sound/core/hrtimer.c 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
t                 106 sound/core/hrtimer.c static int snd_hrtimer_stop(struct snd_timer *t)
t                 108 sound/core/hrtimer.c 	struct snd_hrtimer *stime = t->private_data;
t                 122 sound/core/oss/mulaw.c 	int t;
t                 131 sound/core/oss/mulaw.c 	t = ((u_val & QUANT_MASK) << 3) + BIAS;
t                 132 sound/core/oss/mulaw.c 	t <<= ((unsigned)u_val & SEG_MASK) >> SEG_SHIFT;
t                 134 sound/core/oss/mulaw.c 	return ((u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS));
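The mulaw.c lines above are the classic G.711 mu-law expansion. Filled out with the standard constants and the input complement that the excerpt does not show (both assumed here, though they are the textbook form), a self-contained version is:

    #include <stdio.h>

    #define SIGN_BIT   0x80    /* sign bit of the mu-law byte */
    #define QUANT_MASK 0x0f    /* quantization bits */
    #define SEG_MASK   0x70    /* segment (exponent) bits */
    #define SEG_SHIFT  4
    #define BIAS       0x84    /* bias added before shifting */

    static int ulaw2linear(unsigned char u_val)
    {
        int t;

        u_val = ~u_val;    /* complement to get the normal mu-law value */
        t = ((u_val & QUANT_MASK) << 3) + BIAS;
        t <<= ((unsigned)u_val & SEG_MASK) >> SEG_SHIFT;
        return (u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS);
    }

    int main(void)
    {
        /* 0xff and 0x7f both decode to silence; 0x00 is the negative peak */
        printf("%d %d %d\n", ulaw2linear(0xff), ulaw2linear(0x7f),
               ulaw2linear(0x00));    /* prints: 0 0 -32124 */
        return 0;
    }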
t                 108 sound/core/oss/pcm_oss.c 	struct snd_interval t;
t                 109 sound/core/oss/pcm_oss.c 	t.empty = 0;
t                 110 sound/core/oss/pcm_oss.c 	t.min = t.max = val;
t                 111 sound/core/oss/pcm_oss.c 	t.openmin = t.openmax = 0;
t                 112 sound/core/oss/pcm_oss.c 	t.integer = 1;
t                 113 sound/core/oss/pcm_oss.c 	return snd_interval_refine(i, &t);
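Most ALSA entries from here on build a temporary struct snd_interval on the stack and narrow an existing interval with snd_interval_refine(), exactly as the pcm_oss.c lines above do for a single value. A kernel-style sketch of the same idiom for an arbitrary closed range; the helper name and bounds are illustrative:

    #include <sound/pcm.h>
    #include <sound/pcm_params.h>

    /* constrain interval i to the closed integer range [lo, hi] */
    static int refine_to_range(struct snd_interval *i,
                               unsigned int lo, unsigned int hi)
    {
        struct snd_interval t;

        snd_interval_any(&t);    /* start from a fully open interval */
        t.min = lo;
        t.max = hi;
        t.openmin = t.openmax = 0;
        t.integer = 1;
        return snd_interval_refine(i, &t);    /* intersect with i */
    }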
t                 471 sound/core/oss/pcm_oss.c 			struct snd_interval t;
t                 472 sound/core/oss/pcm_oss.c 			t.openmin = 1;
t                 473 sound/core/oss/pcm_oss.c 			t.openmax = 1;
t                 474 sound/core/oss/pcm_oss.c 			t.empty = 0;
t                 475 sound/core/oss/pcm_oss.c 			t.integer = 0;
t                 477 sound/core/oss/pcm_oss.c 				t.min = val - 1;
t                 478 sound/core/oss/pcm_oss.c 				t.max = val;
t                 480 sound/core/oss/pcm_oss.c 				t.min = val;
t                 481 sound/core/oss/pcm_oss.c 				t.max = val+1;
t                 483 sound/core/oss/pcm_oss.c 			changed = snd_interval_refine(i, &t);
t                  58 sound/core/pcm_drm_eld.c 	struct snd_interval t = { .min = 1, .max = 2, .integer = 1, };
t                  74 sound/core/pcm_drm_eld.c 				t.max = max(t.max, sad_max_channels(sad));
t                  77 sound/core/pcm_drm_eld.c 	return snd_interval_refine(c, &t);
t                 782 sound/core/pcm_lib.c 	struct snd_interval t;
t                 820 sound/core/pcm_lib.c 	t.min = div_down(best_num, best_den);
t                 821 sound/core/pcm_lib.c 	t.openmin = !!(best_num % best_den);
t                 861 sound/core/pcm_lib.c 	t.max = div_up(best_num, best_den);
t                 862 sound/core/pcm_lib.c 	t.openmax = !!(best_num % best_den);
t                 863 sound/core/pcm_lib.c 	t.integer = 0;
t                 864 sound/core/pcm_lib.c 	err = snd_interval_refine(i, &t);
t                 900 sound/core/pcm_lib.c 	struct snd_interval t;
t                 932 sound/core/pcm_lib.c 	t.min = div_down(best_num, best_den);
t                 933 sound/core/pcm_lib.c 	t.openmin = !!(best_num % best_den);
t                 964 sound/core/pcm_lib.c 	t.max = div_up(best_num, best_den);
t                 965 sound/core/pcm_lib.c 	t.openmax = !!(best_num % best_den);
t                 966 sound/core/pcm_lib.c 	t.integer = 0;
t                 967 sound/core/pcm_lib.c 	err = snd_interval_refine(i, &t);
t                1232 sound/core/pcm_lib.c 	struct snd_interval t;
t                1233 sound/core/pcm_lib.c 	t.min = min;
t                1234 sound/core/pcm_lib.c 	t.max = max;
t                1235 sound/core/pcm_lib.c 	t.openmin = t.openmax = 0;
t                1236 sound/core/pcm_lib.c 	t.integer = 0;
t                1237 sound/core/pcm_lib.c 	return snd_interval_refine(constrs_interval(constrs, var), &t);
t                1839 sound/core/pcm_lib.c 				long t = runtime->period_size * 2 /
t                1841 sound/core/pcm_lib.c 				wait_time = max(t, wait_time);
t                1898 sound/core/pcm_native.c 				long t = runtime->period_size * 2 / runtime->rate;
t                1899 sound/core/pcm_native.c 				tout = max(t, tout);
t                2105 sound/core/pcm_native.c 	struct snd_interval t;
t                2107 sound/core/pcm_native.c 		     hw_param_interval_c(params, rule->deps[1]), &t);
t                2108 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                2114 sound/core/pcm_native.c 	struct snd_interval t;
t                2116 sound/core/pcm_native.c 		     hw_param_interval_c(params, rule->deps[1]), &t);
t                2117 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                2123 sound/core/pcm_native.c 	struct snd_interval t;
t                2126 sound/core/pcm_native.c 			 (unsigned long) rule->private, &t);
t                2127 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                2133 sound/core/pcm_native.c 	struct snd_interval t;
t                2136 sound/core/pcm_native.c 			 hw_param_interval_c(params, rule->deps[1]), &t);
t                2137 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                2165 sound/core/pcm_native.c 	struct snd_interval t;
t                2167 sound/core/pcm_native.c 	t.min = UINT_MAX;
t                2168 sound/core/pcm_native.c 	t.max = 0;
t                2169 sound/core/pcm_native.c 	t.openmin = 0;
t                2170 sound/core/pcm_native.c 	t.openmax = 0;
t                2178 sound/core/pcm_native.c 		if (t.min > (unsigned)bits)
t                2179 sound/core/pcm_native.c 			t.min = bits;
t                2180 sound/core/pcm_native.c 		if (t.max < (unsigned)bits)
t                2181 sound/core/pcm_native.c 			t.max = bits;
t                2183 sound/core/pcm_native.c 	t.integer = 1;
t                2184 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                2213 sound/core/pcm_native.c 	struct snd_interval t;
t                2215 sound/core/pcm_native.c 	t.min = 0;
t                2216 sound/core/pcm_native.c 	t.max = substream->buffer_bytes_max;
t                2217 sound/core/pcm_native.c 	t.openmin = 0;
t                2218 sound/core/pcm_native.c 	t.openmax = 0;
t                2219 sound/core/pcm_native.c 	t.integer = 1;
t                2220 sound/core/pcm_native.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                 225 sound/core/seq/oss/seq_oss_event.c 	switch (q->t.cmd) {
t                 233 sound/core/seq/oss/seq_oss_event.c 			tmp.echo = (q->t.time << 8) | SEQ_ECHO;
t                 249 sound/core/seq/oss/seq_oss_event.c 			return snd_seq_oss_timer_tempo(dp->timer, q->t.time);
t                 437 sound/core/seq/oss/seq_oss_event.c 		snd_seq_oss_writeq_wakeup(dp->writeq, rec->t.time);
t                  84 sound/core/seq/oss/seq_oss_event.h 	struct evrec_timer t;
t                 227 sound/core/seq/oss/seq_oss_readq.c 			rec.t.code = EV_TIMING;
t                 228 sound/core/seq/oss/seq_oss_readq.c 			rec.t.cmd = TMR_WAIT_ABS;
t                 229 sound/core/seq/oss/seq_oss_readq.c 			rec.t.time = curt;
t                  75 sound/core/seq/oss/seq_oss_timer.c 	abstime_t parm = ev->t.time;
t                  77 sound/core/seq/oss/seq_oss_timer.c 	if (ev->t.code == EV_TIMING) {
t                  78 sound/core/seq/oss/seq_oss_timer.c 		switch (ev->t.cmd) {
t                 103 sound/core/seq/oss/seq_oss_writeq.c 		rec->t.code = SEQ_SYNCTIMER;
t                 104 sound/core/seq/oss/seq_oss_writeq.c 		rec->t.time = time;
t                  60 sound/core/seq/seq_timer.c 	struct snd_seq_timer *t = *tmr;
t                  63 sound/core/seq/seq_timer.c 	if (t == NULL) {
t                  67 sound/core/seq/seq_timer.c 	t->running = 0;
t                  70 sound/core/seq/seq_timer.c 	snd_seq_timer_stop(t);
t                  71 sound/core/seq/seq_timer.c 	snd_seq_timer_reset(t);
t                  73 sound/core/seq/seq_timer.c 	kfree(t);
t                 260 sound/core/seq/seq_timer.c 	struct snd_timer_instance *t;
t                 275 sound/core/seq/seq_timer.c 	err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
t                 285 sound/core/seq/seq_timer.c 			err = snd_timer_open(&t, str, &tid, q->queue);
t                 292 sound/core/seq/seq_timer.c 	t->callback = snd_seq_timer_interrupt;
t                 293 sound/core/seq/seq_timer.c 	t->callback_data = q;
t                 294 sound/core/seq/seq_timer.c 	t->flags |= SNDRV_TIMER_IFLG_AUTO;
t                 296 sound/core/seq/seq_timer.c 	tmr->timeri = t;
t                 304 sound/core/seq/seq_timer.c 	struct snd_timer_instance *t;
t                 310 sound/core/seq/seq_timer.c 	t = tmr->timeri;
t                 313 sound/core/seq/seq_timer.c 	if (t)
t                 314 sound/core/seq/seq_timer.c 		snd_timer_close(t);
t                 342 sound/core/seq/seq_timer.c 	struct snd_timer *t;
t                 345 sound/core/seq/seq_timer.c 	t = tmr->timeri->timer;
t                 346 sound/core/seq/seq_timer.c 	if (!t)
t                 358 sound/core/seq/seq_timer.c 	if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
t                 295 sound/core/timer.c 		struct snd_timer_instance *t =
t                 298 sound/core/timer.c 		if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
t                1118 sound/core/timer.c static void snd_timer_s_function(struct timer_list *t)
t                1120 sound/core/timer.c 	struct snd_timer_system_private *priv = from_timer(priv, t,
t                1611 sound/core/timer.c 	struct snd_timer *t;
t                1623 sound/core/timer.c 	t = snd_timer_find(&tid);
t                1624 sound/core/timer.c 	if (t != NULL) {
t                1625 sound/core/timer.c 		ginfo->card = t->card ? t->card->number : -1;
t                1626 sound/core/timer.c 		if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
t                1628 sound/core/timer.c 		strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
t                1629 sound/core/timer.c 		strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
t                1630 sound/core/timer.c 		ginfo->resolution = t->hw.resolution;
t                1631 sound/core/timer.c 		if (t->hw.resolution_min > 0) {
t                1632 sound/core/timer.c 			ginfo->resolution_min = t->hw.resolution_min;
t                1633 sound/core/timer.c 			ginfo->resolution_max = t->hw.resolution_max;
t                1635 sound/core/timer.c 		list_for_each(p, &t->open_list_head) {
t                1650 sound/core/timer.c 	struct snd_timer *t;
t                1654 sound/core/timer.c 	t = snd_timer_find(&gparams->tid);
t                1655 sound/core/timer.c 	if (!t) {
t                1659 sound/core/timer.c 	if (!list_empty(&t->open_list_head)) {
t                1663 sound/core/timer.c 	if (!t->hw.set_period) {
t                1667 sound/core/timer.c 	err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
t                1688 sound/core/timer.c 	struct snd_timer *t;
t                1697 sound/core/timer.c 	t = snd_timer_find(&tid);
t                1698 sound/core/timer.c 	if (t != NULL) {
t                1699 sound/core/timer.c 		spin_lock_irq(&t->lock);
t                1700 sound/core/timer.c 		gstatus.resolution = snd_timer_hw_resolution(t);
t                1701 sound/core/timer.c 		if (t->hw.precise_resolution) {
t                1702 sound/core/timer.c 			t->hw.precise_resolution(t, &gstatus.resolution_num,
t                1708 sound/core/timer.c 		spin_unlock_irq(&t->lock);
t                1758 sound/core/timer.c 	struct snd_timer *t;
t                1764 sound/core/timer.c 	t = tu->timeri->timer;
t                1765 sound/core/timer.c 	if (!t)
t                1771 sound/core/timer.c 	info->card = t->card ? t->card->number : -1;
t                1772 sound/core/timer.c 	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
t                1774 sound/core/timer.c 	strlcpy(info->id, t->id, sizeof(info->id));
t                1775 sound/core/timer.c 	strlcpy(info->name, t->name, sizeof(info->name));
t                1776 sound/core/timer.c 	info->resolution = t->hw.resolution;
t                1788 sound/core/timer.c 	struct snd_timer *t;
t                1794 sound/core/timer.c 	t = tu->timeri->timer;
t                1795 sound/core/timer.c 	if (!t)
t                1799 sound/core/timer.c 	if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
t                1838 sound/core/timer.c 	spin_lock_irq(&t->lock);
t                1848 sound/core/timer.c 	spin_unlock_irq(&t->lock);
t                  52 sound/core/timer_compat.c 	struct snd_timer *t;
t                  57 sound/core/timer_compat.c 	t = tu->timeri->timer;
t                  58 sound/core/timer_compat.c 	if (!t)
t                  61 sound/core/timer_compat.c 	info.card = t->card ? t->card->number : -1;
t                  62 sound/core/timer_compat.c 	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
t                  64 sound/core/timer_compat.c 	strlcpy(info.id, t->id, sizeof(info.id));
t                  65 sound/core/timer_compat.c 	strlcpy(info.name, t->name, sizeof(info.name));
t                  66 sound/core/timer_compat.c 	info.resolution = t->hw.resolution;
t                 513 sound/drivers/aloop.c static void loopback_timer_function(struct timer_list *t)
t                 515 sound/drivers/aloop.c 	struct loopback_pcm *dpcm = from_timer(dpcm, t, timer);
t                 623 sound/drivers/aloop.c 	struct snd_interval t;
t                 626 sound/drivers/aloop.c 	t.min = cable->hw.rate_min;
t                 627 sound/drivers/aloop.c 	t.max = cable->hw.rate_max;
t                 629 sound/drivers/aloop.c         t.openmin = t.openmax = 0;
t                 630 sound/drivers/aloop.c         t.integer = 0;
t                 631 sound/drivers/aloop.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                 639 sound/drivers/aloop.c 	struct snd_interval t;
t                 642 sound/drivers/aloop.c 	t.min = cable->hw.channels_min;
t                 643 sound/drivers/aloop.c 	t.max = cable->hw.channels_max;
t                 645 sound/drivers/aloop.c         t.openmin = t.openmax = 0;
t                 646 sound/drivers/aloop.c         t.integer = 0;
t                 647 sound/drivers/aloop.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                 295 sound/drivers/dummy.c static void dummy_systimer_callback(struct timer_list *t)
t                 297 sound/drivers/dummy.c 	struct dummy_systimer_pcm *dpcm = from_timer(dpcm, t, timer);
t                 157 sound/drivers/mpu401/mpu401_uart.c static void snd_mpu401_uart_timer(struct timer_list *t)
t                 159 sound/drivers/mpu401/mpu401_uart.c 	struct snd_mpu401 *mpu = from_timer(mpu, t, timer);
t                 393 sound/drivers/mtpav.c static void snd_mtpav_output_timer(struct timer_list *t)
t                 396 sound/drivers/mtpav.c 	struct mtpav *chip = from_timer(chip, t, timer);
t                 223 sound/drivers/opl3/opl3_midi.c void snd_opl3_timer_func(struct timer_list *t)
t                 226 sound/drivers/opl3/opl3_midi.c 	struct snd_opl3 *opl3 = from_timer(opl3, t, tlist);
t                  27 sound/drivers/opl3/opl3_voice.h void snd_opl3_timer_func(struct timer_list *t);
t                 299 sound/drivers/serial-u16550.c static void snd_uart16550_buffer_timer(struct timer_list *t)
t                 304 sound/drivers/serial-u16550.c 	uart = from_timer(uart, t, buffer_timer);
t                 158 sound/firewire/amdtp-stream.c 	struct snd_interval t = {0};
t                 167 sound/firewire/amdtp-stream.c 	t.min = roundup(s->min, step);
t                 168 sound/firewire/amdtp-stream.c 	t.max = rounddown(s->max, step);
t                 169 sound/firewire/amdtp-stream.c 	t.integer = 1;
t                 171 sound/firewire/amdtp-stream.c 	return snd_interval_refine(s, &t);
t                  18 sound/firewire/bebob/bebob_pcm.c 	struct snd_interval t = {
t                  31 sound/firewire/bebob/bebob_pcm.c 		t.min = min(t.min, snd_bebob_rate_table[i]);
t                  32 sound/firewire/bebob/bebob_pcm.c 		t.max = max(t.max, snd_bebob_rate_table[i]);
t                  35 sound/firewire/bebob/bebob_pcm.c 	return snd_interval_refine(r, &t);
t                  46 sound/firewire/bebob/bebob_pcm.c 	struct snd_interval t = {
t                  60 sound/firewire/bebob/bebob_pcm.c 		t.min = min(t.min, formations[i].pcm);
t                  61 sound/firewire/bebob/bebob_pcm.c 		t.max = max(t.max, formations[i].pcm);
t                  64 sound/firewire/bebob/bebob_pcm.c 	return snd_interval_refine(c, &t);
t                  17 sound/firewire/digi00x/digi00x-pcm.c 	struct snd_interval t = {
t                  27 sound/firewire/digi00x/digi00x-pcm.c 		t.min = min(t.min, snd_dg00x_stream_rates[i]);
t                  28 sound/firewire/digi00x/digi00x-pcm.c 		t.max = max(t.max, snd_dg00x_stream_rates[i]);
t                  31 sound/firewire/digi00x/digi00x-pcm.c 	return snd_interval_refine(r, &t);
t                  41 sound/firewire/digi00x/digi00x-pcm.c 	struct snd_interval t = {
t                  50 sound/firewire/digi00x/digi00x-pcm.c 		t.min = min(t.min, snd_dg00x_stream_pcm_channels[i]);
t                  51 sound/firewire/digi00x/digi00x-pcm.c 		t.max = max(t.max, snd_dg00x_stream_pcm_channels[i]);
t                  54 sound/firewire/digi00x/digi00x-pcm.c 	return snd_interval_refine(c, &t);
t                 234 sound/firewire/fcp.c 	struct fcp_transaction t;
t                 237 sound/firewire/fcp.c 	t.unit = unit;
t                 238 sound/firewire/fcp.c 	t.response_buffer = response;
t                 239 sound/firewire/fcp.c 	t.response_size = response_size;
t                 240 sound/firewire/fcp.c 	t.response_match_bytes = response_match_bytes;
t                 241 sound/firewire/fcp.c 	t.state = STATE_PENDING;
t                 242 sound/firewire/fcp.c 	init_waitqueue_head(&t.wait);
t                 245 sound/firewire/fcp.c 		t.deferrable = true;
t                 248 sound/firewire/fcp.c 	list_add_tail(&t.list, &transactions);
t                 254 sound/firewire/fcp.c 		ret = snd_fw_transaction(t.unit, tcode,
t                 260 sound/firewire/fcp.c 		wait_event_timeout(t.wait, t.state != STATE_PENDING,
t                 263 sound/firewire/fcp.c 		if (t.state == STATE_DEFERRED) {
t                 271 sound/firewire/fcp.c 			t.state = STATE_PENDING;
t                 273 sound/firewire/fcp.c 		} else if (t.state == STATE_COMPLETE) {
t                 274 sound/firewire/fcp.c 			ret = t.response_size;
t                 276 sound/firewire/fcp.c 		} else if (t.state == STATE_BUS_RESET) {
t                 279 sound/firewire/fcp.c 			dev_err(&t.unit->device, "FCP command timed out\n");
t                 286 sound/firewire/fcp.c 	list_del(&t.list);
t                 303 sound/firewire/fcp.c 	struct fcp_transaction *t;
t                 306 sound/firewire/fcp.c 	list_for_each_entry(t, &transactions, list) {
t                 307 sound/firewire/fcp.c 		if (t->unit == unit &&
t                 308 sound/firewire/fcp.c 		    (t->state == STATE_PENDING ||
t                 309 sound/firewire/fcp.c 		     t->state == STATE_DEFERRED)) {
t                 310 sound/firewire/fcp.c 			t->state = STATE_BUS_RESET;
t                 311 sound/firewire/fcp.c 			wake_up(&t->wait);
t                 345 sound/firewire/fcp.c 	struct fcp_transaction *t;
t                 352 sound/firewire/fcp.c 	list_for_each_entry(t, &transactions, list) {
t                 353 sound/firewire/fcp.c 		struct fw_device *device = fw_parent_device(t->unit);
t                 361 sound/firewire/fcp.c 		if (t->state == STATE_PENDING &&
t                 362 sound/firewire/fcp.c 		    is_matching_response(t, data, length)) {
t                 363 sound/firewire/fcp.c 			if (t->deferrable && *(const u8 *)data == 0x0f) {
t                 364 sound/firewire/fcp.c 				t->state = STATE_DEFERRED;
t                 366 sound/firewire/fcp.c 				t->state = STATE_COMPLETE;
t                 367 sound/firewire/fcp.c 				t->response_size = min_t(unsigned int, length,
t                 368 sound/firewire/fcp.c 							 t->response_size);
t                 369 sound/firewire/fcp.c 				memcpy(t->response_buffer, data,
t                 370 sound/firewire/fcp.c 				       t->response_size);
t                 372 sound/firewire/fcp.c 			wake_up(&t->wait);
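fcp.c above (and fireworks_transaction.c below) shares one shape: an on-stack transaction record is linked onto a global list, the caller sleeps in wait_event_timeout() until the response handler flips the record's state, and the record is unlinked before returning. A condensed kernel-style sketch of that request/response rendezvous; the names, lock and timeout here are illustrative:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    enum { STATE_PENDING, STATE_COMPLETE };

    struct my_transaction {
        struct list_head list;
        wait_queue_head_t wait;
        int state;
    };

    static LIST_HEAD(transactions);
    static DEFINE_SPINLOCK(transactions_lock);

    static int my_transaction_run(void)
    {
        struct my_transaction t = { .state = STATE_PENDING };
        int ret;

        INIT_LIST_HEAD(&t.list);
        init_waitqueue_head(&t.wait);

        spin_lock_irq(&transactions_lock);
        list_add_tail(&t.list, &transactions);
        spin_unlock_irq(&transactions_lock);

        /* ... send the request on the bus here ... */

        wait_event_timeout(t.wait, t.state != STATE_PENDING,
                           msecs_to_jiffies(125));
        ret = (t.state == STATE_COMPLETE) ? 0 : -ETIMEDOUT;

        spin_lock_irq(&transactions_lock);
        list_del(&t.list);
        spin_unlock_irq(&transactions_lock);
        return ret;
    }

    /*
     * The response handler, while holding transactions_lock, would find the
     * matching record and do:
     *     t->state = STATE_COMPLETE;
     *     wake_up(&t->wait);
     */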
t                  18 sound/firewire/fireface/ff-pcm.c 	struct snd_interval t = {
t                  34 sound/firewire/fireface/ff-pcm.c 		t.min = min(t.min, amdtp_rate_table[i]);
t                  35 sound/firewire/fireface/ff-pcm.c 		t.max = max(t.max, amdtp_rate_table[i]);
t                  38 sound/firewire/fireface/ff-pcm.c 	return snd_interval_refine(r, &t);
t                  49 sound/firewire/fireface/ff-pcm.c 	struct snd_interval t = {
t                  65 sound/firewire/fireface/ff-pcm.c 		t.min = min(t.min, pcm_channels[mode]);
t                  66 sound/firewire/fireface/ff-pcm.c 		t.max = max(t.max, pcm_channels[mode]);
t                  69 sound/firewire/fireface/ff-pcm.c 	return snd_interval_refine(c, &t);
t                  26 sound/firewire/fireworks/fireworks_hwdep.c 	struct snd_efw_transaction *t;
t                  51 sound/firewire/fireworks/fireworks_hwdep.c 		t = (struct snd_efw_transaction *)(pull_ptr);
t                  52 sound/firewire/fireworks/fireworks_hwdep.c 		length = be32_to_cpu(t->length) * sizeof(__be32);
t                  71 sound/firewire/fireworks/fireworks_pcm.c 	struct snd_interval t = {
t                  81 sound/firewire/fireworks/fireworks_pcm.c 		t.min = min(t.min, freq_table[i]);
t                  82 sound/firewire/fireworks/fireworks_pcm.c 		t.max = max(t.max, freq_table[i]);
t                  85 sound/firewire/fireworks/fireworks_pcm.c 	return snd_interval_refine(r, &t);
t                  96 sound/firewire/fireworks/fireworks_pcm.c 	struct snd_interval t = {
t                 106 sound/firewire/fireworks/fireworks_pcm.c 		t.min = min(t.min, pcm_channels[mode]);
t                 107 sound/firewire/fireworks/fireworks_pcm.c 		t.max = max(t.max, pcm_channels[mode]);
t                 110 sound/firewire/fireworks/fireworks_pcm.c 	return snd_interval_refine(c, &t);
t                  74 sound/firewire/fireworks/fireworks_transaction.c 	struct transaction_queue t;
t                  78 sound/firewire/fireworks/fireworks_transaction.c 	t.unit = unit;
t                  79 sound/firewire/fireworks/fireworks_transaction.c 	t.buf = resp;
t                  80 sound/firewire/fireworks/fireworks_transaction.c 	t.size = resp_size;
t                  81 sound/firewire/fireworks/fireworks_transaction.c 	t.seqnum = be32_to_cpu(((struct snd_efw_transaction *)cmd)->seqnum) + 1;
t                  82 sound/firewire/fireworks/fireworks_transaction.c 	t.state = STATE_PENDING;
t                  83 sound/firewire/fireworks/fireworks_transaction.c 	init_waitqueue_head(&t.wait);
t                  86 sound/firewire/fireworks/fireworks_transaction.c 	list_add_tail(&t.list, &transaction_queues);
t                  91 sound/firewire/fireworks/fireworks_transaction.c 		ret = snd_efw_transaction_cmd(t.unit, (void *)cmd, cmd_size);
t                  95 sound/firewire/fireworks/fireworks_transaction.c 		wait_event_timeout(t.wait, t.state != STATE_PENDING,
t                  98 sound/firewire/fireworks/fireworks_transaction.c 		if (t.state == STATE_COMPLETE) {
t                  99 sound/firewire/fireworks/fireworks_transaction.c 			ret = t.size;
t                 101 sound/firewire/fireworks/fireworks_transaction.c 		} else if (t.state == STATE_BUS_RESET) {
t                 104 sound/firewire/fireworks/fireworks_transaction.c 			dev_err(&t.unit->device, "EFW transaction timed out\n");
t                 111 sound/firewire/fireworks/fireworks_transaction.c 	list_del(&t.list);
t                 121 sound/firewire/fireworks/fireworks_transaction.c 	struct snd_efw_transaction *t;
t                 123 sound/firewire/fireworks/fireworks_transaction.c 	t = (struct snd_efw_transaction *)data;
t                 124 sound/firewire/fireworks/fireworks_transaction.c 	length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
t                 201 sound/firewire/fireworks/fireworks_transaction.c 	struct transaction_queue *t;
t                 205 sound/firewire/fireworks/fireworks_transaction.c 	list_for_each_entry(t, &transaction_queues, list) {
t                 206 sound/firewire/fireworks/fireworks_transaction.c 		device = fw_parent_device(t->unit);
t                 214 sound/firewire/fireworks/fireworks_transaction.c 		if ((t->state == STATE_PENDING) && (t->seqnum == seqnum)) {
t                 215 sound/firewire/fireworks/fireworks_transaction.c 			t->state = STATE_COMPLETE;
t                 216 sound/firewire/fireworks/fireworks_transaction.c 			t->size = min_t(unsigned int, length, t->size);
t                 217 sound/firewire/fireworks/fireworks_transaction.c 			memcpy(t->buf, data, t->size);
t                 218 sound/firewire/fireworks/fireworks_transaction.c 			wake_up(&t->wait);
t                 291 sound/firewire/fireworks/fireworks_transaction.c 	struct transaction_queue *t;
t                 294 sound/firewire/fireworks/fireworks_transaction.c 	list_for_each_entry(t, &transaction_queues, list) {
t                 295 sound/firewire/fireworks/fireworks_transaction.c 		if ((t->unit == unit) &&
t                 296 sound/firewire/fireworks/fireworks_transaction.c 		    (t->state == STATE_PENDING)) {
t                 297 sound/firewire/fireworks/fireworks_transaction.c 			t->state = STATE_BUS_RESET;
t                 298 sound/firewire/fireworks/fireworks_transaction.c 			wake_up(&t->wait);
t                  18 sound/firewire/oxfw/oxfw-pcm.c 	struct snd_interval t = {
t                  34 sound/firewire/oxfw/oxfw-pcm.c 		t.min = min(t.min, formation.rate);
t                  35 sound/firewire/oxfw/oxfw-pcm.c 		t.max = max(t.max, formation.rate);
t                  38 sound/firewire/oxfw/oxfw-pcm.c 	return snd_interval_refine(r, &t);
t                 419 sound/hda/hdmi_chmap.c 	struct channel_map_table *t = map_tables;
t                 421 sound/hda/hdmi_chmap.c 	for (; t->map; t++) {
t                 422 sound/hda/hdmi_chmap.c 		if (t->map == c)
t                 423 sound/hda/hdmi_chmap.c 			return t->spk_mask;
t                 454 sound/hda/hdmi_chmap.c 	struct channel_map_table *t = map_tables;
t                 456 sound/hda/hdmi_chmap.c 	for (; t->map; t++) {
t                 457 sound/hda/hdmi_chmap.c 		if (t->spk_mask == spk)
t                 458 sound/hda/hdmi_chmap.c 			return t->map;
t                  23 sound/i2c/other/ak4117.c static void snd_ak4117_timer(struct timer_list *t);
t                 517 sound/i2c/other/ak4117.c static void snd_ak4117_timer(struct timer_list *t)
t                 519 sound/i2c/other/ak4117.c 	struct ak4117 *chip = from_timer(chip, t, timer);
t                 183 sound/isa/sb/emu8000_pcm.c static void emu8k_pcm_timer_func(struct timer_list *t)
t                 185 sound/isa/sb/emu8000_pcm.c 	struct snd_emu8k_pcm *rec = from_timer(rec, t, timer);
t                  84 sound/isa/sb/sb8_main.c 		struct snd_interval t = { .min = 1, .max = 1 };
t                  85 sound/isa/sb/sb8_main.c 		return snd_interval_refine(hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS), &t);
t                 200 sound/isa/sb/sb8_midi.c static void snd_sb8dsp_midi_output_timer(struct timer_list *t)
t                 202 sound/isa/sb/sb8_midi.c 	struct snd_sb *chip = from_timer(chip, t, midi_timer);
t                 349 sound/isa/wavefront/wavefront_midi.c static void snd_wavefront_midi_output_timer(struct timer_list *t)
t                 351 sound/isa/wavefront/wavefront_midi.c 	snd_wavefront_midi_t *midi = from_timer(midi, t, timer);
t                 120 sound/pci/asihpi/asihpi.c 	struct tasklet_struct t;
t                 554 sound/pci/asihpi/asihpi.c 	tasklet_disable(&card->t);
t                 556 sound/pci/asihpi/asihpi.c 	tasklet_enable(&card->t);
t                 575 sound/pci/asihpi/asihpi.c 		tasklet_disable(&card->t);
t                 577 sound/pci/asihpi/asihpi.c 		tasklet_enable(&card->t);
t                 739 sound/pci/asihpi/asihpi.c static void snd_card_asihpi_timer_function(struct timer_list *t)
t                 741 sound/pci/asihpi/asihpi.c 	struct snd_card_asihpi_pcm *dpcm = from_timer(dpcm, t, timer);
t                 946 sound/pci/asihpi/asihpi.c 	tasklet_schedule(&asihpi->t);
t                2897 sound/pci/asihpi/asihpi.c 		tasklet_init(&asihpi->t, snd_card_asihpi_int_task,
t                2994 sound/pci/asihpi/asihpi.c 		tasklet_kill(&asihpi->t);
t                2054 sound/pci/asihpi/hpi6205.c 	int t = timeout_us / 4;
t                2057 sound/pci/asihpi/hpi6205.c 	while ((interface->dsp_ack != state) && --t) {
t                2063 sound/pci/asihpi/hpi6205.c 	return t * 4;
t                 982 sound/pci/asihpi/hpi_internal.h 		struct hpi_profile_res_time t;
t                1019 sound/pci/asihpi/hpi_internal.h 		struct hpi_clock_msg t;	/* dsp time */
t                1080 sound/pci/asihpi/hpi_internal.h 		struct hpi_clock_res t;	/* dsp time */
t                1188 sound/pci/asihpi/hpi_internal.h 		struct hpi_clock_msg t;
t                1207 sound/pci/asihpi/hpi_internal.h 		struct hpi_clock_res t;
t                1948 sound/pci/cs46xx/dsp_spos.c 		struct dsp_task_descriptor *t = &ins->tasks[i];
t                1949 sound/pci/cs46xx/dsp_spos.c 		_dsp_create_task_tree(chip, t->data, t->address, t->size);
t                  63 sound/pci/ctxfi/cttimer.c static void ct_systimer_callback(struct timer_list *t)
t                  65 sound/pci/ctxfi/cttimer.c 	struct ct_timer_instance *ti = from_timer(ti, t, timer);
t                 202 sound/pci/echoaudio/midi.c static void snd_echo_midi_output_write(struct timer_list *t)
t                 204 sound/pci/echoaudio/midi.c 	struct echoaudio *chip = from_timer(chip, t, timer);
t                 506 sound/pci/ens1370.c 	unsigned int t, r = 0;
t                 508 sound/pci/ens1370.c 	for (t = 0; t < POLL_COUNT; t++) {
t                 604 sound/pci/ens1370.c 	unsigned int t, x, flag;
t                 608 sound/pci/ens1370.c 	for (t = 0; t < POLL_COUNT; t++) {
t                 617 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 623 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 646 sound/pci/ens1370.c 	unsigned int t, x, flag, fail = 0;
t                 651 sound/pci/ens1370.c 	for (t = 0; t < POLL_COUNT; t++) {
t                 660 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 666 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 677 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 682 sound/pci/ens1370.c 			for (t = 0; t < POLL_COUNT; t++) {
t                 685 sound/pci/ens1370.c 						for (t = 0; t < 100; t++)
t                1697 sound/pci/es1968.c 	unsigned int pa, offset, t;
t                1768 sound/pci/es1968.c 	t = ktime_to_us(diff);
t                1769 sound/pci/es1968.c 	if (t == 0) {
t                1773 sound/pci/es1968.c 		offset = (offset / t) * 1000 + ((offset % t) * 1000) / t;
t                 503 sound/pci/hda/hda_intel.c 	u32 val, t;
t                 509 sound/pci/hda/hda_intel.c 		t = preferred_bits[i];
t                 510 sound/pci/hda/hda_intel.c 		if (val & (1 << t))
t                 511 sound/pci/hda/hda_intel.c 			return t;
t                2260 sound/pci/ice1712/ice1712.c 	long t = 0x10000;
t                2264 sound/pci/ice1712/ice1712.c 	while (t-- > 0 && (inb(ICEREG(ice, I2C_CTRL)) & ICE1712_I2C_BUSY)) ;
t                2249 sound/pci/ice1712/ice1724.c 	int t = 0x10000;
t                2250 sound/pci/ice1712/ice1724.c 	while ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_BUSY) && t--)
t                2252 sound/pci/ice1712/ice1724.c 	if (t == -1)
t                2676 sound/pci/intel8x0.c 	unsigned long pos, pos1, t;
t                2764 sound/pci/intel8x0.c 	t = ktime_us_delta(stop_time, start_time);
t                2766 sound/pci/intel8x0.c 		 "%s: measured %lu usecs (%lu samples)\n", __func__, t, pos);
t                2767 sound/pci/intel8x0.c 	if (t == 0) {
t                2772 sound/pci/intel8x0.c 	pos = (pos / t) * 1000 + ((pos % t) * 1000) / t;
t                 588 sound/pci/korg1212/korg1212.c static void snd_korg1212_timer_func(struct timer_list *t)
t                 590 sound/pci/korg1212/korg1212.c 	struct snd_korg1212 *korg1212 = from_timer(korg1212, t, timer);
t                2279 sound/pci/maestro3.c 	u8 t; /* makes as much sense as 'n', no? */
t                2311 sound/pci/maestro3.c 	t = inb(chip->iobase + ASSP_CONTROL_A);
t                2312 sound/pci/maestro3.c 	t &= ~( DSP_CLK_36MHZ_SELECT  | ASSP_CLK_49MHZ_SELECT);
t                2313 sound/pci/maestro3.c 	t |= ASSP_CLK_49MHZ_SELECT;
t                2314 sound/pci/maestro3.c 	t |= ASSP_0_WS_ENABLE; 
t                2315 sound/pci/maestro3.c 	outb(t, chip->iobase + ASSP_CONTROL_A);
t                 698 sound/pci/riptide/riptide.c 	u32 laddr, saddr, t, val;
t                 705 sound/pci/riptide/riptide.c 			t = atoh(&in[7], 2);
t                 706 sound/pci/riptide/riptide.c 			switch (t) {
t                1400 sound/pci/rme9652/hdsp.c static void snd_hdsp_midi_output_timer(struct timer_list *t)
t                1402 sound/pci/rme9652/hdsp.c 	struct hdsp_midi *hmidi = from_timer(hmidi, t, timer);
t                4314 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4319 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4321 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4326 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4328 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4333 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4345 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4350 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4352 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4357 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4359 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4364 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(c, &t);
t                4376 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4381 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                4383 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4388 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                4390 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4395 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                4407 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4412 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                4414 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4419 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                4421 sound/pci/rme9652/hdsp.c 		struct snd_interval t = {
t                4426 sound/pci/rme9652/hdsp.c 		return snd_interval_refine(r, &t);
t                1939 sound/pci/rme9652/hdspm.c static void snd_hdspm_midi_output_timer(struct timer_list *t)
t                1941 sound/pci/rme9652/hdspm.c 	struct hdspm_midi *hmidi = from_timer(hmidi, t, timer);
t                5883 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5888 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5890 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5895 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5897 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5902 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5918 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5923 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5925 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5930 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5932 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5937 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(c, &t);
t                5953 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5958 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                5960 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5965 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                5967 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5972 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                5987 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5992 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                5994 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                5999 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                6001 sound/pci/rme9652/hdspm.c 		struct snd_interval t = {
t                6006 sound/pci/rme9652/hdspm.c 		return snd_interval_refine(r, &t);
t                2240 sound/pci/rme9652/rme9652.c 		struct snd_interval t = {
t                2245 sound/pci/rme9652/rme9652.c 		return snd_interval_refine(c, &t);
t                2247 sound/pci/rme9652/rme9652.c 		struct snd_interval t = {
t                2252 sound/pci/rme9652/rme9652.c 		return snd_interval_refine(c, &t);
t                2264 sound/pci/rme9652/rme9652.c 		struct snd_interval t = {
t                2269 sound/pci/rme9652/rme9652.c 		return snd_interval_refine(r, &t);
t                2271 sound/pci/rme9652/rme9652.c 		struct snd_interval t = {
t                2276 sound/pci/rme9652/rme9652.c 		return snd_interval_refine(r, &t);
t                 286 sound/sh/aica.c static void aica_period_elapsed(struct timer_list *t)
t                 289 sound/sh/aica.c 							      t, timer);
t                 197 sound/soc/atmel/atmel_ssc_dai.c 	struct snd_interval t;
t                 248 sound/soc/atmel/atmel_ssc_dai.c 		t.min = 8000;
t                 249 sound/soc/atmel/atmel_ssc_dai.c 		t.max = ssc_p->mck_rate / mck_div / frame_size;
t                 250 sound/soc/atmel/atmel_ssc_dai.c 		t.openmin = t.openmax = 0;
t                 251 sound/soc/atmel/atmel_ssc_dai.c 		t.integer = 0;
t                 252 sound/soc/atmel/atmel_ssc_dai.c 		ret = snd_interval_refine(i, &t);
t                 213 sound/soc/au1x/psc-ac97.c 	int chans, t, stype = substream->stream;
t                 255 sound/soc/au1x/psc-ac97.c 		t = 100;
t                 256 sound/soc/au1x/psc-ac97.c 		while ((__raw_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR) && --t)
t                 259 sound/soc/au1x/psc-ac97.c 		if (!t)
t                 271 sound/soc/au1x/psc-ac97.c 		t = 100;
t                 272 sound/soc/au1x/psc-ac97.c 		while ((!(__raw_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && --t)
t                 275 sound/soc/au1x/psc-ac97.c 		if (!t)
t                1561 sound/soc/codecs/max98088.c        const char **t;
t                1593 sound/soc/codecs/max98088.c                t = krealloc(max98088->eq_texts,
t                1596 sound/soc/codecs/max98088.c                if (t == NULL)
t                1600 sound/soc/codecs/max98088.c                t[max98088->eq_textcnt] = cfg[i].name;
t                1602 sound/soc/codecs/max98088.c                max98088->eq_texts = t;
t                1576 sound/soc/codecs/max98095.c 	const char **t;
t                1608 sound/soc/codecs/max98095.c 		t = krealloc(max98095->eq_texts,
t                1611 sound/soc/codecs/max98095.c 		if (t == NULL)
t                1615 sound/soc/codecs/max98095.c 		t[max98095->eq_textcnt] = cfg[i].name;
t                1617 sound/soc/codecs/max98095.c 		max98095->eq_texts = t;
t                1727 sound/soc/codecs/max98095.c 	const char **t;
t                1760 sound/soc/codecs/max98095.c 		t = krealloc(max98095->bq_texts,
t                1763 sound/soc/codecs/max98095.c 		if (t == NULL)
t                1767 sound/soc/codecs/max98095.c 		t[max98095->bq_textcnt] = cfg[i].name;
t                1769 sound/soc/codecs/max98095.c 		max98095->bq_texts = t;
t                3367 sound/soc/codecs/rt5645.c static void rt5645_btn_check_callback(struct timer_list *t)
t                3369 sound/soc/codecs/rt5645.c 	struct rt5645_priv *rt5645 = from_timer(rt5645, t, btn_check_timer);
t                 115 sound/soc/codecs/rt5677-spi.c 	struct spi_transfer t[2];
t                 131 sound/soc/codecs/rt5677-spi.c 	memset(t, 0, sizeof(t));
t                 132 sound/soc/codecs/rt5677-spi.c 	t[0].tx_buf = header;
t                 133 sound/soc/codecs/rt5677-spi.c 	t[0].len = sizeof(header);
t                 134 sound/soc/codecs/rt5677-spi.c 	t[0].speed_hz = RT5677_SPI_FREQ;
t                 135 sound/soc/codecs/rt5677-spi.c 	t[1].rx_buf = body;
t                 136 sound/soc/codecs/rt5677-spi.c 	t[1].speed_hz = RT5677_SPI_FREQ;
t                 137 sound/soc/codecs/rt5677-spi.c 	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
t                 139 sound/soc/codecs/rt5677-spi.c 	for (offset = 0; offset < len; offset += t[1].len) {
t                 141 sound/soc/codecs/rt5677-spi.c 				len - offset, &t[1].len);
t                 156 sound/soc/codecs/rt5677-spi.c 		rt5677_spi_reverse(cb + offset, len - offset, body, t[1].len);
t                 170 sound/soc/codecs/rt5677-spi.c 	struct spi_transfer t;
t                 186 sound/soc/codecs/rt5677-spi.c 	memset(&t, 0, sizeof(t));
t                 187 sound/soc/codecs/rt5677-spi.c 	t.tx_buf = buf;
t                 188 sound/soc/codecs/rt5677-spi.c 	t.speed_hz = RT5677_SPI_FREQ;
t                 189 sound/soc/codecs/rt5677-spi.c 	spi_message_init_with_transfers(&m, &t, 1);
t                 193 sound/soc/codecs/rt5677-spi.c 				len - offset, &t.len);
t                 203 sound/soc/codecs/rt5677-spi.c 		rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
t                 204 sound/soc/codecs/rt5677-spi.c 		offset += t.len;
t                 205 sound/soc/codecs/rt5677-spi.c 		t.len += RT5677_SPI_HEADER + 1;
t                 987 sound/soc/codecs/sgtl5000.c 		u64 out, t;
t                1003 sound/soc/codecs/sgtl5000.c 		t = do_div(out, in);
t                1005 sound/soc/codecs/sgtl5000.c 		t *= 2048;
t                1006 sound/soc/codecs/sgtl5000.c 		do_div(t, in);
t                1007 sound/soc/codecs/sgtl5000.c 		frac_div = t;
t                 187 sound/soc/codecs/tas2552.c 		unsigned int d, q, t;
t                 195 sound/soc/codecs/tas2552.c 		t = (pll_clk * 2) << p;
t                 196 sound/soc/codecs/tas2552.c 		j = t / pll_clkin;
t                 197 sound/soc/codecs/tas2552.c 		d = t % pll_clkin;
t                 198 sound/soc/codecs/tas2552.c 		t = pll_clkin / 10000;
t                 199 sound/soc/codecs/tas2552.c 		q = d / (t + 1);
t                 200 sound/soc/codecs/tas2552.c 		d = q + ((9999 - pll_clkin % 10000) * (d / t - q)) / 10000;
t                 524 sound/soc/codecs/tscs454.c #define PLL_CTL(f, t, c1, r1, o1, f1l, f1h, c2, r2, o2, f2l, f2h)	\
t                 538 sound/soc/codecs/tscs454.c 			{R_TIMEBASE,	t},				\
t                 121 sound/soc/codecs/wm0010.c 	struct spi_transfer t;
t                 196 sound/soc/codecs/wm0010.c 	struct spi_transfer t;
t                 220 sound/soc/codecs/wm0010.c 	u32 *out32 = xfer->t.rx_buf;
t                 232 sound/soc/codecs/wm0010.c 	for (i = 0; i < xfer->t.len / 4; i++) {
t                 424 sound/soc/codecs/wm0010.c 		xfer->t.rx_buf = out;
t                 431 sound/soc/codecs/wm0010.c 		xfer->t.tx_buf = img;
t                 438 sound/soc/codecs/wm0010.c 		xfer->t.len = len;
t                 439 sound/soc/codecs/wm0010.c 		xfer->t.bits_per_word = 8;
t                 442 sound/soc/codecs/wm0010.c 			xfer->t.speed_hz = wm0010->sysclk / 6;
t                 444 sound/soc/codecs/wm0010.c 			xfer->t.speed_hz = wm0010->max_spi_freq;
t                 448 sound/soc/codecs/wm0010.c 					xfer->t.speed_hz = wm0010->board_max_spi_speed;
t                 452 sound/soc/codecs/wm0010.c 		wm0010->max_spi_freq = xfer->t.speed_hz;
t                 454 sound/soc/codecs/wm0010.c 		spi_message_add_tail(&xfer->t, &xfer->m);
t                 485 sound/soc/codecs/wm0010.c 		kfree(xfer->t.rx_buf);
t                 486 sound/soc/codecs/wm0010.c 		kfree(xfer->t.tx_buf);
t                 502 sound/soc/codecs/wm0010.c 	struct spi_transfer t;
t                 533 sound/soc/codecs/wm0010.c 	memset(&t, 0, sizeof(t));
t                 534 sound/soc/codecs/wm0010.c 	t.rx_buf = out;
t                 535 sound/soc/codecs/wm0010.c 	t.tx_buf = img;
t                 536 sound/soc/codecs/wm0010.c 	t.len = fw->size;
t                 537 sound/soc/codecs/wm0010.c 	t.bits_per_word = 8;
t                 538 sound/soc/codecs/wm0010.c 	t.speed_hz = wm0010->sysclk / 10;
t                 539 sound/soc/codecs/wm0010.c 	spi_message_add_tail(&t, &m);
t                 542 sound/soc/codecs/wm0010.c 		t.speed_hz);
t                 577 sound/soc/codecs/wm0010.c 	struct spi_transfer t;
t                 666 sound/soc/codecs/wm0010.c 		memset(&t, 0, sizeof(t));
t                 667 sound/soc/codecs/wm0010.c 		t.rx_buf = out;
t                 668 sound/soc/codecs/wm0010.c 		t.tx_buf = img_swap;
t                 669 sound/soc/codecs/wm0010.c 		t.len = len;
t                 670 sound/soc/codecs/wm0010.c 		t.bits_per_word = 8;
t                 671 sound/soc/codecs/wm0010.c 		t.speed_hz = wm0010->sysclk / 6;
t                 672 sound/soc/codecs/wm0010.c 		spi_message_add_tail(&t, &m);
t                1976 sound/soc/codecs/wm8904.c 	const char **t;
t                1995 sound/soc/codecs/wm8904.c 		t = krealloc(wm8904->retune_mobile_texts,
t                1999 sound/soc/codecs/wm8904.c 		if (t == NULL)
t                2003 sound/soc/codecs/wm8904.c 		t[wm8904->num_retune_mobile_texts] = 
t                2008 sound/soc/codecs/wm8904.c 		wm8904->retune_mobile_texts = t;
t                3221 sound/soc/codecs/wm8994.c 	const char **t;
t                3240 sound/soc/codecs/wm8994.c 		t = krealloc(wm8994->retune_mobile_texts,
t                3244 sound/soc/codecs/wm8994.c 		if (t == NULL)
t                3248 sound/soc/codecs/wm8994.c 		t[wm8994->num_retune_mobile_texts] =
t                3253 sound/soc/codecs/wm8994.c 		wm8994->retune_mobile_texts = t;
t                2554 sound/soc/codecs/wm8996.c 	const char **t;
t                2573 sound/soc/codecs/wm8996.c 		t = krealloc(wm8996->retune_mobile_texts,
t                2577 sound/soc/codecs/wm8996.c 		if (t == NULL)
t                2581 sound/soc/codecs/wm8996.c 		t[wm8996->num_retune_mobile_texts] = 
t                2586 sound/soc/codecs/wm8996.c 		wm8996->retune_mobile_texts = t;
t                 465 sound/soc/intel/common/sst-firmware.c 	struct sst_fw *sst_fw, *t;
t                 468 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
t                1577 sound/soc/intel/haswell/sst-haswell-ipc.c 	struct sst_fw *sst_fw, *t;
t                1594 sound/soc/intel/haswell/sst-haswell-ipc.c 	list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) {
t                1660 sound/soc/intel/haswell/sst-haswell-ipc.c 	struct sst_fw *sst_fw, *t;
t                1663 sound/soc/intel/haswell/sst-haswell-ipc.c 	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
t                 219 sound/soc/meson/axg-spdifin.c 		unsigned int t;
t                 224 sound/soc/meson/axg-spdifin.c 		t = axg_spdifin_mode_timer(priv, i, rate);
t                 227 sound/soc/meson/axg-spdifin.c 		axg_spdifin_write_timer(priv->map, i, t);
t                 230 sound/soc/meson/axg-spdifin.c 		axg_spdifin_write_threshold(priv->map, i, t + t_next);
t                 233 sound/soc/meson/axg-spdifin.c 		t_next = t;
t                  30 sound/soc/samsung/i2s.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                 212 sound/soc/samsung/s3c-i2s-v2.c #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
t                 167 sound/soc/sti/sti_uniperif.c 	struct snd_interval t;
t                 169 sound/soc/sti/sti_uniperif.c 	t.min = uni->tdm_slot.avail_slots;
t                 170 sound/soc/sti/sti_uniperif.c 	t.max = uni->tdm_slot.avail_slots;
t                 171 sound/soc/sti/sti_uniperif.c 	t.openmin = 0;
t                 172 sound/soc/sti/sti_uniperif.c 	t.openmax = 0;
t                 173 sound/soc/sti/sti_uniperif.c 	t.integer = 0;
t                 175 sound/soc/sti/sti_uniperif.c 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
t                 191 sound/synth/emux/emux_synth.c void snd_emux_timer_callback(struct timer_list *t)
t                 193 sound/synth/emux/emux_synth.c 	struct snd_emux *emu = from_timer(emu, t, tlist);
t                  45 sound/synth/emux/emux_voice.h void snd_emux_timer_callback(struct timer_list *t);
t                 843 sound/synth/emux/soundfont.c 	int r, p, t;
t                 848 sound/synth/emux/soundfont.c 	t = end - start;
t                 849 sound/synth/emux/soundfont.c 	if (t < 0) t = -t;
t                 851 sound/synth/emux/soundfont.c 		t = t << (13 - r);
t                 853 sound/synth/emux/soundfont.c 		t = t >> (r - 13);
t                 854 sound/synth/emux/soundfont.c 	return (t * 10) / (p * 441);
t                 356 sound/usb/midi.c static void snd_usbmidi_error_timer(struct timer_list *t)
t                 358 sound/usb/midi.c 	struct snd_usb_midi *umidi = from_timer(umidi, t, error_timer);
t                 122 sound/usb/mixer_quirks.c 				struct std_mono_table *t)
t                 126 sound/usb/mixer_quirks.c 	while (t->name != NULL) {
t                 127 sound/usb/mixer_quirks.c 		err = snd_create_std_mono_ctl(mixer, t->unitid, t->control,
t                 128 sound/usb/mixer_quirks.c 				t->cmask, t->val_type, t->name, t->tlv_callback);
t                 131 sound/usb/mixer_quirks.c 		t++;
t                 233 sound/usb/validate.c #define FIXED(p, t, s) { .protocol = (p), .type = (t), .size = sizeof(s) }
t                 234 sound/usb/validate.c #define FUNC(p, t, f) { .protocol = (p), .type = (t), .func = (f) }
t                 459 sound/x86/intel_hdmi_audio.c 	const struct channel_map_table *t = map_tables;
t                 461 sound/x86/intel_hdmi_audio.c 	for (; t->map; t++) {
t                 462 sound/x86/intel_hdmi_audio.c 		if (t->spk_mask == spk)
t                 463 sound/x86/intel_hdmi_audio.c 			return t->map;
t                 195 tools/accounting/getdelays.c #define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
t                 197 tools/accounting/getdelays.c static void print_delayacct(struct taskstats *t)
t                 211 tools/accounting/getdelays.c 	       (unsigned long long)t->cpu_count,
t                 212 tools/accounting/getdelays.c 	       (unsigned long long)t->cpu_run_real_total,
t                 213 tools/accounting/getdelays.c 	       (unsigned long long)t->cpu_run_virtual_total,
t                 214 tools/accounting/getdelays.c 	       (unsigned long long)t->cpu_delay_total,
t                 215 tools/accounting/getdelays.c 	       average_ms((double)t->cpu_delay_total, t->cpu_count),
t                 217 tools/accounting/getdelays.c 	       (unsigned long long)t->blkio_count,
t                 218 tools/accounting/getdelays.c 	       (unsigned long long)t->blkio_delay_total,
t                 219 tools/accounting/getdelays.c 	       average_ms(t->blkio_delay_total, t->blkio_count),
t                 221 tools/accounting/getdelays.c 	       (unsigned long long)t->swapin_count,
t                 222 tools/accounting/getdelays.c 	       (unsigned long long)t->swapin_delay_total,
t                 223 tools/accounting/getdelays.c 	       average_ms(t->swapin_delay_total, t->swapin_count),
t                 225 tools/accounting/getdelays.c 	       (unsigned long long)t->freepages_count,
t                 226 tools/accounting/getdelays.c 	       (unsigned long long)t->freepages_delay_total,
t                 227 tools/accounting/getdelays.c 	       average_ms(t->freepages_delay_total, t->freepages_count),
t                 229 tools/accounting/getdelays.c 	       (unsigned long long)t->thrashing_count,
t                 230 tools/accounting/getdelays.c 	       (unsigned long long)t->thrashing_delay_total,
t                 231 tools/accounting/getdelays.c 	       average_ms(t->thrashing_delay_total, t->thrashing_count));
t                 234 tools/accounting/getdelays.c static void task_context_switch_counts(struct taskstats *t)
t                 239 tools/accounting/getdelays.c 	       (unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw);
t                 253 tools/accounting/getdelays.c static void print_ioacct(struct taskstats *t)
t                 256 tools/accounting/getdelays.c 		t->ac_comm,
t                 257 tools/accounting/getdelays.c 		(unsigned long long)t->read_bytes,
t                 258 tools/accounting/getdelays.c 		(unsigned long long)t->write_bytes,
t                 259 tools/accounting/getdelays.c 		(unsigned long long)t->cancelled_write_bytes);
t                  17 tools/arch/x86/lib/insn.c #define validate_next(t, insn, n)	\
t                  18 tools/arch/x86/lib/insn.c 	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
t                  20 tools/arch/x86/lib/insn.c #define __get_next(t, insn)	\
t                  21 tools/arch/x86/lib/insn.c 	({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
t                  23 tools/arch/x86/lib/insn.c #define __peek_nbyte_next(t, insn, n)	\
t                  24 tools/arch/x86/lib/insn.c 	({ t r = *(t*)((insn)->next_byte + n); r; })
t                  26 tools/arch/x86/lib/insn.c #define get_next(t, insn)	\
t                  27 tools/arch/x86/lib/insn.c 	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
t                  29 tools/arch/x86/lib/insn.c #define peek_nbyte_next(t, insn, n)	\
t                  30 tools/arch/x86/lib/insn.c 	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
t                  32 tools/arch/x86/lib/insn.c #define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
t                  85 tools/bpf/bpftool/btf.c 			 const struct btf_type *t)
t                  90 tools/bpf/bpftool/btf.c 	kind = BTF_INFO_KIND(t->info);
t                  97 tools/bpf/bpftool/btf.c 		jsonw_string_field(w, "name", btf_str(btf, t->name_off));
t                 100 tools/bpf/bpftool/btf.c 		       btf_str(btf, t->name_off));
t                 103 tools/bpf/bpftool/btf.c 	switch (BTF_INFO_KIND(t->info)) {
t                 105 tools/bpf/bpftool/btf.c 		__u32 v = *(__u32 *)(t + 1);
t                 111 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "size", t->size);
t                 117 tools/bpf/bpftool/btf.c 			       t->size, BTF_INT_OFFSET(v), BTF_INT_BITS(v),
t                 128 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "type_id", t->type);
t                 130 tools/bpf/bpftool/btf.c 			printf(" type_id=%u", t->type);
t                 133 tools/bpf/bpftool/btf.c 		const struct btf_array *arr = (const void *)(t + 1);
t                 147 tools/bpf/bpftool/btf.c 		const struct btf_member *m = (const void *)(t + 1);
t                 148 tools/bpf/bpftool/btf.c 		__u16 vlen = BTF_INFO_VLEN(t->info);
t                 152 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "size", t->size);
t                 157 tools/bpf/bpftool/btf.c 			printf(" size=%u vlen=%u", t->size, vlen);
t                 163 tools/bpf/bpftool/btf.c 			if (BTF_INFO_KFLAG(t->info)) {
t                 193 tools/bpf/bpftool/btf.c 		const struct btf_enum *v = (const void *)(t + 1);
t                 194 tools/bpf/bpftool/btf.c 		__u16 vlen = BTF_INFO_VLEN(t->info);
t                 198 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "size", t->size);
t                 203 tools/bpf/bpftool/btf.c 			printf(" size=%u vlen=%u", t->size, vlen);
t                 222 tools/bpf/bpftool/btf.c 		const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union"
t                 233 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "type_id", t->type);
t                 235 tools/bpf/bpftool/btf.c 			printf(" type_id=%u", t->type);
t                 238 tools/bpf/bpftool/btf.c 		const struct btf_param *p = (const void *)(t + 1);
t                 239 tools/bpf/bpftool/btf.c 		__u16 vlen = BTF_INFO_VLEN(t->info);
t                 243 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "ret_type_id", t->type);
t                 248 tools/bpf/bpftool/btf.c 			printf(" ret_type_id=%u vlen=%u", t->type, vlen);
t                 267 tools/bpf/bpftool/btf.c 		const struct btf_var *v = (const void *)(t + 1);
t                 273 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "type_id", t->type);
t                 276 tools/bpf/bpftool/btf.c 			printf(" type_id=%u, linkage=%s", t->type, linkage);
t                 281 tools/bpf/bpftool/btf.c 		const struct btf_var_secinfo *v = (const void *)(t+1);
t                 282 tools/bpf/bpftool/btf.c 		__u16 vlen = BTF_INFO_VLEN(t->info);
t                 286 tools/bpf/bpftool/btf.c 			jsonw_uint_field(w, "size", t->size);
t                 291 tools/bpf/bpftool/btf.c 			printf(" size=%u vlen=%u", t->size, vlen);
t                 324 tools/bpf/bpftool/btf.c 	const struct btf_type *t;
t                 335 tools/bpf/bpftool/btf.c 			t = btf__type_by_id(btf, root_type_ids[i]);
t                 336 tools/bpf/bpftool/btf.c 			dump_btf_type(btf, root_type_ids[i], t);
t                 342 tools/bpf/bpftool/btf.c 			t = btf__type_by_id(btf, i);
t                 343 tools/bpf/bpftool/btf.c 			dump_btf_type(btf, i, t);
t                  54 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t = btf__type_by_id(d->btf, type_id);
t                  55 tools/bpf/bpftool/btf_dumper.c 	struct btf_array *arr = (struct btf_array *)(t + 1);
t                 194 tools/bpf/bpftool/btf_dumper.c static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
t                 201 tools/bpf/bpftool/btf_dumper.c 	int_type = (__u32 *)(t + 1);
t                 267 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t;
t                 274 tools/bpf/bpftool/btf_dumper.c 	t = btf__type_by_id(d->btf, type_id);
t                 275 tools/bpf/bpftool/btf_dumper.c 	if (!t)
t                 278 tools/bpf/bpftool/btf_dumper.c 	kind_flag = BTF_INFO_KFLAG(t->info);
t                 279 tools/bpf/bpftool/btf_dumper.c 	vlen = BTF_INFO_VLEN(t->info);
t                 281 tools/bpf/bpftool/btf_dumper.c 	m = (struct btf_member *)(t + 1);
t                 315 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t = btf__type_by_id(d->btf, type_id);
t                 319 tools/bpf/bpftool/btf_dumper.c 	jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
t                 320 tools/bpf/bpftool/btf_dumper.c 	ret = btf_dumper_do_type(d, t->type, bit_offset, data);
t                 330 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t;
t                 333 tools/bpf/bpftool/btf_dumper.c 	t = btf__type_by_id(d->btf, type_id);
t                 334 tools/bpf/bpftool/btf_dumper.c 	if (!t)
t                 337 tools/bpf/bpftool/btf_dumper.c 	vlen = BTF_INFO_VLEN(t->info);
t                 338 tools/bpf/bpftool/btf_dumper.c 	vsi = (struct btf_var_secinfo *)(t + 1);
t                 341 tools/bpf/bpftool/btf_dumper.c 	jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
t                 357 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t = btf__type_by_id(d->btf, type_id);
t                 359 tools/bpf/bpftool/btf_dumper.c 	switch (BTF_INFO_KIND(t->info)) {
t                 361 tools/bpf/bpftool/btf_dumper.c 		return btf_dumper_int(t, bit_offset, data, d->jw,
t                 427 tools/bpf/bpftool/btf_dumper.c 	const struct btf_type *t;
t                 434 tools/bpf/bpftool/btf_dumper.c 	t = btf__type_by_id(btf, type_id);
t                 436 tools/bpf/bpftool/btf_dumper.c 	switch (BTF_INFO_KIND(t->info)) {
t                 439 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
t                 443 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 447 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 451 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 454 tools/bpf/bpftool/btf_dumper.c 		array = (struct btf_array *)(t + 1);
t                 459 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_TYPE(t->type);
t                 464 tools/bpf/bpftool/btf_dumper.c 			      BTF_INFO_KFLAG(t->info) ? "union" : "struct",
t                 465 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 469 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_TYPE(t->type);
t                 473 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_TYPE(t->type);
t                 477 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_TYPE(t->type);
t                 480 tools/bpf/bpftool/btf_dumper.c 		pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
t                 485 tools/bpf/bpftool/btf_dumper.c 		proto_type = btf__type_by_id(btf, t->type);
t                 486 tools/bpf/bpftool/btf_dumper.c 		pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
t                 491 tools/bpf/bpftool/btf_dumper.c 		var = (struct btf_var *)(t + 1);
t                 494 tools/bpf/bpftool/btf_dumper.c 		BTF_PRINT_TYPE(t->type);
t                 496 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 500 tools/bpf/bpftool/btf_dumper.c 			      btf__name_by_offset(btf, t->name_off));
t                 145 tools/firewire/decode-fcp.c decode_avc(struct link_transaction *t)
t                 148 tools/firewire/decode-fcp.c 	    (struct avc_frame *) t->request->packet.write_block.data;
t                 174 tools/firewire/decode-fcp.c decode_fcp(struct link_transaction *t)
t                 177 tools/firewire/decode-fcp.c 	    (struct avc_frame *) t->request->packet.write_block.data;
t                 179 tools/firewire/decode-fcp.c 	    ((unsigned long long) t->request->packet.common.offset_high << 32) |
t                 180 tools/firewire/decode-fcp.c 	    t->request->packet.common.offset_low;
t                 182 tools/firewire/decode-fcp.c 	if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
t                 188 tools/firewire/decode-fcp.c 			decode_avc(t);
t                 162 tools/firewire/nosy-dump.c 	struct link_transaction *t;
t                 164 tools/firewire/nosy-dump.c 	list_for_each_entry(t, &pending_transaction_list, link) {
t                 165 tools/firewire/nosy-dump.c 		if (t->request_node == request_node &&
t                 166 tools/firewire/nosy-dump.c 		    t->response_node == response_node &&
t                 167 tools/firewire/nosy-dump.c 		    t->tlabel == tlabel)
t                 168 tools/firewire/nosy-dump.c 			return t;
t                 171 tools/firewire/nosy-dump.c 	t = malloc(sizeof *t);
t                 172 tools/firewire/nosy-dump.c 	if (!t)
t                 174 tools/firewire/nosy-dump.c 	t->request_node = request_node;
t                 175 tools/firewire/nosy-dump.c 	t->response_node = response_node;
t                 176 tools/firewire/nosy-dump.c 	t->tlabel = tlabel;
t                 177 tools/firewire/nosy-dump.c 	list_init(&t->request_list);
t                 178 tools/firewire/nosy-dump.c 	list_init(&t->response_list);
t                 180 tools/firewire/nosy-dump.c 	list_append(&pending_transaction_list, &t->link);
t                 182 tools/firewire/nosy-dump.c 	return t;
t                 186 tools/firewire/nosy-dump.c link_transaction_destroy(struct link_transaction *t)
t                 190 tools/firewire/nosy-dump.c 	while (!list_empty(&t->request_list)) {
t                 191 tools/firewire/nosy-dump.c 		sa = list_head(&t->request_list, struct subaction, link);
t                 195 tools/firewire/nosy-dump.c 	while (!list_empty(&t->response_list)) {
t                 196 tools/firewire/nosy-dump.c 		sa = list_head(&t->response_list, struct subaction, link);
t                 200 tools/firewire/nosy-dump.c 	free(t);
t                 205 tools/firewire/nosy-dump.c 	int (*decode)(struct link_transaction *t);
t                 213 tools/firewire/nosy-dump.c handle_transaction(struct link_transaction *t)
t                 218 tools/firewire/nosy-dump.c 	if (!t->request) {
t                 224 tools/firewire/nosy-dump.c 		if (protocol_decoders[i].decode(t))
t                 230 tools/firewire/nosy-dump.c 	decode_link_packet(&t->request->packet, t->request->length,
t                 232 tools/firewire/nosy-dump.c 	if (t->response)
t                 233 tools/firewire/nosy-dump.c 		decode_link_packet(&t->response->packet, t->request->length,
t                 239 tools/firewire/nosy-dump.c 		list_for_each_entry(sa, &t->request_list, link)
t                 241 tools/firewire/nosy-dump.c 		list_for_each_entry(sa, &t->response_list, link)
t                 246 tools/firewire/nosy-dump.c 	link_transaction_destroy(t);
t                 252 tools/firewire/nosy-dump.c 	struct link_transaction *t;
t                 255 tools/firewire/nosy-dump.c 		t = list_head(&pending_transaction_list,
t                 257 tools/firewire/nosy-dump.c 		list_remove(&t->link);
t                 258 tools/firewire/nosy-dump.c 		link_transaction_destroy(t);
t                 486 tools/firewire/nosy-dump.c 	struct link_transaction *t;
t                 488 tools/firewire/nosy-dump.c 	t = link_transaction_lookup(p->common.source, p->common.destination,
t                 491 tools/firewire/nosy-dump.c 	t->request = sa;
t                 493 tools/firewire/nosy-dump.c 	if (!list_empty(&t->request_list)) {
t                 494 tools/firewire/nosy-dump.c 		prev = list_tail(&t->request_list,
t                 513 tools/firewire/nosy-dump.c 	list_append(&t->request_list, &sa->link);
t                 520 tools/firewire/nosy-dump.c 		list_remove(&t->link);
t                 521 tools/firewire/nosy-dump.c 		handle_transaction(t);
t                 527 tools/firewire/nosy-dump.c 		list_remove(&t->link);
t                 528 tools/firewire/nosy-dump.c 		handle_transaction(t);
t                 551 tools/firewire/nosy-dump.c 	struct link_transaction *t;
t                 553 tools/firewire/nosy-dump.c 	t = link_transaction_lookup(p->common.destination, p->common.source,
t                 555 tools/firewire/nosy-dump.c 	if (list_empty(&t->request_list)) {
t                 560 tools/firewire/nosy-dump.c 	t->response = sa;
t                 562 tools/firewire/nosy-dump.c 	if (!list_empty(&t->response_list)) {
t                 563 tools/firewire/nosy-dump.c 		prev = list_tail(&t->response_list, struct subaction, link);
t                 578 tools/firewire/nosy-dump.c 		prev = list_tail(&t->request_list, struct subaction, link);
t                 592 tools/firewire/nosy-dump.c 	list_append(&t->response_list, &sa->link);
t                 599 tools/firewire/nosy-dump.c 		list_remove(&t->link);
t                 600 tools/firewire/nosy-dump.c 		handle_transaction(t);
t                 172 tools/firewire/nosy-dump.h int decode_fcp(struct link_transaction *t);
t                1655 tools/include/nolibc/nolibc.h 		struct timeval *t;
t                1656 tools/include/nolibc/nolibc.h 	} arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
t                1659 tools/include/nolibc/nolibc.h 	struct timespec t;
t                1662 tools/include/nolibc/nolibc.h 		t.tv_sec  = timeout->tv_sec;
t                1663 tools/include/nolibc/nolibc.h 		t.tv_nsec = timeout->tv_usec * 1000;
t                1665 tools/include/nolibc/nolibc.h 	return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
t                  82 tools/lib/argv_split.c 			char *t;
t                  86 tools/lib/argv_split.c 			t = strndup(p, str-p);
t                  87 tools/lib/argv_split.c 			if (t == NULL)
t                  89 tools/lib/argv_split.c 			*argvp++ = t;
t                  44 tools/lib/bpf/btf.c static int btf_add_type(struct btf *btf, struct btf_type *t)
t                  67 tools/lib/bpf/btf.c 	btf->types[++(btf->nr_types)] = t;
t                 145 tools/lib/bpf/btf.c static int btf_type_size(struct btf_type *t)
t                 148 tools/lib/bpf/btf.c 	__u16 vlen = btf_vlen(t);
t                 150 tools/lib/bpf/btf.c 	switch (btf_kind(t)) {
t                 175 tools/lib/bpf/btf.c 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
t                 188 tools/lib/bpf/btf.c 		struct btf_type *t = next_type;
t                 192 tools/lib/bpf/btf.c 		type_size = btf_type_size(t);
t                 196 tools/lib/bpf/btf.c 		err = btf_add_type(btf, t);
t                 217 tools/lib/bpf/btf.c static bool btf_type_is_void(const struct btf_type *t)
t                 219 tools/lib/bpf/btf.c 	return t == &btf_void || btf_is_fwd(t);
t                 222 tools/lib/bpf/btf.c static bool btf_type_is_void_or_null(const struct btf_type *t)
t                 224 tools/lib/bpf/btf.c 	return !t || btf_type_is_void(t);
t                 232 tools/lib/bpf/btf.c 	const struct btf_type *t;
t                 237 tools/lib/bpf/btf.c 	t = btf__type_by_id(btf, type_id);
t                 238 tools/lib/bpf/btf.c 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
t                 240 tools/lib/bpf/btf.c 		switch (btf_kind(t)) {
t                 246 tools/lib/bpf/btf.c 			size = t->size;
t                 256 tools/lib/bpf/btf.c 			type_id = t->type;
t                 259 tools/lib/bpf/btf.c 			array = btf_array(t);
t                 269 tools/lib/bpf/btf.c 		t = btf__type_by_id(btf, type_id);
t                 283 tools/lib/bpf/btf.c 	const struct btf_type *t;
t                 286 tools/lib/bpf/btf.c 	t = btf__type_by_id(btf, type_id);
t                 288 tools/lib/bpf/btf.c 	       !btf_type_is_void_or_null(t) &&
t                 289 tools/lib/bpf/btf.c 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
t                 290 tools/lib/bpf/btf.c 		type_id = t->type;
t                 291 tools/lib/bpf/btf.c 		t = btf__type_by_id(btf, type_id);
t                 295 tools/lib/bpf/btf.c 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
t                 309 tools/lib/bpf/btf.c 		const struct btf_type *t = btf->types[i];
t                 310 tools/lib/bpf/btf.c 		const char *name = btf__name_by_offset(btf, t->name_off);
t                 506 tools/lib/bpf/btf.c 			     struct btf_type *t)
t                 508 tools/lib/bpf/btf.c 	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
t                 509 tools/lib/bpf/btf.c 	const char *name = btf__name_by_offset(btf, t->name_off);
t                 521 tools/lib/bpf/btf.c 	if (ret || !size || (t->size && t->size != size)) {
t                 526 tools/lib/bpf/btf.c 	t->size = size;
t                 528 tools/lib/bpf/btf.c 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
t                 556 tools/lib/bpf/btf.c 	qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
t                 566 tools/lib/bpf/btf.c 		struct btf_type *t = btf->types[i];
t                 573 tools/lib/bpf/btf.c 		if (btf_is_datasec(t)) {
t                 574 tools/lib/bpf/btf.c 			err = btf_fixup_datasec(obj, btf, t);
t                1418 tools/lib/bpf/btf.c 		struct btf_type *t = d->btf->types[i];
t                1421 tools/lib/bpf/btf.c 		if (btf_is_var(t) || btf_is_datasec(t))
t                1454 tools/lib/bpf/btf.c 	struct btf_type *t;
t                1457 tools/lib/bpf/btf.c 		t = d->btf->types[i];
t                1458 tools/lib/bpf/btf.c 		r = fn(&t->name_off, ctx);
t                1462 tools/lib/bpf/btf.c 		switch (btf_kind(t)) {
t                1465 tools/lib/bpf/btf.c 			struct btf_member *m = btf_members(t);
t                1466 tools/lib/bpf/btf.c 			__u16 vlen = btf_vlen(t);
t                1477 tools/lib/bpf/btf.c 			struct btf_enum *m = btf_enum(t);
t                1478 tools/lib/bpf/btf.c 			__u16 vlen = btf_vlen(t);
t                1489 tools/lib/bpf/btf.c 			struct btf_param *m = btf_params(t);
t                1490 tools/lib/bpf/btf.c 			__u16 vlen = btf_vlen(t);
t                1724 tools/lib/bpf/btf.c static long btf_hash_common(struct btf_type *t)
t                1728 tools/lib/bpf/btf.c 	h = hash_combine(0, t->name_off);
t                1729 tools/lib/bpf/btf.c 	h = hash_combine(h, t->info);
t                1730 tools/lib/bpf/btf.c 	h = hash_combine(h, t->size);
t                1742 tools/lib/bpf/btf.c static long btf_hash_int(struct btf_type *t)
t                1744 tools/lib/bpf/btf.c 	__u32 info = *(__u32 *)(t + 1);
t                1747 tools/lib/bpf/btf.c 	h = btf_hash_common(t);
t                1765 tools/lib/bpf/btf.c static long btf_hash_enum(struct btf_type *t)
t                1770 tools/lib/bpf/btf.c 	h = hash_combine(0, t->name_off);
t                1771 tools/lib/bpf/btf.c 	h = hash_combine(h, t->info & ~0xffff);
t                1772 tools/lib/bpf/btf.c 	h = hash_combine(h, t->size);
t                1798 tools/lib/bpf/btf.c static inline bool btf_is_enum_fwd(struct btf_type *t)
t                1800 tools/lib/bpf/btf.c 	return btf_is_enum(t) && btf_vlen(t) == 0;
t                1818 tools/lib/bpf/btf.c static long btf_hash_struct(struct btf_type *t)
t                1820 tools/lib/bpf/btf.c 	const struct btf_member *member = btf_members(t);
t                1821 tools/lib/bpf/btf.c 	__u32 vlen = btf_vlen(t);
t                1822 tools/lib/bpf/btf.c 	long h = btf_hash_common(t);
t                1865 tools/lib/bpf/btf.c static long btf_hash_array(struct btf_type *t)
t                1867 tools/lib/bpf/btf.c 	const struct btf_array *info = btf_array(t);
t                1868 tools/lib/bpf/btf.c 	long h = btf_hash_common(t);
t                1915 tools/lib/bpf/btf.c static long btf_hash_fnproto(struct btf_type *t)
t                1917 tools/lib/bpf/btf.c 	const struct btf_param *member = btf_params(t);
t                1918 tools/lib/bpf/btf.c 	__u16 vlen = btf_vlen(t);
t                1919 tools/lib/bpf/btf.c 	long h = btf_hash_common(t);
t                1993 tools/lib/bpf/btf.c 	struct btf_type *t = d->btf->types[type_id];
t                2001 tools/lib/bpf/btf.c 	switch (btf_kind(t)) {
t                2017 tools/lib/bpf/btf.c 		h = btf_hash_int(t);
t                2021 tools/lib/bpf/btf.c 			if (btf_equal_int(t, cand)) {
t                2029 tools/lib/bpf/btf.c 		h = btf_hash_enum(t);
t                2033 tools/lib/bpf/btf.c 			if (btf_equal_enum(t, cand)) {
t                2039 tools/lib/bpf/btf.c 			if (btf_compat_enum(t, cand)) {
t                2040 tools/lib/bpf/btf.c 				if (btf_is_enum_fwd(t)) {
t                2052 tools/lib/bpf/btf.c 		h = btf_hash_common(t);
t                2056 tools/lib/bpf/btf.c 			if (btf_equal_common(t, cand)) {
t                2127 tools/lib/bpf/btf.c static inline __u16 btf_fwd_kind(struct btf_type *t)
t                2129 tools/lib/bpf/btf.c 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
t                2458 tools/lib/bpf/btf.c 	struct btf_type *cand_type, *t;
t                2469 tools/lib/bpf/btf.c 	t = d->btf->types[type_id];
t                2470 tools/lib/bpf/btf.c 	kind = btf_kind(t);
t                2475 tools/lib/bpf/btf.c 	h = btf_hash_struct(t);
t                2491 tools/lib/bpf/btf.c 		if (!btf_shallow_equal_struct(t, cand_type))
t                2552 tools/lib/bpf/btf.c 	struct btf_type *t, *cand;
t                2562 tools/lib/bpf/btf.c 	t = d->btf->types[type_id];
t                2565 tools/lib/bpf/btf.c 	switch (btf_kind(t)) {
t                2572 tools/lib/bpf/btf.c 		ref_type_id = btf_dedup_ref_type(d, t->type);
t                2575 tools/lib/bpf/btf.c 		t->type = ref_type_id;
t                2577 tools/lib/bpf/btf.c 		h = btf_hash_common(t);
t                2581 tools/lib/bpf/btf.c 			if (btf_equal_common(t, cand)) {
t                2589 tools/lib/bpf/btf.c 		struct btf_array *info = btf_array(t);
t                2601 tools/lib/bpf/btf.c 		h = btf_hash_array(t);
t                2605 tools/lib/bpf/btf.c 			if (btf_equal_array(t, cand)) {
t                2618 tools/lib/bpf/btf.c 		ref_type_id = btf_dedup_ref_type(d, t->type);
t                2621 tools/lib/bpf/btf.c 		t->type = ref_type_id;
t                2623 tools/lib/bpf/btf.c 		vlen = btf_vlen(t);
t                2624 tools/lib/bpf/btf.c 		param = btf_params(t);
t                2633 tools/lib/bpf/btf.c 		h = btf_hash_fnproto(t);
t                2637 tools/lib/bpf/btf.c 			if (btf_equal_fnproto(t, cand)) {
t                2761 tools/lib/bpf/btf.c 	struct btf_type *t = d->btf->types[type_id];
t                2764 tools/lib/bpf/btf.c 	switch (btf_kind(t)) {
t                2777 tools/lib/bpf/btf.c 		r = btf_dedup_remap_type_id(d, t->type);
t                2780 tools/lib/bpf/btf.c 		t->type = r;
t                2784 tools/lib/bpf/btf.c 		struct btf_array *arr_info = btf_array(t);
t                2799 tools/lib/bpf/btf.c 		struct btf_member *member = btf_members(t);
t                2800 tools/lib/bpf/btf.c 		__u16 vlen = btf_vlen(t);
t                2813 tools/lib/bpf/btf.c 		struct btf_param *param = btf_params(t);
t                2814 tools/lib/bpf/btf.c 		__u16 vlen = btf_vlen(t);
t                2816 tools/lib/bpf/btf.c 		r = btf_dedup_remap_type_id(d, t->type);
t                2819 tools/lib/bpf/btf.c 		t->type = r;
t                2832 tools/lib/bpf/btf.c 		struct btf_var_secinfo *var = btf_var_secinfos(t);
t                2833 tools/lib/bpf/btf.c 		__u16 vlen = btf_vlen(t);
t                 131 tools/lib/bpf/btf.h static inline __u16 btf_kind(const struct btf_type *t)
t                 133 tools/lib/bpf/btf.h 	return BTF_INFO_KIND(t->info);
t                 136 tools/lib/bpf/btf.h static inline __u16 btf_vlen(const struct btf_type *t)
t                 138 tools/lib/bpf/btf.h 	return BTF_INFO_VLEN(t->info);
t                 141 tools/lib/bpf/btf.h static inline bool btf_kflag(const struct btf_type *t)
t                 143 tools/lib/bpf/btf.h 	return BTF_INFO_KFLAG(t->info);
t                 146 tools/lib/bpf/btf.h static inline bool btf_is_int(const struct btf_type *t)
t                 148 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_INT;
t                 151 tools/lib/bpf/btf.h static inline bool btf_is_ptr(const struct btf_type *t)
t                 153 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_PTR;
t                 156 tools/lib/bpf/btf.h static inline bool btf_is_array(const struct btf_type *t)
t                 158 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_ARRAY;
t                 161 tools/lib/bpf/btf.h static inline bool btf_is_struct(const struct btf_type *t)
t                 163 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_STRUCT;
t                 166 tools/lib/bpf/btf.h static inline bool btf_is_union(const struct btf_type *t)
t                 168 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_UNION;
t                 171 tools/lib/bpf/btf.h static inline bool btf_is_composite(const struct btf_type *t)
t                 173 tools/lib/bpf/btf.h 	__u16 kind = btf_kind(t);
t                 178 tools/lib/bpf/btf.h static inline bool btf_is_enum(const struct btf_type *t)
t                 180 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_ENUM;
t                 183 tools/lib/bpf/btf.h static inline bool btf_is_fwd(const struct btf_type *t)
t                 185 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_FWD;
t                 188 tools/lib/bpf/btf.h static inline bool btf_is_typedef(const struct btf_type *t)
t                 190 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_TYPEDEF;
t                 193 tools/lib/bpf/btf.h static inline bool btf_is_volatile(const struct btf_type *t)
t                 195 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_VOLATILE;
t                 198 tools/lib/bpf/btf.h static inline bool btf_is_const(const struct btf_type *t)
t                 200 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_CONST;
t                 203 tools/lib/bpf/btf.h static inline bool btf_is_restrict(const struct btf_type *t)
t                 205 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_RESTRICT;
t                 208 tools/lib/bpf/btf.h static inline bool btf_is_mod(const struct btf_type *t)
t                 210 tools/lib/bpf/btf.h 	__u16 kind = btf_kind(t);
t                 217 tools/lib/bpf/btf.h static inline bool btf_is_func(const struct btf_type *t)
t                 219 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_FUNC;
t                 222 tools/lib/bpf/btf.h static inline bool btf_is_func_proto(const struct btf_type *t)
t                 224 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_FUNC_PROTO;
t                 227 tools/lib/bpf/btf.h static inline bool btf_is_var(const struct btf_type *t)
t                 229 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_VAR;
t                 232 tools/lib/bpf/btf.h static inline bool btf_is_datasec(const struct btf_type *t)
t                 234 tools/lib/bpf/btf.h 	return btf_kind(t) == BTF_KIND_DATASEC;
t                 237 tools/lib/bpf/btf.h static inline __u8 btf_int_encoding(const struct btf_type *t)
t                 239 tools/lib/bpf/btf.h 	return BTF_INT_ENCODING(*(__u32 *)(t + 1));
t                 242 tools/lib/bpf/btf.h static inline __u8 btf_int_offset(const struct btf_type *t)
t                 244 tools/lib/bpf/btf.h 	return BTF_INT_OFFSET(*(__u32 *)(t + 1));
t                 247 tools/lib/bpf/btf.h static inline __u8 btf_int_bits(const struct btf_type *t)
t                 249 tools/lib/bpf/btf.h 	return BTF_INT_BITS(*(__u32 *)(t + 1));
t                 252 tools/lib/bpf/btf.h static inline struct btf_array *btf_array(const struct btf_type *t)
t                 254 tools/lib/bpf/btf.h 	return (struct btf_array *)(t + 1);
t                 257 tools/lib/bpf/btf.h static inline struct btf_enum *btf_enum(const struct btf_type *t)
t                 259 tools/lib/bpf/btf.h 	return (struct btf_enum *)(t + 1);
t                 262 tools/lib/bpf/btf.h static inline struct btf_member *btf_members(const struct btf_type *t)
t                 264 tools/lib/bpf/btf.h 	return (struct btf_member *)(t + 1);
t                 268 tools/lib/bpf/btf.h static inline __u32 btf_member_bit_offset(const struct btf_type *t,
t                 271 tools/lib/bpf/btf.h 	const struct btf_member *m = btf_members(t) + member_idx;
t                 272 tools/lib/bpf/btf.h 	bool kflag = btf_kflag(t);
t                 280 tools/lib/bpf/btf.h static inline __u32 btf_member_bitfield_size(const struct btf_type *t,
t                 283 tools/lib/bpf/btf.h 	const struct btf_member *m = btf_members(t) + member_idx;
t                 284 tools/lib/bpf/btf.h 	bool kflag = btf_kflag(t);
t                 289 tools/lib/bpf/btf.h static inline struct btf_param *btf_params(const struct btf_type *t)
t                 291 tools/lib/bpf/btf.h 	return (struct btf_param *)(t + 1);
t                 294 tools/lib/bpf/btf.h static inline struct btf_var *btf_var(const struct btf_type *t)
t                 296 tools/lib/bpf/btf.h 	return (struct btf_var *)(t + 1);
t                 300 tools/lib/bpf/btf.h btf_var_secinfos(const struct btf_type *t)
t                 302 tools/lib/bpf/btf.h 	return (struct btf_var_secinfo *)(t + 1);
t                 252 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                 256 tools/lib/bpf/btf_dump.c 		t = btf__type_by_id(d->btf, i);
t                 257 tools/lib/bpf/btf_dump.c 		vlen = btf_vlen(t);
t                 259 tools/lib/bpf/btf_dump.c 		switch (btf_kind(t)) {
t                 272 tools/lib/bpf/btf_dump.c 			d->type_states[t->type].referenced = 1;
t                 276 tools/lib/bpf/btf_dump.c 			const struct btf_array *a = btf_array(t);
t                 284 tools/lib/bpf/btf_dump.c 			const struct btf_member *m = btf_members(t);
t                 291 tools/lib/bpf/btf_dump.c 			const struct btf_param *p = btf_params(t);
t                 298 tools/lib/bpf/btf_dump.c 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
t                 417 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                 425 tools/lib/bpf/btf_dump.c 	t = btf__type_by_id(d->btf, id);
t                 429 tools/lib/bpf/btf_dump.c 		if (btf_is_composite(t) && through_ptr && t->name_off != 0)
t                 435 tools/lib/bpf/btf_dump.c 	switch (btf_kind(t)) {
t                 441 tools/lib/bpf/btf_dump.c 		err = btf_dump_order_type(d, t->type, true);
t                 446 tools/lib/bpf/btf_dump.c 		return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
t                 450 tools/lib/bpf/btf_dump.c 		const struct btf_member *m = btf_members(t);
t                 456 tools/lib/bpf/btf_dump.c 		if (through_ptr && t->name_off != 0)
t                 461 tools/lib/bpf/btf_dump.c 		vlen = btf_vlen(t);
t                 468 tools/lib/bpf/btf_dump.c 		if (t->name_off != 0) {
t                 484 tools/lib/bpf/btf_dump.c 		if (t->name_off != 0 || !tstate->referenced) {
t                 495 tools/lib/bpf/btf_dump.c 		is_strong = btf_dump_order_type(d, t->type, through_ptr);
t                 514 tools/lib/bpf/btf_dump.c 		return btf_dump_order_type(d, t->type, through_ptr);
t                 517 tools/lib/bpf/btf_dump.c 		const struct btf_param *p = btf_params(t);
t                 520 tools/lib/bpf/btf_dump.c 		err = btf_dump_order_type(d, t->type, through_ptr);
t                 525 tools/lib/bpf/btf_dump.c 		vlen = btf_vlen(t);
t                 547 tools/lib/bpf/btf_dump.c 				     const struct btf_type *t);
t                 549 tools/lib/bpf/btf_dump.c 				     const struct btf_type *t, int lvl);
t                 552 tools/lib/bpf/btf_dump.c 				   const struct btf_type *t);
t                 554 tools/lib/bpf/btf_dump.c 				   const struct btf_type *t, int lvl);
t                 557 tools/lib/bpf/btf_dump.c 				  const struct btf_type *t);
t                 560 tools/lib/bpf/btf_dump.c 				      const struct btf_type *t, int lvl);
t                 581 tools/lib/bpf/btf_dump.c 	const struct btf_type *t = btf__type_by_id(d->btf, id);
t                 589 tools/lib/bpf/btf_dump.c 	if (t->name_off == 0)
t                 591 tools/lib/bpf/btf_dump.c 	return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
t                 616 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                 622 tools/lib/bpf/btf_dump.c 	t = btf__type_by_id(d->btf, id);
t                 623 tools/lib/bpf/btf_dump.c 	kind = btf_kind(t);
t                 638 tools/lib/bpf/btf_dump.c 			if (t->name_off == 0) {
t                 643 tools/lib/bpf/btf_dump.c 			btf_dump_emit_struct_fwd(d, id, t);
t                 654 tools/lib/bpf/btf_dump.c 				btf_dump_emit_typedef_def(d, id, t, 0);
t                 672 tools/lib/bpf/btf_dump.c 			btf_dump_emit_enum_def(d, id, t, 0);
t                 681 tools/lib/bpf/btf_dump.c 		btf_dump_emit_type(d, t->type, cont_id);
t                 684 tools/lib/bpf/btf_dump.c 		btf_dump_emit_type(d, btf_array(t)->type, cont_id);
t                 687 tools/lib/bpf/btf_dump.c 		btf_dump_emit_fwd_def(d, id, t);
t                 693 tools/lib/bpf/btf_dump.c 		btf_dump_emit_type(d, t->type, id);
t                 702 tools/lib/bpf/btf_dump.c 			btf_dump_emit_typedef_def(d, id, t, 0);
t                 717 tools/lib/bpf/btf_dump.c 		if (top_level_def || t->name_off == 0) {
t                 718 tools/lib/bpf/btf_dump.c 			const struct btf_member *m = btf_members(t);
t                 719 tools/lib/bpf/btf_dump.c 			__u16 vlen = btf_vlen(t);
t                 722 tools/lib/bpf/btf_dump.c 			new_cont_id = t->name_off == 0 ? cont_id : id;
t                 726 tools/lib/bpf/btf_dump.c 			btf_dump_emit_struct_fwd(d, id, t);
t                 732 tools/lib/bpf/btf_dump.c 			btf_dump_emit_struct_def(d, id, t, 0);
t                 740 tools/lib/bpf/btf_dump.c 		const struct btf_param *p = btf_params(t);
t                 741 tools/lib/bpf/btf_dump.c 		__u16 vlen = btf_vlen(t);
t                 744 tools/lib/bpf/btf_dump.c 		btf_dump_emit_type(d, t->type, cont_id);
t                 757 tools/lib/bpf/btf_dump.c 	const struct btf_type *t = btf__type_by_id(btf, id);
t                 758 tools/lib/bpf/btf_dump.c 	__u16 kind = btf_kind(t);
t                 763 tools/lib/bpf/btf_dump.c 		return min(sizeof(void *), t->size);
t                 770 tools/lib/bpf/btf_dump.c 		return btf_align_of(btf, t->type);
t                 772 tools/lib/bpf/btf_dump.c 		return btf_align_of(btf, btf_array(t)->type);
t                 775 tools/lib/bpf/btf_dump.c 		const struct btf_member *m = btf_members(t);
t                 776 tools/lib/bpf/btf_dump.c 		__u16 vlen = btf_vlen(t);
t                 785 tools/lib/bpf/btf_dump.c 		pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t));
t                 791 tools/lib/bpf/btf_dump.c 				 const struct btf_type *t)
t                 799 tools/lib/bpf/btf_dump.c 	if (t->size % align)
t                 802 tools/lib/bpf/btf_dump.c 	m = btf_members(t);
t                 803 tools/lib/bpf/btf_dump.c 	vlen = btf_vlen(t);
t                 807 tools/lib/bpf/btf_dump.c 		bit_sz = btf_member_bitfield_size(t, i);
t                 861 tools/lib/bpf/btf_dump.c 				     const struct btf_type *t)
t                 864 tools/lib/bpf/btf_dump.c 			btf_is_struct(t) ? "struct" : "union",
t                 870 tools/lib/bpf/btf_dump.c 				     const struct btf_type *t,
t                 873 tools/lib/bpf/btf_dump.c 	const struct btf_member *m = btf_members(t);
t                 874 tools/lib/bpf/btf_dump.c 	bool is_struct = btf_is_struct(t);
t                 876 tools/lib/bpf/btf_dump.c 	__u16 vlen = btf_vlen(t);
t                 878 tools/lib/bpf/btf_dump.c 	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
t                 882 tools/lib/bpf/btf_dump.c 			t->name_off ? " " : "",
t                 890 tools/lib/bpf/btf_dump.c 		m_sz = btf_member_bitfield_size(t, i);
t                 891 tools/lib/bpf/btf_dump.c 		m_off = btf_member_bit_offset(t, i);
t                 911 tools/lib/bpf/btf_dump.c 		btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
t                 923 tools/lib/bpf/btf_dump.c 				   const struct btf_type *t)
t                 929 tools/lib/bpf/btf_dump.c 				   const struct btf_type *t,
t                 932 tools/lib/bpf/btf_dump.c 	const struct btf_enum *v = btf_enum(t);
t                 933 tools/lib/bpf/btf_dump.c 	__u16 vlen = btf_vlen(t);
t                 939 tools/lib/bpf/btf_dump.c 			t->name_off ? " " : "",
t                 963 tools/lib/bpf/btf_dump.c 				  const struct btf_type *t)
t                 967 tools/lib/bpf/btf_dump.c 	if (btf_kflag(t))
t                 974 tools/lib/bpf/btf_dump.c 				     const struct btf_type *t, int lvl)
t                 979 tools/lib/bpf/btf_dump.c 	btf_dump_emit_type_decl(d, t->type, name, lvl);
t                1047 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                1068 tools/lib/bpf/btf_dump.c 		t = btf__type_by_id(d->btf, id);
t                1069 tools/lib/bpf/btf_dump.c 		switch (btf_kind(t)) {
t                1075 tools/lib/bpf/btf_dump.c 			id = t->type;
t                1078 tools/lib/bpf/btf_dump.c 			id = btf_array(t)->type;
t                1089 tools/lib/bpf/btf_dump.c 				   btf_kind(t), id);
t                1120 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                1125 tools/lib/bpf/btf_dump.c 		t = btf__type_by_id(d->btf, id);
t                1127 tools/lib/bpf/btf_dump.c 		switch (btf_kind(t)) {
t                1166 tools/lib/bpf/btf_dump.c 	const struct btf_type *t;
t                1181 tools/lib/bpf/btf_dump.c 		t = btf__type_by_id(d->btf, id);
t                1182 tools/lib/bpf/btf_dump.c 		kind = btf_kind(t);
t                1187 tools/lib/bpf/btf_dump.c 			name = btf_name_of(d, t->name_off);
t                1194 tools/lib/bpf/btf_dump.c 			if (t->name_off == 0)
t                1195 tools/lib/bpf/btf_dump.c 				btf_dump_emit_struct_def(d, id, t, lvl);
t                1197 tools/lib/bpf/btf_dump.c 				btf_dump_emit_struct_fwd(d, id, t);
t                1202 tools/lib/bpf/btf_dump.c 			if (t->name_off == 0)
t                1203 tools/lib/bpf/btf_dump.c 				btf_dump_emit_enum_def(d, id, t, lvl);
t                1205 tools/lib/bpf/btf_dump.c 				btf_dump_emit_enum_fwd(d, id, t);
t                1209 tools/lib/bpf/btf_dump.c 			btf_dump_emit_fwd_def(d, id, t);
t                1228 tools/lib/bpf/btf_dump.c 			const struct btf_array *a = btf_array(t);
t                1273 tools/lib/bpf/btf_dump.c 			const struct btf_param *p = btf_params(t);
t                1274 tools/lib/bpf/btf_dump.c 			__u16 vlen = btf_vlen(t);
t                1343 tools/lib/bpf/btf_dump.c 	const struct btf_type *t = btf__type_by_id(d->btf, id);
t                1344 tools/lib/bpf/btf_dump.c 	const char *orig_name = btf_name_of(d, t->name_off);
t                1348 tools/lib/bpf/btf_dump.c 	if (t->name_off == 0)
t                1021 tools/lib/bpf/libbpf.c 	const struct btf_type *t = btf__type_by_id(btf, id);
t                1026 tools/lib/bpf/libbpf.c 	while (btf_is_mod(t) || btf_is_typedef(t)) {
t                1028 tools/lib/bpf/libbpf.c 			*res_id = t->type;
t                1029 tools/lib/bpf/libbpf.c 		t = btf__type_by_id(btf, t->type);
t                1032 tools/lib/bpf/libbpf.c 	return t;
t                1045 tools/lib/bpf/libbpf.c 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
t                1050 tools/lib/bpf/libbpf.c 	if (!btf_is_ptr(t)) {
t                1052 tools/lib/bpf/libbpf.c 			   map_name, name, btf_kind(t));
t                1056 tools/lib/bpf/libbpf.c 	arr_t = btf__type_by_id(btf, t->type);
t                1059 tools/lib/bpf/libbpf.c 			   map_name, name, t->type);
t                1077 tools/lib/bpf/libbpf.c 	const struct btf_type *var, *def, *t;
t                1182 tools/lib/bpf/libbpf.c 			t = btf__type_by_id(obj->btf, m->type);
t                1183 tools/lib/bpf/libbpf.c 			if (!t) {
t                1188 tools/lib/bpf/libbpf.c 			if (!btf_is_ptr(t)) {
t                1190 tools/lib/bpf/libbpf.c 					   map_name, btf_kind(t));
t                1193 tools/lib/bpf/libbpf.c 			sz = btf__resolve_size(obj->btf, t->type);
t                1196 tools/lib/bpf/libbpf.c 					   map_name, t->type, sz);
t                1200 tools/lib/bpf/libbpf.c 				 map_name, t->type, sz);
t                1207 tools/lib/bpf/libbpf.c 			map->btf_key_type_id = t->type;
t                1225 tools/lib/bpf/libbpf.c 			t = btf__type_by_id(obj->btf, m->type);
t                1226 tools/lib/bpf/libbpf.c 			if (!t) {
t                1231 tools/lib/bpf/libbpf.c 			if (!btf_is_ptr(t)) {
t                1233 tools/lib/bpf/libbpf.c 					   map_name, btf_kind(t));
t                1236 tools/lib/bpf/libbpf.c 			sz = btf__resolve_size(obj->btf, t->type);
t                1239 tools/lib/bpf/libbpf.c 					   map_name, t->type, sz);
t                1243 tools/lib/bpf/libbpf.c 				 map_name, t->type, sz);
t                1250 tools/lib/bpf/libbpf.c 			map->btf_value_type_id = t->type;
t                1274 tools/lib/bpf/libbpf.c 	const struct btf_type *t;
t                1293 tools/lib/bpf/libbpf.c 		t = btf__type_by_id(obj->btf, i);
t                1294 tools/lib/bpf/libbpf.c 		if (!btf_is_datasec(t))
t                1296 tools/lib/bpf/libbpf.c 		name = btf__name_by_offset(obj->btf, t->name_off);
t                1298 tools/lib/bpf/libbpf.c 			sec = t;
t                1367 tools/lib/bpf/libbpf.c 	struct btf_type *t;
t                1374 tools/lib/bpf/libbpf.c 		t = (struct btf_type *)btf__type_by_id(btf, i);
t                1376 tools/lib/bpf/libbpf.c 		if (!has_datasec && btf_is_var(t)) {
t                1378 tools/lib/bpf/libbpf.c 			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
t                1384 tools/lib/bpf/libbpf.c 			t->size = 1;
t                1385 tools/lib/bpf/libbpf.c 			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
t                1386 tools/lib/bpf/libbpf.c 		} else if (!has_datasec && btf_is_datasec(t)) {
t                1388 tools/lib/bpf/libbpf.c 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
t                1389 tools/lib/bpf/libbpf.c 			struct btf_member *m = btf_members(t);
t                1393 tools/lib/bpf/libbpf.c 			name = (char *)btf__name_by_offset(btf, t->name_off);
t                1400 tools/lib/bpf/libbpf.c 			vlen = btf_vlen(t);
t                1401 tools/lib/bpf/libbpf.c 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
t                1410 tools/lib/bpf/libbpf.c 		} else if (!has_func && btf_is_func_proto(t)) {
t                1412 tools/lib/bpf/libbpf.c 			vlen = btf_vlen(t);
t                1413 tools/lib/bpf/libbpf.c 			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
t                1414 tools/lib/bpf/libbpf.c 			t->size = sizeof(__u32); /* kernel enforced */
t                1415 tools/lib/bpf/libbpf.c 		} else if (!has_func && btf_is_func(t)) {
t                1417 tools/lib/bpf/libbpf.c 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
t                2370 tools/lib/bpf/libbpf.c 	const struct btf_type *t;
t                2397 tools/lib/bpf/libbpf.c 	t = skip_mods_and_typedefs(btf, type_id, &id);
t                2398 tools/lib/bpf/libbpf.c 	if (!t)
t                2412 tools/lib/bpf/libbpf.c 		t = skip_mods_and_typedefs(btf, id, &id);
t                2413 tools/lib/bpf/libbpf.c 		if (!t)
t                2418 tools/lib/bpf/libbpf.c 		if (btf_is_composite(t)) {
t                2422 tools/lib/bpf/libbpf.c 			if (access_idx >= btf_vlen(t))
t                2424 tools/lib/bpf/libbpf.c 			if (btf_member_bitfield_size(t, access_idx))
t                2427 tools/lib/bpf/libbpf.c 			offset = btf_member_bit_offset(t, access_idx);
t                2432 tools/lib/bpf/libbpf.c 			m = btf_members(t) + access_idx;
t                2445 tools/lib/bpf/libbpf.c 		} else if (btf_is_array(t)) {
t                2446 tools/lib/bpf/libbpf.c 			const struct btf_array *a = btf_array(t);
t                2448 tools/lib/bpf/libbpf.c 			t = skip_mods_and_typedefs(btf, a->type, &id);
t                2449 tools/lib/bpf/libbpf.c 			if (!t || access_idx >= a->nelems)
t                2462 tools/lib/bpf/libbpf.c 				   type_id, spec_str, i, id, btf_kind(t));
t                2512 tools/lib/bpf/libbpf.c 	const struct btf_type *t;
t                2517 tools/lib/bpf/libbpf.c 	t = btf__type_by_id(local_btf, local_type_id);
t                2518 tools/lib/bpf/libbpf.c 	if (!t)
t                2521 tools/lib/bpf/libbpf.c 	local_name = btf__name_by_offset(local_btf, t->name_off);
t                2532 tools/lib/bpf/libbpf.c 		t = btf__type_by_id(targ_btf, i);
t                2533 tools/lib/bpf/libbpf.c 		targ_name = btf__name_by_offset(targ_btf, t->name_off);
t                2916 tools/lib/bpf/libbpf.c 	const struct btf_type *t;
t                2922 tools/lib/bpf/libbpf.c 	t = btf__type_by_id(spec->btf, type_id);
t                2923 tools/lib/bpf/libbpf.c 	s = btf__name_by_offset(spec->btf, t->name_off);
t                 311 tools/lib/bpf/netlink.c 	struct tcmsg *t = NLMSG_DATA(nlh);
t                 314 tools/lib/bpf/netlink.c 	len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
t                 315 tools/lib/bpf/netlink.c 	attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
t                 319 tools/lib/bpf/netlink.c 	return dump_class_nlmsg(cookie, t, tb);
t                 327 tools/lib/bpf/netlink.c 		struct tcmsg t;
t                 332 tools/lib/bpf/netlink.c 		.t.tcm_family = AF_UNSPEC,
t                 333 tools/lib/bpf/netlink.c 		.t.tcm_ifindex = ifindex,
t                 350 tools/lib/bpf/netlink.c 	struct tcmsg *t = NLMSG_DATA(nlh);
t                 353 tools/lib/bpf/netlink.c 	len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
t                 354 tools/lib/bpf/netlink.c 	attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
t                 358 tools/lib/bpf/netlink.c 	return dump_qdisc_nlmsg(cookie, t, tb);
t                 366 tools/lib/bpf/netlink.c 		struct tcmsg t;
t                 371 tools/lib/bpf/netlink.c 		.t.tcm_family = AF_UNSPEC,
t                 372 tools/lib/bpf/netlink.c 		.t.tcm_ifindex = ifindex,
t                 389 tools/lib/bpf/netlink.c 	struct tcmsg *t = NLMSG_DATA(nlh);
t                 392 tools/lib/bpf/netlink.c 	len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
t                 393 tools/lib/bpf/netlink.c 	attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
t                 397 tools/lib/bpf/netlink.c 	return dump_filter_nlmsg(cookie, t, tb);
t                 405 tools/lib/bpf/netlink.c 		struct tcmsg t;
t                 410 tools/lib/bpf/netlink.c 		.t.tcm_family = AF_UNSPEC,
t                 411 tools/lib/bpf/netlink.c 		.t.tcm_ifindex = ifindex,
t                 412 tools/lib/bpf/netlink.c 		.t.tcm_parent = handle,
t                  26 tools/lib/lockdep/tests/ABBA_2threads.c 	pthread_t t;
t                  30 tools/lib/lockdep/tests/ABBA_2threads.c 	if (pthread_create(&t, NULL, ba_lock, NULL)) {
t                  44 tools/lib/lockdep/tests/ABBA_2threads.c 	pthread_join(t, NULL);
t                 390 tools/perf/bench/epoll-ctl.c 		unsigned long t[EPOLL_NR_OPS];
t                 393 tools/perf/bench/epoll-ctl.c 			t[j] = worker[i].ops[j];
t                 394 tools/perf/bench/epoll-ctl.c 			update_stats(&all_stats[j], t[j]);
t                 400 tools/perf/bench/epoll-ctl.c 			       t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
t                 405 tools/perf/bench/epoll-ctl.c 			       t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
t                 522 tools/perf/bench/epoll-wait.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
t                 524 tools/perf/bench/epoll-wait.c 		update_stats(&throughput_stats, t);
t                 528 tools/perf/bench/epoll-wait.c 			       worker[i].tid, &worker[i].fdmap[0], t);
t                 532 tools/perf/bench/epoll-wait.c 			       &worker[i].fdmap[nfds-1], t);
t                 207 tools/perf/bench/futex-hash.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
t                 208 tools/perf/bench/futex-hash.c 		update_stats(&throughput_stats, t);
t                 212 tools/perf/bench/futex-hash.c 				       worker[i].tid, &worker[i].futex[0], t);
t                 216 tools/perf/bench/futex-hash.c 				       &worker[i].futex[nfutexes-1], t);
t                 214 tools/perf/bench/futex-lock-pi.c 		unsigned long t = worker[i].ops/runtime.tv_sec;
t                 216 tools/perf/bench/futex-lock-pi.c 		update_stats(&throughput_stats, t);
t                 219 tools/perf/bench/futex-lock-pi.c 			       worker[i].tid, worker[i].futex, t);
t                 495 tools/perf/bench/numa.c 	int t;
t                 503 tools/perf/bench/numa.c 	t = 0;
t                 576 tools/perf/bench/numa.c 				if (t >= g->p.nr_tasks) {
t                 580 tools/perf/bench/numa.c 				td = g->threads + t;
t                 582 tools/perf/bench/numa.c 				if (t)
t                 595 tools/perf/bench/numa.c 				t++;
t                 603 tools/perf/bench/numa.c 	if (t < g->p.nr_tasks)
t                 604 tools/perf/bench/numa.c 		printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
t                 632 tools/perf/bench/numa.c 	int t;
t                 640 tools/perf/bench/numa.c 	t = 0;
t                 698 tools/perf/bench/numa.c 				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
t                 702 tools/perf/bench/numa.c 				td = g->threads + t;
t                 704 tools/perf/bench/numa.c 				if (!t)
t                 710 tools/perf/bench/numa.c 				t++;
t                 718 tools/perf/bench/numa.c 	if (t < g->p.nr_tasks)
t                 719 tools/perf/bench/numa.c 		printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
t                 879 tools/perf/bench/numa.c 	int n, t;
t                 881 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
t                 886 tools/perf/bench/numa.c 		task_nr = process_nr*g->p.nr_threads + t;
t                 914 tools/perf/bench/numa.c 	int t, p;
t                 917 tools/perf/bench/numa.c 		for (t = 0; t < g->p.nr_threads; t++) {
t                 922 tools/perf/bench/numa.c 			task_nr = p*g->p.nr_threads + t;
t                 978 tools/perf/bench/numa.c 	int t;
t                 989 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
t                 990 tools/perf/bench/numa.c 		struct thread_data *td = g->threads + t;
t                1291 tools/perf/bench/numa.c 	int t;
t                1314 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
t                1315 tools/perf/bench/numa.c 		task_nr = process_nr*g->p.nr_threads + t;
t                1320 tools/perf/bench/numa.c 		td->thread_nr    = t;
t                1326 tools/perf/bench/numa.c 		ret = pthread_create(pthreads + t, NULL, worker_thread, td);
t                1330 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
t                1331 tools/perf/bench/numa.c                 ret = pthread_join(pthreads[t], NULL);
t                1362 tools/perf/bench/numa.c 	int t;
t                1366 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
t                1367 tools/perf/bench/numa.c 		struct thread_data *td = g->threads + t;
t                1496 tools/perf/bench/numa.c 	int i, t, p;
t                1575 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
t                1576 tools/perf/bench/numa.c 		u64 thread_runtime_ns = g->threads[t].runtime_ns;
t                1636 tools/perf/bench/numa.c 			for (t = 0; t < g->p.nr_threads; t++) {
t                1638 tools/perf/bench/numa.c 				td = g->threads + p*g->p.nr_threads + t;
t                1639 tools/perf/bench/numa.c 				snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
t                  84 tools/perf/bench/sched-pipe.c 	int t;
t                 101 tools/perf/bench/sched-pipe.c 	for (t = 0; t < nr_threads; t++) {
t                 102 tools/perf/bench/sched-pipe.c 		td = threads + t;
t                 104 tools/perf/bench/sched-pipe.c 		td->nr = t;
t                 106 tools/perf/bench/sched-pipe.c 		if (t == 0) {
t                 118 tools/perf/bench/sched-pipe.c 		for (t = 0; t < nr_threads; t++) {
t                 119 tools/perf/bench/sched-pipe.c 			td = threads + t;
t                 125 tools/perf/bench/sched-pipe.c 		for (t = 0; t < nr_threads; t++) {
t                 126 tools/perf/bench/sched-pipe.c 			td = threads + t;
t                 767 tools/perf/builtin-lock.c 	struct thread *t;
t                 774 tools/perf/builtin-lock.c 		t = perf_session__findnew(session, st->tid);
t                 775 tools/perf/builtin-lock.c 		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
t                 777 tools/perf/builtin-lock.c 		thread__put(t);
t                2001 tools/perf/builtin-sched.c 				  u64 t, int state)
t                2011 tools/perf/builtin-sched.c 	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
t                2089 tools/perf/builtin-sched.c 					 u64 t, u64 tprev)
t                2098 tools/perf/builtin-sched.c 		r->dt_run = t - tprev;
t                2519 tools/perf/builtin-sched.c 	u64 tprev, t = sample->time;
t                2553 tools/perf/builtin-sched.c 	if (ptime->start && ptime->start > t)
t                2569 tools/perf/builtin-sched.c 		if (t > ptime->end)
t                2570 tools/perf/builtin-sched.c 			t = ptime->end;
t                2574 tools/perf/builtin-sched.c 		timehist_update_runtime_stats(tr, t, tprev);
t                2590 tools/perf/builtin-sched.c 			timehist_update_runtime_stats(last_tr, t, tprev);
t                2603 tools/perf/builtin-sched.c 				callchain_append(&itr->callchain, &itr->cursor, t - tprev);
t                2610 tools/perf/builtin-sched.c 		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
t                2613 tools/perf/builtin-sched.c 	if (sched->hist_time.start == 0 && t >= ptime->start)
t                2614 tools/perf/builtin-sched.c 		sched->hist_time.start = t;
t                2615 tools/perf/builtin-sched.c 	if (ptime->end == 0 || t <= ptime->end)
t                2616 tools/perf/builtin-sched.c 		sched->hist_time.end = t;
t                2658 tools/perf/builtin-sched.c static void print_thread_runtime(struct thread *t,
t                2665 tools/perf/builtin-sched.c 	       comm_width, timehist_get_commstr(t), t->ppid,
t                2681 tools/perf/builtin-sched.c static void print_thread_waittime(struct thread *t,
t                2685 tools/perf/builtin-sched.c 	       comm_width, timehist_get_commstr(t), t->ppid,
t                2706 tools/perf/builtin-sched.c static int __show_thread_runtime(struct thread *t, void *priv)
t                2711 tools/perf/builtin-sched.c 	if (thread__is_filtered(t))
t                2714 tools/perf/builtin-sched.c 	r = thread__priv(t);
t                2721 tools/perf/builtin-sched.c 			print_thread_waittime(t, r);
t                2723 tools/perf/builtin-sched.c 			print_thread_runtime(t, r);
t                2729 tools/perf/builtin-sched.c static int show_thread_runtime(struct thread *t, void *priv)
t                2731 tools/perf/builtin-sched.c 	if (t->dead)
t                2734 tools/perf/builtin-sched.c 	return __show_thread_runtime(t, priv);
t                2737 tools/perf/builtin-sched.c static int show_deadthread_runtime(struct thread *t, void *priv)
t                2739 tools/perf/builtin-sched.c 	if (!t->dead)
t                2742 tools/perf/builtin-sched.c 	return __show_thread_runtime(t, priv);
t                2805 tools/perf/builtin-sched.c 	struct thread *t;
t                2847 tools/perf/builtin-sched.c 		t = idle_threads[i];
t                2848 tools/perf/builtin-sched.c 		if (!t)
t                2851 tools/perf/builtin-sched.c 		r = thread__priv(t);
t                2871 tools/perf/builtin-sched.c 			t = idle_threads[i];
t                2872 tools/perf/builtin-sched.c 			if (!t)
t                2875 tools/perf/builtin-sched.c 			itr = thread__priv(t);
t                 702 tools/perf/builtin-script.c 		u64 t = sample->time;
t                 706 tools/perf/builtin-script.c 			t = sample->time - initial_time;
t                 708 tools/perf/builtin-script.c 		nsecs = t;
t                 716 tools/perf/builtin-script.c 			timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
t                 273 tools/perf/builtin-top.c static void perf_top__resort_hists(struct perf_top *t)
t                 275 tools/perf/builtin-top.c 	struct evlist *evlist = t->evlist;
t                 288 tools/perf/builtin-top.c 			if (t->zero) {
t                 291 tools/perf/builtin-top.c 				hists__decay_entries(hists, t->hide_user_symbols,
t                 292 tools/perf/builtin-top.c 						     t->hide_kernel_symbols);
t                 591 tools/perf/builtin-top.c 	struct perf_top *t = arg;
t                 593 tools/perf/builtin-top.c 	if (t->evlist->selected != NULL)
t                 594 tools/perf/builtin-top.c 		t->sym_evsel = t->evlist->selected;
t                 596 tools/perf/builtin-top.c 	perf_top__resort_hists(t);
t                 598 tools/perf/builtin-top.c 	if (t->lost || t->drop)
t                1025 tools/perf/builtin-trace.c static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
t                1027 tools/perf/builtin-trace.c 	double duration = (double)t / NSEC_PER_MSEC;
t                1297 tools/perf/builtin-trace.c static bool trace__filter_duration(struct trace *trace, double t)
t                1299 tools/perf/builtin-trace.c 	return t < (trace->duration_filter * NSEC_PER_MSEC);
t                 135 tools/perf/jvmti/jvmti_agent.c 	time_t t;
t                 138 tools/perf/jvmti/jvmti_agent.c 	time(&t);
t                 139 tools/perf/jvmti/jvmti_agent.c 	localtime_r(&t, &tm);
t                 257 tools/perf/pmu-events/jevents.c #define EXPECT(e, t, m) do { if (!(e)) {			\
t                 258 tools/perf/pmu-events/jevents.c 	jsmntok_t *loc = (t);					\
t                 259 tools/perf/pmu-events/jevents.c 	if (!(t)->start && (t) > tokens)			\
t                 260 tools/perf/pmu-events/jevents.c 		loc = (t) - 1;					\
t                 263 tools/perf/pmu-events/jevents.c 	       json_name(t));					\
t                 132 tools/perf/pmu-events/json.c int json_line(char *map, jsmntok_t *t)
t                 134 tools/perf/pmu-events/json.c 	return countchar(map, '\n', t->start) + 1;
t                 147 tools/perf/pmu-events/json.c const char *json_name(jsmntok_t *t)
t                 149 tools/perf/pmu-events/json.c 	return LOOKUP(jsmn_types, t->type);
t                 152 tools/perf/pmu-events/json.c int json_len(jsmntok_t *t)
t                 154 tools/perf/pmu-events/json.c 	return t->end - t->start;
t                 158 tools/perf/pmu-events/json.c int json_streq(char *map, jsmntok_t *t, const char *s)
t                 160 tools/perf/pmu-events/json.c 	unsigned len = json_len(t);
t                 161 tools/perf/pmu-events/json.c 	return len == strlen(s) && !strncasecmp(map + t->start, s, len);
t                   9 tools/perf/pmu-events/json.h int json_line(char *map, jsmntok_t *t);
t                  10 tools/perf/pmu-events/json.h const char *json_name(jsmntok_t *t);
t                  11 tools/perf/pmu-events/json.h int json_streq(char *map, jsmntok_t *t, const char *s);
t                  12 tools/perf/pmu-events/json.h int json_len(jsmntok_t *t);
t                 389 tools/perf/tests/builtin-test.c #define for_each_test(j, t)	 				\
t                 391 tools/perf/tests/builtin-test.c 		for (t = &tests[j][0]; t->func; t++)
t                 393 tools/perf/tests/builtin-test.c static int test_and_print(struct test *t, bool force_skip, int subtest)
t                 399 tools/perf/tests/builtin-test.c 		err = run_test(t, subtest);
t                 406 tools/perf/tests/builtin-test.c 	if (!t->subtest.get_nr)
t                 407 tools/perf/tests/builtin-test.c 		pr_debug("%s:", t->desc);
t                 409 tools/perf/tests/builtin-test.c 		pr_debug("%s subtest %d:", t->desc, subtest + 1);
t                 563 tools/perf/tests/builtin-test.c 	struct test *t;
t                 568 tools/perf/tests/builtin-test.c 	for_each_test(j, t) {
t                 569 tools/perf/tests/builtin-test.c 		int len = strlen(t->desc);
t                 575 tools/perf/tests/builtin-test.c 	for_each_test(j, t) {
t                 578 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(t, curr, argc, argv))
t                 581 tools/perf/tests/builtin-test.c 		if (t->is_supported && !t->is_supported()) {
t                 582 tools/perf/tests/builtin-test.c 			pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
t                 586 tools/perf/tests/builtin-test.c 		pr_info("%2d: %-*s:", i, width, t->desc);
t                 593 tools/perf/tests/builtin-test.c 		if (!t->subtest.get_nr) {
t                 594 tools/perf/tests/builtin-test.c 			test_and_print(t, false, -1);
t                 596 tools/perf/tests/builtin-test.c 			int subn = t->subtest.get_nr();
t                 617 tools/perf/tests/builtin-test.c 				int len = strlen(t->subtest.get_desc(subi));
t                 625 tools/perf/tests/builtin-test.c 					t->subtest.get_desc(subi));
t                 626 tools/perf/tests/builtin-test.c 				err = test_and_print(t, skip, subi);
t                 627 tools/perf/tests/builtin-test.c 				if (err != TEST_OK && t->subtest.skip_if_fail)
t                 653 tools/perf/tests/builtin-test.c 		struct test t = {
t                 657 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(&t, curr, argc, argv))
t                 660 tools/perf/tests/builtin-test.c 		pr_info("%2d: %s\n", i, t.desc);
t                 670 tools/perf/tests/builtin-test.c 	struct test *t;
t                 673 tools/perf/tests/builtin-test.c 	for_each_test(j, t) {
t                 676 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(t, curr, argc, argv) ||
t                 677 tools/perf/tests/builtin-test.c 		    (t->is_supported && !t->is_supported()))
t                 680 tools/perf/tests/builtin-test.c 		pr_info("%2d: %s\n", i, t->desc);
t                 682 tools/perf/tests/builtin-test.c 		if (t->subtest.get_nr) {
t                 683 tools/perf/tests/builtin-test.c 			int subn = t->subtest.get_nr();
t                 688 tools/perf/tests/builtin-test.c 					t->subtest.get_desc(subi));
t                  19 tools/perf/tests/expr.c int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
t                 130 tools/perf/tests/hists_link.c 		       struct thread *t, struct map *m, struct symbol *s)
t                 133 tools/perf/tests/hists_link.c 		if (samples->thread == t && samples->map == m &&
t                  16 tools/perf/tests/is_printable_array.c 	} t[] = {
t                  27 tools/perf/tests/is_printable_array.c 	for (i = 0; i < ARRAY_SIZE(t); i++) {
t                  30 tools/perf/tests/is_printable_array.c 		ret = is_printable_array((char *) t[i].buf, t[i].len);
t                  31 tools/perf/tests/is_printable_array.c 		if (ret != t[i].ret) {
t                  50 tools/perf/tests/kmod-path.c int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_unused)
t                  37 tools/perf/tests/map_groups.c int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused)
t                  46 tools/perf/tests/mem2node.c int test__mem2node(struct test *t __maybe_unused, int subtest __maybe_unused)
t                1816 tools/perf/tests/parse-events.c static int test_term(struct terms_test *t)
t                1823 tools/perf/tests/parse-events.c 	ret = parse_events_terms(&terms, t->str);
t                1826 tools/perf/tests/parse-events.c 			 t->str , ret);
t                1830 tools/perf/tests/parse-events.c 	ret = t->check(&terms);
t                1842 tools/perf/tests/parse-events.c 		struct terms_test *t = &terms[i];
t                1844 tools/perf/tests/parse-events.c 		pr_debug("running test %d '%s'\n", i, t->str);
t                1845 tools/perf/tests/parse-events.c 		ret = test_term(t);
t                 109 tools/perf/tests/tests.h int test__mem2node(struct test *t, int subtest);
t                 110 tools/perf/tests/tests.h int test__map_groups__merge_in(struct test *t, int subtest);
t                 111 tools/perf/tests/tests.h int test__time_utils(struct test *t, int subtest);
t                 134 tools/perf/tests/time-utils-test.c int test__time_utils(struct test *t __maybe_unused, int subtest __maybe_unused)
t                  10 tools/perf/tests/unit_number__scnprintf.c int test__unit_number__scnprint(struct test *t __maybe_unused, int subtest __maybe_unused)
t                  77 tools/perf/ui/tui/util.c 	const char *t;
t                  79 tools/perf/ui/tui/util.c 	t = text;
t                  81 tools/perf/ui/tui/util.c 		const char *sep = strchr(t, '\n');
t                  84 tools/perf/ui/tui/util.c 			sep = strchr(t, '\0');
t                  85 tools/perf/ui/tui/util.c 		len = sep - t;
t                  91 tools/perf/ui/tui/util.c 		t = sep + 1;
t                 167 tools/perf/ui/tui/util.c 	const char *t;
t                 169 tools/perf/ui/tui/util.c 	t = text;
t                 171 tools/perf/ui/tui/util.c 		const char *sep = strchr(t, '\n');
t                 175 tools/perf/ui/tui/util.c 			sep = strchr(t, '\0');
t                 176 tools/perf/ui/tui/util.c 		len = sep - t;
t                 182 tools/perf/ui/tui/util.c 		t = sep + 1;
t                 452 tools/perf/util/annotate.c 	char *endptr, *name, *t;
t                 466 tools/perf/util/annotate.c 	t = strchr(name, '>');
t                 467 tools/perf/util/annotate.c 	if (t == NULL)
t                 470 tools/perf/util/annotate.c 	*t = '\0';
t                 472 tools/perf/util/annotate.c 	*t = '>';
t                 125 tools/perf/util/bpf-event.c 	const struct btf_type *t;
t                 133 tools/perf/util/bpf-event.c 		t = btf__type_by_id(btf, finfo->type_id);
t                 134 tools/perf/util/bpf-event.c 		short_name = btf__name_by_offset(btf, t->name_off);
t                  41 tools/perf/util/data-convert-bt.c #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
t                 975 tools/perf/util/data-convert-bt.c 	struct bt_ctf_field_type *t = NULL;
t                 995 tools/perf/util/data-convert-bt.c 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
t                 996 tools/perf/util/data-convert-bt.c 		bt_ctf_field_type_put(t);
t                1106 tools/perf/util/data-convert-bt.c #define ADD_FIELD(cl, t, n)						\
t                1109 tools/perf/util/data-convert-bt.c 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
t                1217 tools/perf/util/data-convert-bt.c #define __NON_SAMPLE_ADD_FIELD(t, n)						\
t                1220 tools/perf/util/data-convert-bt.c 		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
t                  58 tools/perf/util/debug.c static int veprintf_time(u64 t, const char *fmt, va_list args)
t                  61 tools/perf/util/debug.c 	u64 secs, usecs, nsecs = t;
t                  73 tools/perf/util/debug.c int eprintf_time(int level, int var, u64 t, const char *fmt, ...)
t                  80 tools/perf/util/debug.c 		ret = veprintf_time(t, fmt, args);
t                  33 tools/perf/util/debug.h #define pr_time_N(n, var, t, fmt, ...) \
t                  34 tools/perf/util/debug.h 	eprintf_time(n, var, t, fmt, ##__VA_ARGS__)
t                  36 tools/perf/util/debug.h #define pr_oe_time(t, fmt, ...)  pr_time_N(1, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
t                  37 tools/perf/util/debug.h #define pr_oe_time2(t, fmt, ...) pr_time_N(2, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
t                  52 tools/perf/util/debug.h int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5);
t                 252 tools/perf/util/evsel.h #define perf_evsel__match(evsel, t, c)		\
t                 253 tools/perf/util/evsel.h 	(evsel->core.attr.type == PERF_TYPE_##t &&	\
t                 213 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
t                 217 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	return (t / d) * n + ((t % d) * n) / d;
t                  71 tools/perf/util/jitdump.c #define get_jit_tool(t) (container_of(tool, struct jit_tool, tool))
t                 198 tools/perf/util/machine.c 			struct thread *t = rb_entry(nd, struct thread, rb_node);
t                 201 tools/perf/util/machine.c 			__machine__remove_thread(machine, t, false);
t                  18 tools/perf/util/mem-events.c #define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
t                 972 tools/perf/util/pmu.c 	struct parse_events_term *t;
t                 974 tools/perf/util/pmu.c 	list_for_each_entry(t, head_terms, list) {
t                 975 tools/perf/util/pmu.c 		if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
t                 976 tools/perf/util/pmu.c 			if (!strcmp(t->config, term->config)) {
t                 977 tools/perf/util/pmu.c 				t->used = true;
t                 978 tools/perf/util/pmu.c 				*value = t->val.num;
t                  50 tools/perf/util/s390-cpumsf-kernel.h 			unsigned int t:1;	/* 2 - Timestamp format	      */
t                 332 tools/perf/util/s390-cpumsf.c 	local.t = flags >> 61 & 0x1;
t                 351 tools/perf/util/s390-cpumsf.c 		      te->t ? 'T' : ' ',
t                 207 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 214 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(4);
t                 215 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                 220 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
t                 221 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
t                 222 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyLong_FromLong(value));
t                 223 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_str));
t                 225 tools/perf/util/scripting-engines/trace-event-python.c 	try_call_object(handler_name, t);
t                 227 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                 248 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 255 tools/perf/util/scripting-engines/trace-event-python.c 		t = PyTuple_New(3);
t                 257 tools/perf/util/scripting-engines/trace-event-python.c 		t = PyTuple_New(2);
t                 258 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                 261 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
t                 262 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
t                 264 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyUnicode_FromString(delim));
t                 266 tools/perf/util/scripting-engines/trace-event-python.c 	try_call_object(handler_name, t);
t                 268 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                 625 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 627 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(2);
t                 628 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                 630 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, 0, PyLong_FromUnsignedLongLong(value->id));
t                 631 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, 1, PyLong_FromUnsignedLongLong(value->value));
t                 632 tools/perf/util/scripting-engines/trace-event-python.c 	return t;
t                 663 tools/perf/util/scripting-engines/trace-event-python.c 			PyObject *t = get_sample_value_as_tuple(&sample->read.group.values[i]);
t                 664 tools/perf/util/scripting-engines/trace-event-python.c 			PyList_SET_ITEM(values, i, t);
t                 667 tools/perf/util/scripting-engines/trace-event-python.c 		PyObject *t = get_sample_value_as_tuple(&sample->read.one);
t                 668 tools/perf/util/scripting-engines/trace-event-python.c 		PyList_SET_ITEM(values, 0, t);
t                 797 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *handler, *context, *t, *obj = NULL, *callchain;
t                 833 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(MAX_FIELDS);
t                 834 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                 846 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name));
t                 847 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, context);
t                 855 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
t                 856 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyLong_FromLong(s));
t                 857 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyLong_FromLong(ns));
t                 858 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyLong_FromLong(pid));
t                 859 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyUnicode_FromString(comm));
t                 860 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, callchain);
t                 894 tools/perf/util/scripting-engines/trace-event-python.c 			PyTuple_SetItem(t, n++, obj);
t                 901 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, dict);
t                 906 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++,	all_entries_dict);
t                 911 tools/perf/util/scripting-engines/trace-event-python.c 	if (_PyTuple_Resize(&t, n) == -1)
t                 915 tools/perf/util/scripting-engines/trace-event-python.c 		call_object(handler, t, handler_name);
t                 917 tools/perf/util/scripting-engines/trace-event-python.c 		call_object(handler, t, default_handler_name);
t                 919 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                 924 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 926 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(sz);
t                 927 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                 929 tools/perf/util/scripting-engines/trace-event-python.c 	return t;
t                 932 tools/perf/util/scripting-engines/trace-event-python.c static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
t                 935 tools/perf/util/scripting-engines/trace-event-python.c 	return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
t                 938 tools/perf/util/scripting-engines/trace-event-python.c 	return PyTuple_SetItem(t, pos, PyLong_FromLongLong(val));
t                 942 tools/perf/util/scripting-engines/trace-event-python.c static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
t                 944 tools/perf/util/scripting-engines/trace-event-python.c 	return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
t                 947 tools/perf/util/scripting-engines/trace-event-python.c static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
t                 949 tools/perf/util/scripting-engines/trace-event-python.c 	return PyTuple_SetItem(t, pos, _PyUnicode_FromString(s));
t                 952 tools/perf/util/scripting-engines/trace-event-python.c static int tuple_set_bytes(PyObject *t, unsigned int pos, void *bytes,
t                 955 tools/perf/util/scripting-engines/trace-event-python.c 	return PyTuple_SetItem(t, pos, _PyBytes_FromStringAndSize(bytes, sz));
t                 961 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 963 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(2);
t                 965 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, evsel->db_id);
t                 966 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 1, perf_evsel__name(evsel));
t                 968 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->evsel_handler, t, "evsel_table");
t                 970 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                 979 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                 981 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(3);
t                 983 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, machine->db_id);
t                 984 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 1, machine->pid);
t                 985 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
t                 987 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->machine_handler, t, "machine_table");
t                 989 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                 998 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1000 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(5);
t                1002 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, thread->db_id);
t                1003 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, machine->db_id);
t                1004 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, main_thread_db_id);
t                1005 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 3, thread->pid_);
t                1006 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 4, thread->tid);
t                1008 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->thread_handler, t, "thread_table");
t                1010 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1019 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1021 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(5);
t                1023 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, comm->db_id);
t                1024 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 1, comm__str(comm));
t                1025 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, thread->db_id);
t                1026 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 3, comm->start);
t                1027 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 4, comm->exec);
t                1029 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->comm_handler, t, "comm_table");
t                1031 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1040 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1042 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(3);
t                1044 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, db_id);
t                1045 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, comm->db_id);
t                1046 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, thread->db_id);
t                1048 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->comm_thread_handler, t, "comm_thread_table");
t                1050 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1060 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1064 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(5);
t                1066 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, dso->db_id);
t                1067 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, machine->db_id);
t                1068 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 2, dso->short_name);
t                1069 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 3, dso->long_name);
t                1070 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 4, sbuild_id);
t                1072 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->dso_handler, t, "dso_table");
t                1074 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1084 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1086 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(6);
t                1088 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, *sym_db_id);
t                1089 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, dso->db_id);
t                1090 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, sym->start);
t                1091 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 3, sym->end);
t                1092 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 4, sym->binding);
t                1093 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 5, sym->name);
t                1095 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->symbol_handler, t, "symbol_table");
t                1097 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1106 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1108 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(2);
t                1110 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 0, branch_type);
t                1111 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_string(t, 1, name);
t                1113 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->branch_type_handler, t, "branch_type_table");
t                1115 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1124 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1126 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(24);
t                1128 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, es->db_id);
t                1129 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, es->evsel->db_id);
t                1130 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, es->al->machine->db_id);
t                1131 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 3, es->al->thread->db_id);
t                1132 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 4, es->comm_db_id);
t                1133 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 5, es->dso_db_id);
t                1134 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 6, es->sym_db_id);
t                1135 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 7, es->offset);
t                1136 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 8, es->sample->ip);
t                1137 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 9, es->sample->time);
t                1138 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 10, es->sample->cpu);
t                1139 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 11, es->addr_dso_db_id);
t                1140 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 12, es->addr_sym_db_id);
t                1141 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 13, es->addr_offset);
t                1142 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 14, es->sample->addr);
t                1143 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 15, es->sample->period);
t                1144 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 16, es->sample->weight);
t                1145 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 17, es->sample->transaction);
t                1146 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 18, es->sample->data_src);
t                1147 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
t                1148 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
t                1149 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 21, es->call_path_id);
t                1150 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 22, es->sample->insn_cnt);
t                1151 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 23, es->sample->cyc_cnt);
t                1153 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->sample_handler, t, "sample_table");
t                1155 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1161 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1163 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(3);
t                1165 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, es->db_id);
t                1166 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, es->evsel->core.attr.config);
t                1167 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
t                1169 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->synth_handler, t, "synth_data");
t                1171 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1190 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1196 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(4);
t                1198 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, cp->db_id);
t                1199 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, parent_db_id);
t                1200 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, sym_db_id);
t                1201 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 3, cp->ip);
t                1203 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->call_path_handler, t, "call_path_table");
t                1205 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1215 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1217 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(14);
t                1219 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, cr->db_id);
t                1220 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, cr->thread->db_id);
t                1221 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, comm_db_id);
t                1222 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 3, cr->cp->db_id);
t                1223 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 4, cr->call_time);
t                1224 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 5, cr->return_time);
t                1225 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 6, cr->branch_count);
t                1226 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 7, cr->call_ref);
t                1227 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 8, cr->return_ref);
t                1228 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 9, cr->cp->parent->db_id);
t                1229 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 10, cr->flags);
t                1230 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 11, cr->parent_db_id);
t                1231 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 12, cr->insn_count);
t                1232 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 13, cr->cyc_count);
t                1234 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->call_return_handler, t, "call_return_table");
t                1236 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1248 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *t;
t                1250 tools/perf/util/scripting-engines/trace-event-python.c 	t = tuple_new(9);
t                1252 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 0, db_id);
t                1253 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 1, machine->db_id);
t                1254 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 2, sample->time);
t                1255 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 3, sample->cpu);
t                1256 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 4, th_out_id);
t                1257 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 5, comm_out_id);
t                1258 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 6, th_in_id);
t                1259 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, 7, comm_in_id);
t                1260 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 8, flags);
t                1262 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(tables->context_switch_handler, t, "context_switch");
t                1264 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1281 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *handler, *t, *dict, *callchain;
t                1295 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(MAX_FIELDS);
t                1296 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                1303 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, dict);
t                1304 tools/perf/util/scripting-engines/trace-event-python.c 	if (_PyTuple_Resize(&t, n) == -1)
t                1307 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(handler, t, handler_name);
t                1309 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1359 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *handler, *t;
t                1363 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(MAX_FIELDS);
t                1364 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                1376 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
t                1377 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyLong_FromLong(thread));
t                1379 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, n++, tstamp);
t                1380 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, n++, count->val);
t                1381 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, n++, count->ena);
t                1382 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, n++, count->run);
t                1384 tools/perf/util/scripting-engines/trace-event-python.c 	if (_PyTuple_Resize(&t, n) == -1)
t                1387 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(handler, t, handler_name);
t                1389 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1416 tools/perf/util/scripting-engines/trace-event-python.c 	PyObject *handler, *t;
t                1420 tools/perf/util/scripting-engines/trace-event-python.c 	t = PyTuple_New(MAX_FIELDS);
t                1421 tools/perf/util/scripting-engines/trace-event-python.c 	if (!t)
t                1430 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_u64(t, n++, tstamp);
t                1432 tools/perf/util/scripting-engines/trace-event-python.c 	if (_PyTuple_Resize(&t, n) == -1)
t                1435 tools/perf/util/scripting-engines/trace-event-python.c 	call_object(handler, t, handler_name);
t                1437 tools/perf/util/scripting-engines/trace-event-python.c 	Py_DECREF(t);
t                1545 tools/perf/util/sort.c 	u64 t = he->transaction;
t                1552 tools/perf/util/sort.c 		if (txbits[i].flag & t)
t                1554 tools/perf/util/sort.c 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
t                1556 tools/perf/util/sort.c 	if (t & PERF_TXN_ABORT_MASK) {
t                1558 tools/perf/util/sort.c 			(t & PERF_TXN_ABORT_MASK) >>
t                1077 tools/perf/util/stat-display.c static double timeval2double(struct timeval *t)
t                1079 tools/perf/util/stat-display.c 	return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
t                  89 tools/perf/util/stat.c 	ID(CYCLES_IN_TX,	cpu/cycles-t/),
t                 699 tools/perf/util/svghelper.c static void scan_thread_topology(int *map, struct topology *t, int cpu,
t                 705 tools/perf/util/svghelper.c 	for (i = 0; i < t->sib_thr_nr; i++) {
t                 706 tools/perf/util/svghelper.c 		if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
t                 709 tools/perf/util/svghelper.c 		for_each_set_bit(thr, cpumask_bits(&t->sib_thr[i]), nr_cpus)
t                 715 tools/perf/util/svghelper.c static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
t                 721 tools/perf/util/svghelper.c 	for (i = 0; i < t->sib_core_nr; i++)
t                 722 tools/perf/util/svghelper.c 		for_each_set_bit(cpu, cpumask_bits(&t->sib_core[i]), nr_cpus)
t                 723 tools/perf/util/svghelper.c 			scan_thread_topology(map, t, cpu, &pos, nr_cpus);
t                 755 tools/perf/util/svghelper.c 	struct topology t;
t                 760 tools/perf/util/svghelper.c 	t.sib_core_nr = env->nr_sibling_cores;
t                 761 tools/perf/util/svghelper.c 	t.sib_thr_nr = env->nr_sibling_threads;
t                 762 tools/perf/util/svghelper.c 	t.sib_core = calloc(env->nr_sibling_cores, sizeof(cpumask_t));
t                 763 tools/perf/util/svghelper.c 	t.sib_thr = calloc(env->nr_sibling_threads, sizeof(cpumask_t));
t                 768 tools/perf/util/svghelper.c 	if (!t.sib_core || !t.sib_thr) {
t                 774 tools/perf/util/svghelper.c 		if (str_to_bitmap(sib_core, &t.sib_core[i], nr_cpus)) {
t                 783 tools/perf/util/svghelper.c 		if (str_to_bitmap(sib_thr, &t.sib_thr[i], nr_cpus)) {
t                 800 tools/perf/util/svghelper.c 	scan_core_topology(topology_map, &t, nr_cpus);
t                 805 tools/perf/util/svghelper.c 	zfree(&t.sib_core);
t                 806 tools/perf/util/svghelper.c 	zfree(&t.sib_thr);
t                 285 tools/perf/util/synthetic-events.c 	unsigned long long t;
t                 308 tools/perf/util/synthetic-events.c 	t = rdclock();
t                 322 tools/perf/util/synthetic-events.c 		if ((rdclock() - t) > timeout) {
t                 394 tools/perf/util/trace-event-info.c 		struct tracepoint_path *t = tps;
t                 397 tools/perf/util/trace-event-info.c 		zfree(&t->name);
t                 398 tools/perf/util/trace-event-info.c 		zfree(&t->system);
t                 399 tools/perf/util/trace-event-info.c 		free(t);
t                  28 tools/perf/util/trace-event.c int trace_event__init(struct trace_event *t)
t                  33 tools/perf/util/trace-event.c 		t->plugin_list = tep_load_plugins(pevent);
t                  34 tools/perf/util/trace-event.c 		t->pevent  = pevent;
t                  65 tools/perf/util/trace-event.c void trace_event__cleanup(struct trace_event *t)
t                  67 tools/perf/util/trace-event.c 	tep_unload_plugins(t->plugin_list, t->pevent);
t                  68 tools/perf/util/trace-event.c 	tep_free(t->pevent);
t                  20 tools/perf/util/trace-event.h int trace_event__init(struct trace_event *t);
t                  21 tools/perf/util/trace-event.h void trace_event__cleanup(struct trace_event *t);
t                  36 tools/perf/util/trigger.h #define TRIGGER_WARN_ONCE(t, exp) \
t                  37 tools/perf/util/trigger.h 	WARN_ONCE(t->state != exp, "trigger '%s' state transist error: %d in %s()\n", \
t                  38 tools/perf/util/trigger.h 		  t->name, t->state, __func__)
t                  40 tools/perf/util/trigger.h static inline bool trigger_is_available(struct trigger *t)
t                  42 tools/perf/util/trigger.h 	return t->state >= 0;
t                  45 tools/perf/util/trigger.h static inline bool trigger_is_error(struct trigger *t)
t                  47 tools/perf/util/trigger.h 	return t->state <= TRIGGER_ERROR;
t                  50 tools/perf/util/trigger.h static inline void trigger_on(struct trigger *t)
t                  52 tools/perf/util/trigger.h 	TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
t                  53 tools/perf/util/trigger.h 	t->state = TRIGGER_ON;
t                  56 tools/perf/util/trigger.h static inline void trigger_ready(struct trigger *t)
t                  58 tools/perf/util/trigger.h 	if (!trigger_is_available(t))
t                  60 tools/perf/util/trigger.h 	t->state = TRIGGER_READY;
t                  63 tools/perf/util/trigger.h static inline void trigger_hit(struct trigger *t)
t                  65 tools/perf/util/trigger.h 	if (!trigger_is_available(t))
t                  67 tools/perf/util/trigger.h 	TRIGGER_WARN_ONCE(t, TRIGGER_READY);
t                  68 tools/perf/util/trigger.h 	t->state = TRIGGER_HIT;
t                  71 tools/perf/util/trigger.h static inline void trigger_off(struct trigger *t)
t                  73 tools/perf/util/trigger.h 	if (!trigger_is_available(t))
t                  75 tools/perf/util/trigger.h 	t->state = TRIGGER_OFF;
t                  78 tools/perf/util/trigger.h static inline void trigger_error(struct trigger *t)
t                  80 tools/perf/util/trigger.h 	t->state = TRIGGER_ERROR;
t                  83 tools/perf/util/trigger.h static inline bool trigger_is_ready(struct trigger *t)
t                  85 tools/perf/util/trigger.h 	return t->state == TRIGGER_READY;
t                  88 tools/perf/util/trigger.h static inline bool trigger_is_hit(struct trigger *t)
t                  90 tools/perf/util/trigger.h 	return t->state == TRIGGER_HIT;
t                   9 tools/perf/util/tsc.c 	u64 t, quot, rem;
t                  11 tools/perf/util/tsc.c 	t = ns - tc->time_zero;
t                  12 tools/perf/util/tsc.c 	quot = t / tc->time_mult;
t                  13 tools/perf/util/tsc.c 	rem  = t % tc->time_mult;
t                  44 tools/power/cpupower/utils/helpers/amd.c 	int t;
t                  47 tools/power/cpupower/utils/helpers/amd.c 		t = pstate.val & 0xf;
t                  49 tools/power/cpupower/utils/helpers/amd.c 		t = pstate.fam17h_bits.did;
t                  51 tools/power/cpupower/utils/helpers/amd.c 		t = pstate.bits.did;
t                  53 tools/power/cpupower/utils/helpers/amd.c 	return t;
t                  58 tools/power/cpupower/utils/helpers/amd.c 	int t;
t                  66 tools/power/cpupower/utils/helpers/amd.c 		t = 0x10;
t                  69 tools/power/cpupower/utils/helpers/amd.c 			t = 0x8;
t                  70 tools/power/cpupower/utils/helpers/amd.c 		cof = (100 * (fid + t)) >> did;
t                 330 tools/power/x86/turbostat/turbostat.c 					struct thread_data *t;
t                 334 tools/power/x86/turbostat/turbostat.c 					t = GET_THREAD(thread_base, thread_no,
t                 338 tools/power/x86/turbostat/turbostat.c 					if (cpu_is_not_present(t->cpu_id))
t                 345 tools/power/x86/turbostat/turbostat.c 					retval = func(t, c, p);
t                 799 tools/power/x86/turbostat/turbostat.c int dump_counters(struct thread_data *t, struct core_data *c,
t                 805 tools/power/x86/turbostat/turbostat.c 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
t                 807 tools/power/x86/turbostat/turbostat.c 	if (t) {
t                 809 tools/power/x86/turbostat/turbostat.c 			t->cpu_id, t->flags);
t                 810 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
t                 811 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
t                 812 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
t                 813 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "c1: %016llX\n", t->c1);
t                 816 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "IRQ: %lld\n", t->irq_count);
t                 818 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "SMI: %d\n", t->smi_count);
t                 822 tools/power/x86/turbostat/turbostat.c 				i, mp->msr_num, t->counter[i]);
t                 885 tools/power/x86/turbostat/turbostat.c int format_counters(struct thread_data *t, struct core_data *c,
t                 896 tools/power/x86/turbostat/turbostat.c 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
t                 900 tools/power/x86/turbostat/turbostat.c 	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                 904 tools/power/x86/turbostat/turbostat.c 	if ((t != &average.threads) &&
t                 905 tools/power/x86/turbostat/turbostat.c 		(cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
t                 912 tools/power/x86/turbostat/turbostat.c 		timersub(&t->tv_end, &t->tv_begin, &tv);
t                 918 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
t                 920 tools/power/x86/turbostat/turbostat.c 	interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
t                 922 tools/power/x86/turbostat/turbostat.c 	tsc = t->tsc * tsc_tweak;
t                 925 tools/power/x86/turbostat/turbostat.c 	if (t == &average.threads) {
t                 949 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
t                 954 tools/power/x86/turbostat/turbostat.c 			if (t)
t                 957 tools/power/x86/turbostat/turbostat.c 					      cpus[t->cpu_id].physical_node_id);
t                 969 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
t                 971 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
t                 973 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
t                 978 tools/power/x86/turbostat/turbostat.c 			1.0 / units * t->aperf / interval_float);
t                 981 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc);
t                 985 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf);
t                 988 tools/power/x86/turbostat/turbostat.c 				tsc / units * t->aperf / t->mperf / interval_float);
t                 992 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float);
t                 997 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count);
t                 999 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count);
t                1004 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count);
t                1010 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]);
t                1012 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]);
t                1015 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]);
t                1017 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]);
t                1020 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i]/interval_float/10000);
t                1022 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc);
t                1028 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc);
t                1032 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
t                1080 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                1195 tools/power/x86/turbostat/turbostat.c void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                1209 tools/power/x86/turbostat/turbostat.c 	for_all_cpus(format_counters, t, c, p);
t                1392 tools/power/x86/turbostat/turbostat.c int delta_cpu(struct thread_data *t, struct core_data *c,
t                1399 tools/power/x86/turbostat/turbostat.c 	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
t                1403 tools/power/x86/turbostat/turbostat.c 	retval = delta_thread(t, t2, c2);	/* c2 is core delta */
t                1408 tools/power/x86/turbostat/turbostat.c 	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
t                1414 tools/power/x86/turbostat/turbostat.c void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                1419 tools/power/x86/turbostat/turbostat.c 	t->tv_begin.tv_sec = 0;
t                1420 tools/power/x86/turbostat/turbostat.c 	t->tv_begin.tv_usec = 0;
t                1421 tools/power/x86/turbostat/turbostat.c 	t->tv_end.tv_sec = 0;
t                1422 tools/power/x86/turbostat/turbostat.c 	t->tv_end.tv_usec = 0;
t                1423 tools/power/x86/turbostat/turbostat.c 	t->tv_delta.tv_sec = 0;
t                1424 tools/power/x86/turbostat/turbostat.c 	t->tv_delta.tv_usec = 0;
t                1426 tools/power/x86/turbostat/turbostat.c 	t->tsc = 0;
t                1427 tools/power/x86/turbostat/turbostat.c 	t->aperf = 0;
t                1428 tools/power/x86/turbostat/turbostat.c 	t->mperf = 0;
t                1429 tools/power/x86/turbostat/turbostat.c 	t->c1 = 0;
t                1431 tools/power/x86/turbostat/turbostat.c 	t->irq_count = 0;
t                1432 tools/power/x86/turbostat/turbostat.c 	t->smi_count = 0;
t                1435 tools/power/x86/turbostat/turbostat.c 	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
t                1473 tools/power/x86/turbostat/turbostat.c 		t->counter[i] = 0;
t                1481 tools/power/x86/turbostat/turbostat.c int sum_counters(struct thread_data *t, struct core_data *c,
t                1489 tools/power/x86/turbostat/turbostat.c 		average.threads.apic_id = t->apic_id;
t                1491 tools/power/x86/turbostat/turbostat.c 		average.threads.x2apic_id = t->x2apic_id;
t                1495 tools/power/x86/turbostat/turbostat.c 		average.threads.tv_begin = t->tv_begin;
t                1498 tools/power/x86/turbostat/turbostat.c 	average.threads.tv_end = t->tv_end;
t                1500 tools/power/x86/turbostat/turbostat.c 	average.threads.tsc += t->tsc;
t                1501 tools/power/x86/turbostat/turbostat.c 	average.threads.aperf += t->aperf;
t                1502 tools/power/x86/turbostat/turbostat.c 	average.threads.mperf += t->mperf;
t                1503 tools/power/x86/turbostat/turbostat.c 	average.threads.c1 += t->c1;
t                1505 tools/power/x86/turbostat/turbostat.c 	average.threads.irq_count += t->irq_count;
t                1506 tools/power/x86/turbostat/turbostat.c 	average.threads.smi_count += t->smi_count;
t                1511 tools/power/x86/turbostat/turbostat.c 		average.threads.counter[i] += t->counter[i];
t                1515 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
t                1534 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                1584 tools/power/x86/turbostat/turbostat.c void compute_average(struct thread_data *t, struct core_data *c,
t                1592 tools/power/x86/turbostat/turbostat.c 	for_all_cpus(sum_counters, t, c, p);
t                1724 tools/power/x86/turbostat/turbostat.c void get_apic_id(struct thread_data *t)
t                1732 tools/power/x86/turbostat/turbostat.c 		t->apic_id = (ebx >> 24) & 0xff;
t                1754 tools/power/x86/turbostat/turbostat.c 		t->x2apic_id = eax;
t                1766 tools/power/x86/turbostat/turbostat.c 	t->x2apic_id = edx;
t                1768 tools/power/x86/turbostat/turbostat.c 	if (debug && (t->apic_id != (t->x2apic_id & 0xff)))
t                1770 tools/power/x86/turbostat/turbostat.c 				t->cpu_id, t->apic_id, t->x2apic_id);
t                1778 tools/power/x86/turbostat/turbostat.c int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                1780 tools/power/x86/turbostat/turbostat.c 	int cpu = t->cpu_id;
t                1791 tools/power/x86/turbostat/turbostat.c 	gettimeofday(&t->tv_begin, (struct timezone *)NULL);
t                1794 tools/power/x86/turbostat/turbostat.c 		get_apic_id(t);
t                1796 tools/power/x86/turbostat/turbostat.c 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
t                1817 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
t                1820 tools/power/x86/turbostat/turbostat.c 		t->tsc = rdtsc();	/* re-read close to APERF */
t                1822 tools/power/x86/turbostat/turbostat.c 		tsc_before = t->tsc;
t                1824 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
t                1829 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
t                1851 tools/power/x86/turbostat/turbostat.c 		t->aperf = t->aperf * aperf_mperf_multiplier;
t                1852 tools/power/x86/turbostat/turbostat.c 		t->mperf = t->mperf * aperf_mperf_multiplier;
t                1856 tools/power/x86/turbostat/turbostat.c 		t->irq_count = irqs_per_cpu[cpu];
t                1860 tools/power/x86/turbostat/turbostat.c 		t->smi_count = msr & 0xFFFFFFFF;
t                1863 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
t                1868 tools/power/x86/turbostat/turbostat.c 		if (get_mp(cpu, mp, &t->counter[i]))
t                1873 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
t                1915 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                2020 tools/power/x86/turbostat/turbostat.c 	gettimeofday(&t->tv_end, (struct timezone *)NULL);
t                2697 tools/power/x86/turbostat/turbostat.c 					struct thread_data *t, *t2;
t                2701 tools/power/x86/turbostat/turbostat.c 					t = GET_THREAD(thread_base, thread_no,
t                2705 tools/power/x86/turbostat/turbostat.c 					if (cpu_is_not_present(t->cpu_id))
t                2721 tools/power/x86/turbostat/turbostat.c 					retval = func(t, c, p, t2, c2, p2);
t                3594 tools/power/x86/turbostat/turbostat.c int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                3603 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                3606 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                3639 tools/power/x86/turbostat/turbostat.c int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                3647 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                3650 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                3730 tools/power/x86/turbostat/turbostat.c int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                3735 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                3738 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                4097 tools/power/x86/turbostat/turbostat.c int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                4106 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                4109 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
t                4117 tools/power/x86/turbostat/turbostat.c 	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
t                4170 tools/power/x86/turbostat/turbostat.c int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                4180 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                4183 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                4456 tools/power/x86/turbostat/turbostat.c int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t                4467 tools/power/x86/turbostat/turbostat.c 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
t                4470 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
t                5101 tools/power/x86/turbostat/turbostat.c allocate_counters(struct thread_data **t, struct core_data **c,
t                5109 tools/power/x86/turbostat/turbostat.c 	*t = calloc(num_threads, sizeof(struct thread_data));
t                5110 tools/power/x86/turbostat/turbostat.c 	if (*t == NULL)
t                5114 tools/power/x86/turbostat/turbostat.c 		(*t)[i].cpu_id = -1;
t                5146 tools/power/x86/turbostat/turbostat.c 	struct thread_data *t;
t                5157 tools/power/x86/turbostat/turbostat.c 	t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
t                5161 tools/power/x86/turbostat/turbostat.c 	t->cpu_id = cpu_id;
t                5163 tools/power/x86/turbostat/turbostat.c 		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
t                5165 tools/power/x86/turbostat/turbostat.c 			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
t                 199 tools/testing/nvdimm/test/nfit.c 	int (*alloc)(struct nfit_test *t);
t                 200 tools/testing/nvdimm/test/nfit.c 	void (*setup)(struct nfit_test *t);
t                 230 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_get_fw_info(struct nfit_test *t,
t                 234 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 235 tools/testing/nvdimm/test/nfit.c 	struct nfit_test_fw *fw = &t->fw[idx];
t                 238 tools/testing/nvdimm/test/nfit.c 			__func__, t, nd_cmd, buf_len, idx);
t                 256 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_start_update(struct nfit_test *t,
t                 260 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 261 tools/testing/nvdimm/test/nfit.c 	struct nfit_test_fw *fw = &t->fw[idx];
t                 264 tools/testing/nvdimm/test/nfit.c 			__func__, t, nd_cmd, buf_len, idx);
t                 286 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_send_data(struct nfit_test *t,
t                 290 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 291 tools/testing/nvdimm/test/nfit.c 	struct nfit_test_fw *fw = &t->fw[idx];
t                 295 tools/testing/nvdimm/test/nfit.c 			__func__, t, nd_cmd, buf_len, idx);
t                 337 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_finish_fw(struct nfit_test *t,
t                 341 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 342 tools/testing/nvdimm/test/nfit.c 	struct nfit_test_fw *fw = &t->fw[idx];
t                 345 tools/testing/nvdimm/test/nfit.c 			__func__, t, nd_cmd, buf_len, idx);
t                 388 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_finish_query(struct nfit_test *t,
t                 392 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 393 tools/testing/nvdimm/test/nfit.c 	struct nfit_test_fw *fw = &t->fw[idx];
t                 396 tools/testing/nvdimm/test/nfit.c 			__func__, t, nd_cmd, buf_len, idx);
t                 566 tools/testing/nvdimm/test/nfit.c static int nfit_test_cmd_ars_start(struct nfit_test *t,
t                 581 tools/testing/nvdimm/test/nfit.c 		post_ars_status(ars_state, &t->badrange, ars_start->address,
t                 612 tools/testing/nvdimm/test/nfit.c static int nfit_test_cmd_clear_error(struct nfit_test *t,
t                 623 tools/testing/nvdimm/test/nfit.c 	badrange_forget(&t->badrange, clear_err->address, clear_err->length);
t                 816 tools/testing/nvdimm/test/nfit.c 	struct nfit_test *t = container_of(work, typeof(*t), work);
t                 818 tools/testing/nvdimm/test/nfit.c 	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
t                 821 tools/testing/nvdimm/test/nfit.c static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
t                 836 tools/testing/nvdimm/test/nfit.c 	rc =  badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
t                 842 tools/testing/nvdimm/test/nfit.c 		queue_work(nfit_wq, &t->work);
t                 852 tools/testing/nvdimm/test/nfit.c static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
t                 867 tools/testing/nvdimm/test/nfit.c 	badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
t                 878 tools/testing/nvdimm/test/nfit.c static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
t                 887 tools/testing/nvdimm/test/nfit.c 	spin_lock(&t->badrange.lock);
t                 888 tools/testing/nvdimm/test/nfit.c 	list_for_each_entry(be, &t->badrange.list, list) {
t                 895 tools/testing/nvdimm/test/nfit.c 	spin_unlock(&t->badrange.lock);
t                 901 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
t                 904 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 940 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_security_status(struct nfit_test *t,
t                 944 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 955 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_unlock_unit(struct nfit_test *t,
t                 959 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                 981 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_set_pass(struct nfit_test *t,
t                 985 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1006 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_freeze_lock(struct nfit_test *t,
t                1010 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1025 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_disable_pass(struct nfit_test *t,
t                1029 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1049 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
t                1053 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1080 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
t                1084 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1103 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
t                1107 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1126 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_master_set_pass(struct nfit_test *t,
t                1130 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1153 tools/testing/nvdimm/test/nfit.c static int nd_intel_test_cmd_master_secure_erase(struct nfit_test *t,
t                1157 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1200 tools/testing/nvdimm/test/nfit.c 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
t                1230 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_security_status(t,
t                1234 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_unlock_unit(t,
t                1238 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_set_pass(t,
t                1242 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_disable_pass(t,
t                1246 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_freeze_lock(t,
t                1250 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_secure_erase(t,
t                1254 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_overwrite(t,
t                1255 tools/testing/nvdimm/test/nfit.c 						buf, buf_len, i - t->dcr_idx);
t                1258 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_query_overwrite(t,
t                1259 tools/testing/nvdimm/test/nfit.c 						buf, buf_len, i - t->dcr_idx);
t                1262 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_master_set_pass(t,
t                1266 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_master_secure_erase(t,
t                1270 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_cmd_set_lss_status(t,
t                1274 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_get_fw_info(t, buf,
t                1275 tools/testing/nvdimm/test/nfit.c 						buf_len, i - t->dcr_idx);
t                1278 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_start_update(t, buf,
t                1279 tools/testing/nvdimm/test/nfit.c 						buf_len, i - t->dcr_idx);
t                1282 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_send_data(t, buf,
t                1283 tools/testing/nvdimm/test/nfit.c 						buf_len, i - t->dcr_idx);
t                1286 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_finish_fw(t, buf,
t                1287 tools/testing/nvdimm/test/nfit.c 						buf_len, i - t->dcr_idx);
t                1290 tools/testing/nvdimm/test/nfit.c 				rc = nd_intel_test_finish_query(t, buf,
t                1291 tools/testing/nvdimm/test/nfit.c 						buf_len, i - t->dcr_idx);
t                1295 tools/testing/nvdimm/test/nfit.c 						&t->smart[i - t->dcr_idx]);
t                1300 tools/testing/nvdimm/test/nfit.c 						&t->smart_threshold[i -
t                1301 tools/testing/nvdimm/test/nfit.c 							t->dcr_idx]);
t                1306 tools/testing/nvdimm/test/nfit.c 						&t->smart_threshold[i -
t                1307 tools/testing/nvdimm/test/nfit.c 							t->dcr_idx],
t                1308 tools/testing/nvdimm/test/nfit.c 						&t->smart[i - t->dcr_idx],
t                1309 tools/testing/nvdimm/test/nfit.c 						&t->pdev.dev, t->dimm_dev[i]);
t                1314 tools/testing/nvdimm/test/nfit.c 						&t->smart_threshold[i -
t                1315 tools/testing/nvdimm/test/nfit.c 							t->dcr_idx],
t                1316 tools/testing/nvdimm/test/nfit.c 						&t->smart[i - t->dcr_idx],
t                1317 tools/testing/nvdimm/test/nfit.c 						&t->pdev.dev, t->dimm_dev[i]);
t                1339 tools/testing/nvdimm/test/nfit.c 				t->label[i - t->dcr_idx]);
t                1343 tools/testing/nvdimm/test/nfit.c 				t->label[i - t->dcr_idx]);
t                1350 tools/testing/nvdimm/test/nfit.c 		struct ars_state *ars_state = &t->ars_state;
t                1368 tools/testing/nvdimm/test/nfit.c 				rc = nfit_test_cmd_ars_error_inject(t, buf,
t                1372 tools/testing/nvdimm/test/nfit.c 				rc = nfit_test_cmd_ars_inject_clear(t, buf,
t                1376 tools/testing/nvdimm/test/nfit.c 				rc = nfit_test_cmd_ars_inject_status(t, buf,
t                1392 tools/testing/nvdimm/test/nfit.c 			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
t                1400 tools/testing/nvdimm/test/nfit.c 			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
t                1428 tools/testing/nvdimm/test/nfit.c static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
t                1431 tools/testing/nvdimm/test/nfit.c 	struct device *dev = &t->pdev.dev;
t                1451 tools/testing/nvdimm/test/nfit.c 	list_add(&nfit_res->list, &t->resources);
t                1464 tools/testing/nvdimm/test/nfit.c static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
t                1476 tools/testing/nvdimm/test/nfit.c 	return __test_alloc(t, size, dma, buf);
t                1485 tools/testing/nvdimm/test/nfit.c 		struct nfit_test *t = instances[i];
t                1487 tools/testing/nvdimm/test/nfit.c 		if (!t)
t                1490 tools/testing/nvdimm/test/nfit.c 		list_for_each_entry(n, &t->resources, list) {
t                1523 tools/testing/nvdimm/test/nfit.c 	struct nfit_test *t = data;
t                1526 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++)
t                1527 tools/testing/nvdimm/test/nfit.c 		if (t->dimm_dev[i])
t                1528 tools/testing/nvdimm/test/nfit.c 			device_unregister(t->dimm_dev[i]);
t                1642 tools/testing/nvdimm/test/nfit.c static int nfit_test_dimm_init(struct nfit_test *t)
t                1646 tools/testing/nvdimm/test/nfit.c 	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
t                1648 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1649 tools/testing/nvdimm/test/nfit.c 		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
t                1650 tools/testing/nvdimm/test/nfit.c 				&t->pdev.dev, 0, NULL,
t                1652 tools/testing/nvdimm/test/nfit.c 				"test_dimm%d", i + t->dcr_idx);
t                1653 tools/testing/nvdimm/test/nfit.c 		if (!t->dimm_dev[i])
t                1659 tools/testing/nvdimm/test/nfit.c static void security_init(struct nfit_test *t)
t                1663 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1670 tools/testing/nvdimm/test/nfit.c static void smart_init(struct nfit_test *t)
t                1681 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1682 tools/testing/nvdimm/test/nfit.c 		memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
t                1683 tools/testing/nvdimm/test/nfit.c 		memcpy(&t->smart_threshold[i], &smart_t_data,
t                1688 tools/testing/nvdimm/test/nfit.c static int nfit_test0_alloc(struct nfit_test *t)
t                1701 tools/testing/nvdimm/test/nfit.c 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
t                1702 tools/testing/nvdimm/test/nfit.c 	if (!t->nfit_buf)
t                1704 tools/testing/nvdimm/test/nfit.c 	t->nfit_size = nfit_size;
t                1706 tools/testing/nvdimm/test/nfit.c 	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
t                1707 tools/testing/nvdimm/test/nfit.c 	if (!t->spa_set[0])
t                1710 tools/testing/nvdimm/test/nfit.c 	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
t                1711 tools/testing/nvdimm/test/nfit.c 	if (!t->spa_set[1])
t                1714 tools/testing/nvdimm/test/nfit.c 	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
t                1715 tools/testing/nvdimm/test/nfit.c 	if (!t->spa_set[2])
t                1718 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1719 tools/testing/nvdimm/test/nfit.c 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
t                1720 tools/testing/nvdimm/test/nfit.c 		if (!t->dimm[i])
t                1723 tools/testing/nvdimm/test/nfit.c 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
t                1724 tools/testing/nvdimm/test/nfit.c 		if (!t->label[i])
t                1726 tools/testing/nvdimm/test/nfit.c 		sprintf(t->label[i], "label%d", i);
t                1728 tools/testing/nvdimm/test/nfit.c 		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
t                1730 tools/testing/nvdimm/test/nfit.c 				&t->flush_dma[i]);
t                1731 tools/testing/nvdimm/test/nfit.c 		if (!t->flush[i])
t                1735 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1736 tools/testing/nvdimm/test/nfit.c 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
t                1737 tools/testing/nvdimm/test/nfit.c 		if (!t->dcr[i])
t                1741 tools/testing/nvdimm/test/nfit.c 	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
t                1742 tools/testing/nvdimm/test/nfit.c 	if (!t->_fit)
t                1745 tools/testing/nvdimm/test/nfit.c 	if (nfit_test_dimm_init(t))
t                1747 tools/testing/nvdimm/test/nfit.c 	smart_init(t);
t                1748 tools/testing/nvdimm/test/nfit.c 	security_init(t);
t                1749 tools/testing/nvdimm/test/nfit.c 	return ars_state_init(&t->pdev.dev, &t->ars_state);
t                1752 tools/testing/nvdimm/test/nfit.c static int nfit_test1_alloc(struct nfit_test *t)
t                1759 tools/testing/nvdimm/test/nfit.c 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
t                1760 tools/testing/nvdimm/test/nfit.c 	if (!t->nfit_buf)
t                1762 tools/testing/nvdimm/test/nfit.c 	t->nfit_size = nfit_size;
t                1764 tools/testing/nvdimm/test/nfit.c 	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
t                1765 tools/testing/nvdimm/test/nfit.c 	if (!t->spa_set[0])
t                1768 tools/testing/nvdimm/test/nfit.c 	for (i = 0; i < t->num_dcr; i++) {
t                1769 tools/testing/nvdimm/test/nfit.c 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
t                1770 tools/testing/nvdimm/test/nfit.c 		if (!t->label[i])
t                1772 tools/testing/nvdimm/test/nfit.c 		sprintf(t->label[i], "label%d", i);
t                1775 tools/testing/nvdimm/test/nfit.c 	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
t                1776 tools/testing/nvdimm/test/nfit.c 	if (!t->spa_set[1])
t                1779 tools/testing/nvdimm/test/nfit.c 	if (nfit_test_dimm_init(t))
t                1781 tools/testing/nvdimm/test/nfit.c 	smart_init(t);
t                1782 tools/testing/nvdimm/test/nfit.c 	return ars_state_init(&t->pdev.dev, &t->ars_state);
t                1795 tools/testing/nvdimm/test/nfit.c static void nfit_test0_setup(struct nfit_test *t)
t                1801 tools/testing/nvdimm/test/nfit.c 	void *nfit_buf = t->nfit_buf;
t                1819 tools/testing/nvdimm/test/nfit.c 	spa->address = t->spa_set_dma[0];
t                1833 tools/testing/nvdimm/test/nfit.c 	spa->address = t->spa_set_dma[1];
t                1843 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dcr_dma[0];
t                1853 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dcr_dma[1];
t                1863 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dcr_dma[2];
t                1873 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dcr_dma[3];
t                1883 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dimm_dma[0];
t                1893 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dimm_dma[1];
t                1903 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dimm_dma[2];
t                1913 tools/testing/nvdimm/test/nfit.c 	spa->address = t->dimm_dma[3];
t                2312 tools/testing/nvdimm/test/nfit.c 		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
t                2322 tools/testing/nvdimm/test/nfit.c 		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
t                2332 tools/testing/nvdimm/test/nfit.c 		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
t                2342 tools/testing/nvdimm/test/nfit.c 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
t                2353 tools/testing/nvdimm/test/nfit.c 	if (t->setup_hotplug) {
t                2400 tools/testing/nvdimm/test/nfit.c 		spa->address = t->dcr_dma[4];
t                2414 tools/testing/nvdimm/test/nfit.c 		spa->address = t->spa_set_dma[2];
t                2424 tools/testing/nvdimm/test/nfit.c 		spa->address = t->dimm_dma[4];
t                2484 tools/testing/nvdimm/test/nfit.c 			flush->hint_address[i] = t->flush_dma[4]
t                2489 tools/testing/nvdimm/test/nfit.c 		WARN_ON(offset != t->nfit_size);
t                2492 tools/testing/nvdimm/test/nfit.c 	t->nfit_filled = offset;
t                2494 tools/testing/nvdimm/test/nfit.c 	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
t                2497 tools/testing/nvdimm/test/nfit.c 	acpi_desc = &t->acpi_desc;
t                2536 tools/testing/nvdimm/test/nfit.c static void nfit_test1_setup(struct nfit_test *t)
t                2539 tools/testing/nvdimm/test/nfit.c 	void *nfit_buf = t->nfit_buf;
t                2552 tools/testing/nvdimm/test/nfit.c 	spa->address = t->spa_set_dma[0];
t                2562 tools/testing/nvdimm/test/nfit.c 	spa->address = t->spa_set_dma[1];
t                2626 tools/testing/nvdimm/test/nfit.c 	WARN_ON(offset != t->nfit_size);
t                2628 tools/testing/nvdimm/test/nfit.c 	t->nfit_filled = offset;
t                2630 tools/testing/nvdimm/test/nfit.c 	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
t                2633 tools/testing/nvdimm/test/nfit.c 	acpi_desc = &t->acpi_desc;
t                 205 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 	int t[11];
t                 711 tools/testing/selftests/bpf/test_align.c 		unsigned int t = atoi(argv[argc - 1]);
t                 713 tools/testing/selftests/bpf/test_align.c 		if (t < to) {
t                 714 tools/testing/selftests/bpf/test_align.c 			from = t;
t                 715 tools/testing/selftests/bpf/test_align.c 			to   = t + 1;
t                4244 tools/testing/selftests/bpf/test_btf.c 		const struct btf_type *t;
t                4247 tools/testing/selftests/bpf/test_btf.c 		t = btf__type_by_id(btf, finfo->type_id);
t                4248 tools/testing/selftests/bpf/test_btf.c 		if (CHECK(!t, "btf__type_by_id failure: id %u",
t                4254 tools/testing/selftests/bpf/test_btf.c 		fname = btf__name_by_offset(btf, t->name_off);
t                6671 tools/testing/selftests/bpf/test_btf.c static int btf_type_size(const struct btf_type *t)
t                6674 tools/testing/selftests/bpf/test_btf.c 	__u16 vlen = BTF_INFO_VLEN(t->info);
t                6675 tools/testing/selftests/bpf/test_btf.c 	__u16 kind = BTF_INFO_KIND(t->info);
t                 212 tools/testing/selftests/bpf/test_lpm_map.c 	struct tlpm_node *t, *list = NULL;
t                 260 tools/testing/selftests/bpf/test_lpm_map.c 		t = tlpm_match(list, data, 8 * keysize);
t                 266 tools/testing/selftests/bpf/test_lpm_map.c 		assert(!t == !!r);
t                 268 tools/testing/selftests/bpf/test_lpm_map.c 		if (t) {
t                 270 tools/testing/selftests/bpf/test_lpm_map.c 			assert(t->n_bits == value[keysize]);
t                 271 tools/testing/selftests/bpf/test_lpm_map.c 			for (j = 0; j < t->n_bits; ++j)
t                 272 tools/testing/selftests/bpf/test_lpm_map.c 				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
t                 283 tools/testing/selftests/bpf/test_lpm_map.c 	for (i = 0, t = list; t; i++, t = t->next)
t                 297 tools/testing/selftests/bpf/test_lpm_map.c 		t = tlpm_match(list, data, 8 * keysize);
t                 303 tools/testing/selftests/bpf/test_lpm_map.c 		assert(!t == !!r);
t                 305 tools/testing/selftests/bpf/test_lpm_map.c 		if (t) {
t                 307 tools/testing/selftests/bpf/test_lpm_map.c 			assert(t->n_bits == value[keysize]);
t                 308 tools/testing/selftests/bpf/test_lpm_map.c 			for (j = 0; j < t->n_bits; ++j)
t                 309 tools/testing/selftests/bpf/test_lpm_map.c 				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
t                 875 tools/testing/selftests/bpf/test_lru_map.c 	int t, f;
t                 887 tools/testing/selftests/bpf/test_lru_map.c 		for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
t                 888 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity0(map_types[t], map_flags[f]);
t                 889 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
t                 890 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
t                 891 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
t                 892 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
t                 893 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity5(map_types[t], map_flags[f]);
t                 894 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
t                 895 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity7(map_types[t], map_flags[f]);
t                 896 tools/testing/selftests/bpf/test_lru_map.c 			test_lru_sanity8(map_types[t], map_flags[f]);
t                1530 tools/testing/selftests/bpf/test_maps.c 	int map_fd, err, t, f;
t                1564 tools/testing/selftests/bpf/test_maps.c 	for (t = 0; t < ARRAY_SIZE(types); t++) {
t                1565 tools/testing/selftests/bpf/test_maps.c 		type = types[t];
t                 703 tools/testing/selftests/bpf/test_select_reuseport.c 	int t, f, err;
t                 708 tools/testing/selftests/bpf/test_select_reuseport.c 		for (t = 0; t < ARRAY_SIZE(types); t++) {
t                 709 tools/testing/selftests/bpf/test_select_reuseport.c 			bool inany = bind_inany[t];
t                 710 tools/testing/selftests/bpf/test_select_reuseport.c 			int type = types[t];
t                 713 tools/testing/selftests/bpf/test_select_reuseport.c 			       family_strings[f], type_strings[t],
t                  36 tools/testing/selftests/bpf/test_tcpnotify_user.c 	struct tcp_notifier *t = data;
t                  38 tools/testing/selftests/bpf/test_tcpnotify_user.c 	if (t->type != 0xde || t->subtype != 0xad ||
t                  39 tools/testing/selftests/bpf/test_tcpnotify_user.c 	    t->source != 0xbe || t->hash != 0xef)
t                1156 tools/testing/selftests/bpf/test_verifier.c 		unsigned int t = atoi(argv[arg]);
t                1158 tools/testing/selftests/bpf/test_verifier.c 		if (t < to) {
t                1159 tools/testing/selftests/bpf/test_verifier.c 			from = t;
t                1160 tools/testing/selftests/bpf/test_verifier.c 			to   = t + 1;
t                 665 tools/testing/selftests/kselftest_harness.h static inline void __register_test(struct __test_metadata *t)
t                 670 tools/testing/selftests/kselftest_harness.h 		__test_list = t;
t                 671 tools/testing/selftests/kselftest_harness.h 		t->next = NULL;
t                 672 tools/testing/selftests/kselftest_harness.h 		t->prev = t;
t                 676 tools/testing/selftests/kselftest_harness.h 		t->next = NULL;
t                 677 tools/testing/selftests/kselftest_harness.h 		t->prev = __test_list->prev;
t                 678 tools/testing/selftests/kselftest_harness.h 		t->prev->next = t;
t                 679 tools/testing/selftests/kselftest_harness.h 		__test_list->prev = t;
t                 681 tools/testing/selftests/kselftest_harness.h 		t->next = __test_list;
t                 682 tools/testing/selftests/kselftest_harness.h 		t->next->prev = t;
t                 683 tools/testing/selftests/kselftest_harness.h 		t->prev = t;
t                 684 tools/testing/selftests/kselftest_harness.h 		__test_list = t;
t                 698 tools/testing/selftests/kselftest_harness.h void __run_test(struct __test_metadata *t)
t                 703 tools/testing/selftests/kselftest_harness.h 	t->passed = 1;
t                 704 tools/testing/selftests/kselftest_harness.h 	t->trigger = 0;
t                 705 tools/testing/selftests/kselftest_harness.h 	printf("[ RUN      ] %s\n", t->name);
t                 706 tools/testing/selftests/kselftest_harness.h 	alarm(t->timeout);
t                 710 tools/testing/selftests/kselftest_harness.h 		t->passed = 0;
t                 712 tools/testing/selftests/kselftest_harness.h 		t->fn(t);
t                 714 tools/testing/selftests/kselftest_harness.h 		_exit(t->passed ? 0 : t->step);
t                 719 tools/testing/selftests/kselftest_harness.h 			t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
t                 720 tools/testing/selftests/kselftest_harness.h 			if (t->termsig != -1) {
t                 724 tools/testing/selftests/kselftest_harness.h 					t->name,
t                 726 tools/testing/selftests/kselftest_harness.h 			} else if (!t->passed) {
t                 729 tools/testing/selftests/kselftest_harness.h 					t->name,
t                 733 tools/testing/selftests/kselftest_harness.h 			t->passed = 0;
t                 737 tools/testing/selftests/kselftest_harness.h 					t->name);
t                 738 tools/testing/selftests/kselftest_harness.h 			} else if (WTERMSIG(status) == t->termsig) {
t                 739 tools/testing/selftests/kselftest_harness.h 				t->passed = 1;
t                 744 tools/testing/selftests/kselftest_harness.h 					t->name,
t                 750 tools/testing/selftests/kselftest_harness.h 				t->name,
t                 754 tools/testing/selftests/kselftest_harness.h 	printf("[     %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
t                 761 tools/testing/selftests/kselftest_harness.h 	struct __test_metadata *t;
t                 769 tools/testing/selftests/kselftest_harness.h 	for (t = __test_list; t; t = t->next) {
t                 771 tools/testing/selftests/kselftest_harness.h 		__run_test(t);
t                 772 tools/testing/selftests/kselftest_harness.h 		if (t->passed)
t                 428 tools/testing/selftests/mqueue/mq_perf_tests.c 	pthread_t *t;
t                 433 tools/testing/selftests/mqueue/mq_perf_tests.c 	t = &cpu_threads[0];
t                 124 tools/testing/selftests/networking/timestamping/rxtimestamp.c void print_test_case(struct test_case *t)
t                 129 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->sockopt.so_timestamp)
t                 131 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->sockopt.so_timestampns)
t                 133 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->sockopt.so_timestamping) {
t                 136 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			if (t->sockopt.so_timestamping & sof_flags[f].mask)
t                 141 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->expected.tstamp)
t                 143 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->expected.tstampns)
t                 145 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	if (t->expected.swtstamp || t->expected.hwtstamp) {
t                 147 tools/testing/selftests/networking/timestamping/rxtimestamp.c 		if (t->expected.swtstamp)
t                 149 tools/testing/selftests/networking/timestamping/rxtimestamp.c 		if (t->expected.swtstamp && t->expected.hwtstamp)
t                 151 tools/testing/selftests/networking/timestamping/rxtimestamp.c 		if (t->expected.hwtstamp)
t                 273 tools/testing/selftests/networking/timestamping/rxtimestamp.c bool run_test_case(struct socket_type s, struct test_case t)
t                 312 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	config_so_flags(rcv, t.sockopt);
t                 318 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	failed = do_recv(rcv, read_size, t.expected);
t                 332 tools/testing/selftests/networking/timestamping/rxtimestamp.c 	int s, t;
t                 339 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			for (t = 0; t < ARRAY_SIZE(test_cases); t++) {
t                 340 tools/testing/selftests/networking/timestamping/rxtimestamp.c 				printf("%d\t", t);
t                 341 tools/testing/selftests/networking/timestamping/rxtimestamp.c 				print_test_case(&test_cases[t]);
t                 345 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			t = atoi(optarg);
t                 346 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			if (t >= ARRAY_SIZE(test_cases))
t                 347 tools/testing/selftests/networking/timestamping/rxtimestamp.c 				error(1, 0, "Invalid test case: %d", t);
t                 349 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			test_cases[t].enabled = true;
t                 376 tools/testing/selftests/networking/timestamping/rxtimestamp.c 		for (t = 0; t < ARRAY_SIZE(test_cases); t++) {
t                 377 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			if (!all_tests && !test_cases[t].enabled)
t                 380 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			printf("Starting testcase %d...\n", t);
t                 381 tools/testing/selftests/networking/timestamping/rxtimestamp.c 			if (run_test_case(socket_types[s], test_cases[t])) {
t                 384 tools/testing/selftests/networking/timestamping/rxtimestamp.c 				print_test_case(&test_cases[t]);
t                  20 tools/testing/selftests/powerpc/primitives/asm/asm-compat.h #define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
t                  53 tools/testing/selftests/powerpc/primitives/asm/asm-compat.h #define PPC_LLARX(t, a, b, eh)	PPC_LWARX(t, a, b, eh)
t                 203 tools/testing/selftests/powerpc/signal/sigfuz.c 	pid_t t;
t                 222 tools/testing/selftests/powerpc/signal/sigfuz.c 		t = fork();
t                 224 tools/testing/selftests/powerpc/signal/sigfuz.c 		if (t == 0) {
t                 234 tools/testing/selftests/powerpc/signal/sigfuz.c 			waitpid(t, &ret, 0);
t                 251 tools/testing/selftests/powerpc/signal/sigfuz.c 	int t, rc;
t                 256 tools/testing/selftests/powerpc/signal/sigfuz.c 	for (t = 0; t < nthread; t++) {
t                 257 tools/testing/selftests/powerpc/signal/sigfuz.c 		rc = pthread_create(&threads[t], NULL, sigfuz_test,
t                 258 tools/testing/selftests/powerpc/signal/sigfuz.c 				    (void *)&t);
t                 263 tools/testing/selftests/powerpc/signal/sigfuz.c 	for (t = 0; t < nthread; t++) {
t                 264 tools/testing/selftests/powerpc/signal/sigfuz.c 		rc = pthread_join(threads[t], NULL);
t                  25 tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h #define ___PPC_RT(t)	___PPC_RS(t)
t                 109 tools/testing/selftests/ptp/testptp.c static int64_t pctns(struct ptp_clock_time *t)
t                 111 tools/testing/selftests/ptp/testptp.c 	return t->sec * 1000000000LL + t->nsec;
t                 360 tools/testing/selftests/ptp/testptp.c 			       event.t.sec, event.t.nsec);
t                  99 tools/testing/selftests/timers/inconsistency-check.c 	time_t t;
t                 106 tools/testing/selftests/timers/inconsistency-check.c 	t = time(0);
t                 107 tools/testing/selftests/timers/inconsistency-check.c 	start_str = ctime(&t);
t                 141 tools/testing/selftests/timers/inconsistency-check.c 			t = time(0);
t                 142 tools/testing/selftests/timers/inconsistency-check.c 			printf("%s\n", ctime(&t));
t                 237 tools/testing/selftests/vm/va_128TBswitch.c 		struct testcase *t = test + i;
t                 239 tools/testing/selftests/vm/va_128TBswitch.c 		p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);
t                 241 tools/testing/selftests/vm/va_128TBswitch.c 		printf("%s: %p - ", t->msg, p);
t                 249 tools/testing/selftests/vm/va_128TBswitch.c 		if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
t                 257 tools/testing/selftests/vm/va_128TBswitch.c 			memset(p, 0, t->size);
t                 260 tools/testing/selftests/vm/va_128TBswitch.c 		if (!t->keep_mapped)
t                 261 tools/testing/selftests/vm/va_128TBswitch.c 			munmap(p, t->size);
t                  62 tools/testing/selftests/x86/test_vsyscall.c typedef long (*time_func_t)(time_t *t);
t                 169 tools/testing/selftests/x86/test_vsyscall.c static inline long sys_time(time_t *t)
t                 171 tools/testing/selftests/x86/test_vsyscall.c 	return syscall(SYS_time, t);
t                 336 tools/usb/ffs-test.c static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes);
t                 337 tools/usb/ffs-test.c static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes);
t                 338 tools/usb/ffs-test.c static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes);
t                 339 tools/usb/ffs-test.c static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes);
t                 340 tools/usb/ffs-test.c static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes);
t                 379 tools/usb/ffs-test.c static void init_thread(struct thread *t)
t                 381 tools/usb/ffs-test.c 	t->buf = malloc(t->buf_size);
t                 382 tools/usb/ffs-test.c 	die_on(!t->buf, "malloc");
t                 384 tools/usb/ffs-test.c 	t->fd = open(t->filename, O_RDWR);
t                 385 tools/usb/ffs-test.c 	die_on(t->fd < 0, "%s", t->filename);
t                 390 tools/usb/ffs-test.c 	struct thread *t = arg;
t                 393 tools/usb/ffs-test.c 	fd = t->fd;
t                 394 tools/usb/ffs-test.c 	if (t->fd < 0)
t                 396 tools/usb/ffs-test.c 	t->fd = -1;
t                 399 tools/usb/ffs-test.c 	if (t != threads) {
t                 404 tools/usb/ffs-test.c 				err("%s: get fifo status", t->filename);
t                 406 tools/usb/ffs-test.c 			warn("%s: unclaimed = %d\n", t->filename, ret);
t                 408 tools/usb/ffs-test.c 				err("%s: fifo flush", t->filename);
t                 413 tools/usb/ffs-test.c 		err("%s: close", t->filename);
t                 415 tools/usb/ffs-test.c 	free(t->buf);
t                 416 tools/usb/ffs-test.c 	t->buf = NULL;
t                 422 tools/usb/ffs-test.c 	struct thread *t = arg;
t                 425 tools/usb/ffs-test.c 	info("%s: starts\n", t->filename);
t                 426 tools/usb/ffs-test.c 	in_name = t->in_name ? t->in_name : t->filename;
t                 427 tools/usb/ffs-test.c 	out_name = t->out_name ? t->out_name : t->filename;
t                 434 tools/usb/ffs-test.c 		ret = t->in(t, t->buf, t->buf_size);
t                 436 tools/usb/ffs-test.c 			ret = t->out(t, t->buf, ret);
t                 459 tools/usb/ffs-test.c 	t->status = ret;
t                 460 tools/usb/ffs-test.c 	info("%s: ends\n", t->filename);
t                 464 tools/usb/ffs-test.c static void start_thread(struct thread *t)
t                 466 tools/usb/ffs-test.c 	debug("%s: starting\n", t->filename);
t                 468 tools/usb/ffs-test.c 	die_on(pthread_create(&t->id, NULL, start_thread_helper, t) < 0,
t                 469 tools/usb/ffs-test.c 	       "pthread_create(%s)", t->filename);
t                 472 tools/usb/ffs-test.c static void join_thread(struct thread *t)
t                 474 tools/usb/ffs-test.c 	int ret = pthread_join(t->id, NULL);
t                 477 tools/usb/ffs-test.c 		err("%s: joining thread", t->filename);
t                 479 tools/usb/ffs-test.c 		debug("%s: joined\n", t->filename);
t                 483 tools/usb/ffs-test.c static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes)
t                 485 tools/usb/ffs-test.c 	return read(t->fd, buf, nbytes);
t                 488 tools/usb/ffs-test.c static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes)
t                 490 tools/usb/ffs-test.c 	return write(t->fd, buf, nbytes);
t                 626 tools/usb/ffs-test.c static void ep0_init(struct thread *t, bool legacy_descriptors)
t                 633 tools/usb/ffs-test.c 		info("%s: writing descriptors\n", t->filename);
t                 637 tools/usb/ffs-test.c 	info("%s: writing descriptors (in v2 format)\n", t->filename);
t                 638 tools/usb/ffs-test.c 	ret = write(t->fd, &descriptors, sizeof descriptors);
t                 641 tools/usb/ffs-test.c 		warn("%s: new format rejected, trying legacy\n", t->filename);
t                 645 tools/usb/ffs-test.c 			ret = write(t->fd, legacy, len);
t                 649 tools/usb/ffs-test.c 	die_on(ret < 0, "%s: write: descriptors", t->filename);
t                 651 tools/usb/ffs-test.c 	info("%s: writing strings\n", t->filename);
t                 652 tools/usb/ffs-test.c 	ret = write(t->fd, &strings, sizeof strings);
t                 653 tools/usb/ffs-test.c 	die_on(ret < 0, "%s: write: strings", t->filename);
t                  22 tools/virtio/ringtest/main.h 	unsigned long long t;
t                  24 tools/virtio/ringtest/main.h 	t = __rdtsc();
t                  25 tools/virtio/ringtest/main.h 	while (__rdtsc() - t < cycles) {}
t                 274 tools/vm/slabinfo.c static void decode_numa_list(int *numa, char *t)
t                 281 tools/vm/slabinfo.c 	if (!t)
t                 284 tools/vm/slabinfo.c 	while (*t == 'N') {
t                 285 tools/vm/slabinfo.c 		t++;
t                 286 tools/vm/slabinfo.c 		node = strtoul(t, &t, 10);
t                 287 tools/vm/slabinfo.c 		if (*t == '=') {
t                 288 tools/vm/slabinfo.c 			t++;
t                 289 tools/vm/slabinfo.c 			nr = strtoul(t, &t, 10);
t                 294 tools/vm/slabinfo.c 		while (*t == ' ')
t                 295 tools/vm/slabinfo.c 			t++;
t                1063 tools/vm/slabinfo.c 				struct slabinfo t;
t                1065 tools/vm/slabinfo.c 				memcpy(&t, s1, sizeof(struct slabinfo));
t                1067 tools/vm/slabinfo.c 				memcpy(s2, &t, sizeof(struct slabinfo));
t                1088 tools/vm/slabinfo.c 				struct aliasinfo t;
t                1090 tools/vm/slabinfo.c 				memcpy(&t, a1, sizeof(struct aliasinfo));
t                1092 tools/vm/slabinfo.c 				memcpy(a2, &t, sizeof(struct aliasinfo));
t                1182 tools/vm/slabinfo.c 	char *t;
t                1227 tools/vm/slabinfo.c 			slab->partial = get_obj_and_str("partial", &t);
t                1228 tools/vm/slabinfo.c 			decode_numa_list(slab->numa_partial, t);
t                1229 tools/vm/slabinfo.c 			free(t);
t                1235 tools/vm/slabinfo.c 			slab->slabs = get_obj_and_str("slabs", &t);
t                1236 tools/vm/slabinfo.c 			decode_numa_list(slab->numa, t);
t                1237 tools/vm/slabinfo.c 			free(t);