curr              506 arch/ia64/kernel/mca.c         const struct mca_table_entry *curr;
curr              509 arch/ia64/kernel/mca.c         curr = first;
curr              510 arch/ia64/kernel/mca.c         while (curr <= last) {
curr              511 arch/ia64/kernel/mca.c                 curr_start = (u64) &curr->start_addr + curr->start_addr;
curr              512 arch/ia64/kernel/mca.c                 curr_end = (u64) &curr->end_addr + curr->end_addr;
curr              517 arch/ia64/kernel/mca.c                 curr++;
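
The mca.c entries above walk a lookup table whose entries hold self-relative addresses: the absolute start and end of each range are recovered by adding the stored offset to the address of the field that stores it. A minimal userspace sketch of that encoding, using hypothetical names (range_entry, decode) rather than the kernel's actual types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical entry type; the real mca_table_entry differs. */
struct range_entry {
	int32_t start_off;	/* offset from &start_off to the range start */
	int32_t end_off;	/* offset from &end_off to the range end */
};

/* Recover an absolute address from a self-relative field. */
static uintptr_t decode(const int32_t *field)
{
	return (uintptr_t)field + *field;
}

int main(void)
{
	static char buf[64];
	static struct range_entry e;

	/* Encode: store the target minus the field's own address. */
	e.start_off = (int32_t)((uintptr_t)&buf[0] - (uintptr_t)&e.start_off);
	e.end_off = (int32_t)((uintptr_t)&buf[63] - (uintptr_t)&e.end_off);

	printf("buf=%p start=%p end=%p\n", (void *)buf,
	       (void *)decode(&e.start_off), (void *)decode(&e.end_off));
	return 0;
}

Because only distances are stored, the table stays valid wherever the image is loaded, with no relocation pass.
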
curr              625 arch/ia64/kernel/unwind.c 	memcpy(rs, &sr->curr, sizeof(*rs));
curr              626 arch/ia64/kernel/unwind.c 	sr->curr.next = rs;
curr              632 arch/ia64/kernel/unwind.c 	struct unw_reg_state *rs = sr->curr.next;
curr              638 arch/ia64/kernel/unwind.c 	memcpy(&sr->curr, rs, sizeof(*rs));
curr              757 arch/ia64/kernel/unwind.c 		reg = sr->curr.reg + unw.save_order[i];
curr              778 arch/ia64/kernel/unwind.c 		regs[0] = sr->curr.reg + UNW_REG_F2;
curr              779 arch/ia64/kernel/unwind.c 		regs[1] = sr->curr.reg + UNW_REG_R4;
curr              780 arch/ia64/kernel/unwind.c 		regs[2] = sr->curr.reg + UNW_REG_B1;
curr              787 arch/ia64/kernel/unwind.c 				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
curr              796 arch/ia64/kernel/unwind.c 		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
curr              797 arch/ia64/kernel/unwind.c 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
curr              798 arch/ia64/kernel/unwind.c 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
curr              838 arch/ia64/kernel/unwind.c 				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
curr              872 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
curr              885 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
curr              900 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
curr              909 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
curr              924 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
curr              939 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
curr              952 arch/ia64/kernel/unwind.c 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
curr              963 arch/ia64/kernel/unwind.c 	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
curr              970 arch/ia64/kernel/unwind.c 	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
curr              976 arch/ia64/kernel/unwind.c 	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
curr              982 arch/ia64/kernel/unwind.c 	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
curr              989 arch/ia64/kernel/unwind.c 	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
curr             1002 arch/ia64/kernel/unwind.c 	struct unw_reg_info *reg = sr->curr.reg + regnum;
curr             1039 arch/ia64/kernel/unwind.c 			free_state_stack(&sr->curr);
curr             1040 arch/ia64/kernel/unwind.c 			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
curr             1041 arch/ia64/kernel/unwind.c 			sr->curr.next = dup_state_stack(ls->saved_state.next);
curr             1059 arch/ia64/kernel/unwind.c 	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
curr             1060 arch/ia64/kernel/unwind.c 	ls->saved_state.next = dup_state_stack(sr->curr.next);
curr             1092 arch/ia64/kernel/unwind.c 	r = sr->curr.reg + decode_abreg(abreg, 0);
curr             1113 arch/ia64/kernel/unwind.c 	r = sr->curr.reg + decode_abreg(abreg, 0);
curr             1128 arch/ia64/kernel/unwind.c 	r = sr->curr.reg + decode_abreg(abreg, 1);
curr             1143 arch/ia64/kernel/unwind.c 	r = sr->curr.reg + decode_abreg(abreg, 1);
curr             1359 arch/ia64/kernel/unwind.c 	struct unw_reg_info *r = sr->curr.reg + i;
curr             1404 arch/ia64/kernel/unwind.c 	struct unw_reg_info *r = sr->curr.reg + i;
curr             1546 arch/ia64/kernel/unwind.c 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
curr             1588 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
curr             1589 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].when = -1;
curr             1590 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].val = 0;
curr             1612 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_PSP].val = 0;
curr             1613 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
curr             1614 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
curr             1615 arch/ia64/kernel/unwind.c 		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
curr             1631 arch/ia64/kernel/unwind.c 	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
curr             1632 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
curr             1633 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].when = -1;
curr             1634 arch/ia64/kernel/unwind.c 		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
curr             1636 arch/ia64/kernel/unwind.c 			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
curr             1637 arch/ia64/kernel/unwind.c 			   sr.curr.reg[UNW_REG_RP].val);
curr             1643 arch/ia64/kernel/unwind.c 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
curr             1645 arch/ia64/kernel/unwind.c 			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
curr             1653 arch/ia64/kernel/unwind.c 				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
curr             1673 arch/ia64/kernel/unwind.c 	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
curr             1674 arch/ia64/kernel/unwind.c 	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
curr             1675 arch/ia64/kernel/unwind.c 	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
curr             1679 arch/ia64/kernel/unwind.c 		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
curr             1684 arch/ia64/kernel/unwind.c 	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
curr             1686 arch/ia64/kernel/unwind.c 	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
curr             1688 arch/ia64/kernel/unwind.c 	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
curr             1706 arch/ia64/kernel/unwind.c 	free_state_stack(&sr.curr);
curr              115 arch/ia64/kernel/unwind_i.h 	struct unw_reg_state curr;	/* current state */
curr               39 arch/mips/include/asm/sgialib.h extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
curr               47 arch/mips/kernel/csrc-r4k.c 	unsigned int prev, curr, i;
curr               56 arch/mips/kernel/csrc-r4k.c 		curr = rdhwr_count();
curr               58 arch/mips/kernel/csrc-r4k.c 		if (curr != prev)
curr               61 arch/mips/kernel/csrc-r4k.c 		prev = curr;
curr              688 arch/parisc/kernel/drivers.c 	struct parisc_device *curr = to_parisc_device(dev);
curr              691 arch/parisc/kernel/drivers.c 	return (curr->hw_path == id);
curr              347 arch/powerpc/lib/feature-fixups.c static void patch_btb_flush_section(long *curr)
curr              351 arch/powerpc/lib/feature-fixups.c 	start = (void *)curr + *curr;
curr              352 arch/powerpc/lib/feature-fixups.c 	end = (void *)curr + *(curr + 1);
curr               96 arch/powerpc/platforms/pseries/vio.c 	size_t curr;
curr              161 arch/powerpc/platforms/pseries/vio.c 		vio_cmo.curr += size;
curr              162 arch/powerpc/platforms/pseries/vio.c 		if (vio_cmo.curr > vio_cmo.high)
curr              163 arch/powerpc/platforms/pseries/vio.c 			vio_cmo.high = vio_cmo.curr;
curr              196 arch/powerpc/platforms/pseries/vio.c 	vio_cmo.curr -= size;
curr             1022 arch/powerpc/platforms/pseries/vio.c viobus_cmo_rd_attr(curr);
curr             1038 arch/powerpc/platforms/pseries/vio.c 	vio_cmo.high = vio_cmo.curr;
curr              126 arch/s390/kernel/perf_cpum_sf.c 	unsigned long *sdbt, *curr;
curr              132 arch/s390/kernel/perf_cpum_sf.c 	curr = sdbt;
curr              136 arch/s390/kernel/perf_cpum_sf.c 		if (!*curr || !sdbt)
curr              140 arch/s390/kernel/perf_cpum_sf.c 		if (is_link_entry(curr)) {
curr              141 arch/s390/kernel/perf_cpum_sf.c 			curr = get_next_sdbt(curr);
curr              146 arch/s390/kernel/perf_cpum_sf.c 			if (curr == sfb->sdbt)
curr              149 arch/s390/kernel/perf_cpum_sf.c 				sdbt = curr;
curr              152 arch/s390/kernel/perf_cpum_sf.c 			if (*curr) {
curr              153 arch/s390/kernel/perf_cpum_sf.c 				free_page(*curr);
curr              154 arch/s390/kernel/perf_cpum_sf.c 				curr++;
curr             1952 arch/x86/kernel/cpu/common.c 	struct task_struct *curr = current;
curr             1973 arch/x86/kernel/cpu/common.c 	curr->active_mm = &init_mm;
curr             1974 arch/x86/kernel/cpu/common.c 	BUG_ON(curr->mm);
curr             1976 arch/x86/kernel/cpu/common.c 	enter_lazy_tlb(&init_mm, curr);
curr               83 arch/x86/kernel/cpu/mtrr/generic.c static int check_type_overlap(u8 *prev, u8 *curr)
curr               85 arch/x86/kernel/cpu/mtrr/generic.c 	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
curr               87 arch/x86/kernel/cpu/mtrr/generic.c 		*curr = MTRR_TYPE_UNCACHABLE;
curr               91 arch/x86/kernel/cpu/mtrr/generic.c 	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
curr               92 arch/x86/kernel/cpu/mtrr/generic.c 	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
curr               94 arch/x86/kernel/cpu/mtrr/generic.c 		*curr = MTRR_TYPE_WRTHROUGH;
curr               97 arch/x86/kernel/cpu/mtrr/generic.c 	if (*prev != *curr) {
curr               99 arch/x86/kernel/cpu/mtrr/generic.c 		*curr = MTRR_TYPE_UNCACHABLE;
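
The mtrr/generic.c entries above contain the complete effective-type merge for overlapping MTRR ranges: uncachable dominates, write-back combined with write-through degrades to write-through, and any other mismatch falls back to uncachable. A standalone restatement of those rules, with a hypothetical mem_type enum standing in for the MTRR_TYPE_* constants:

#include <stdio.h>

/* Hypothetical memory-type codes mirroring the MTRR values above. */
enum mem_type { MT_UNCACHABLE, MT_WRTHROUGH, MT_WRBACK, MT_WRCOMB };

/*
 * Merge the effective type of two overlapping ranges, following the
 * rules visible in check_type_overlap(): uncachable wins outright,
 * WB + WT degrades to WT, any other mismatch becomes uncachable.
 */
static enum mem_type merge_types(enum mem_type prev, enum mem_type curr)
{
	if (prev == MT_UNCACHABLE || curr == MT_UNCACHABLE)
		return MT_UNCACHABLE;
	if ((prev == MT_WRBACK && curr == MT_WRTHROUGH) ||
	    (prev == MT_WRTHROUGH && curr == MT_WRBACK))
		return MT_WRTHROUGH;
	if (prev != curr)
		return MT_UNCACHABLE;
	return curr;
}

int main(void)
{
	printf("%d\n", merge_types(MT_WRBACK, MT_WRTHROUGH)); /* 1 (WT) */
	return 0;
}
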
curr              174 arch/x86/kvm/x86.c 		u64 curr;
curr              258 arch/x86/kvm/x86.c 		if (values->host != values->curr) {
curr              260 arch/x86/kvm/x86.c 			values->curr = values->host;
curr              279 arch/x86/kvm/x86.c 	smsr->values[slot].curr = value;
curr              306 arch/x86/kvm/x86.c 	if (value == smsr->values[slot].curr)
curr              312 arch/x86/kvm/x86.c 	smsr->values[slot].curr = value;
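
The kvm/x86.c entries above show a shadowed-register pattern: each MSR slot caches the last value written (curr) alongside the value the host expects (host), so the expensive write is skipped when nothing changed and the host value is put back only if the guest actually diverged. A userspace sketch under those assumptions; hw_write and shadowed_reg are hypothetical stand-ins for wrmsr and the shared-MSR bookkeeping:

#include <stdio.h>

/* Hypothetical stand-in for the expensive register write. */
static void hw_write(unsigned slot, unsigned long long v)
{
	printf("hw_write slot=%u val=%llx\n", slot, v);
}

struct shadowed_reg {
	unsigned long long host;	/* value the host expects */
	unsigned long long curr;	/* last value written to hardware */
};

/* Only touch hardware when the cached copy is stale. */
static void set_reg(struct shadowed_reg *r, unsigned slot,
		    unsigned long long value)
{
	if (value == r->curr)
		return;
	hw_write(slot, value);
	r->curr = value;
}

/* Undo guest values on return to the host. */
static void restore_host(struct shadowed_reg *r, unsigned slot)
{
	if (r->host != r->curr) {
		hw_write(slot, r->host);
		r->curr = r->host;
	}
}

int main(void)
{
	struct shadowed_reg r = { .host = 0xabc, .curr = 0xabc };

	set_reg(&r, 0, 0xdef);	/* writes */
	set_reg(&r, 0, 0xdef);	/* skipped: cached value matches */
	restore_host(&r, 0);	/* writes the host value back */
	return 0;
}
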
curr               98 arch/x86/um/tls_32.c 		struct uml_tls_struct* curr =
curr              105 arch/x86/um/tls_32.c 		if (!curr->present) {
curr              106 arch/x86/um/tls_32.c 			if (!curr->flushed) {
curr              107 arch/x86/um/tls_32.c 				clear_user_desc(&curr->tls);
curr              108 arch/x86/um/tls_32.c 				curr->tls.entry_number = idx;
curr              110 arch/x86/um/tls_32.c 				WARN_ON(!LDT_empty(&curr->tls));
curr              115 arch/x86/um/tls_32.c 		if (!(flags & O_FORCE) && curr->flushed)
curr              118 arch/x86/um/tls_32.c 		ret = do_set_thread_area(&curr->tls);
curr              122 arch/x86/um/tls_32.c 		curr->flushed = 1;
curr              138 arch/x86/um/tls_32.c 		struct uml_tls_struct* curr =
curr              145 arch/x86/um/tls_32.c 		if (curr->flushed)
curr              162 arch/x86/um/tls_32.c 		struct uml_tls_struct* curr =
curr              169 arch/x86/um/tls_32.c 		if (!curr->present)
curr              172 arch/x86/um/tls_32.c 		curr->flushed = 0;
curr              212 block/blk-rq-qos.c static int rq_qos_wake_function(struct wait_queue_entry *curr,
curr              215 block/blk-rq-qos.c 	struct rq_qos_wait_data *data = container_of(curr,
curr              228 block/blk-rq-qos.c 	list_del_init(&curr->entry);
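
The blk-rq-qos.c entries above use the standard custom wake-function idiom: the waiter embeds a wait_queue_entry inside its own bookkeeping struct, and the callback recovers the outer struct from the entry pointer with container_of. A self-contained sketch of that pointer recovery, with hypothetical wait_entry/wait_data types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical miniature of the embedded-entry pattern. */
struct wait_entry { struct wait_entry *next; };

struct wait_data {
	int token;
	struct wait_entry wq;	/* embedded entry handed to the waker */
};

static int wake_fn(struct wait_entry *curr)
{
	/* Recover the outer structure from the member pointer. */
	struct wait_data *data = container_of(curr, struct wait_data, wq);

	printf("woke waiter with token %d\n", data->token);
	return 1;
}

int main(void)
{
	struct wait_data d = { .token = 42 };

	return !wake_fn(&d.wq);
}
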
curr              323 crypto/drbg.c  	struct drbg_string *curr = NULL;
curr              331 crypto/drbg.c  	list_for_each_entry(curr, in, list) {
curr              332 crypto/drbg.c  		const unsigned char *pos = curr->buf;
curr              333 crypto/drbg.c  		size_t len = curr->len;
curr              361 drivers/acpi/acpi_video.c 	device->brightness->curr = level;
curr              618 drivers/acpi/acpi_video.c 					device->brightness->curr = *level;
curr              643 drivers/acpi/acpi_video.c 	*level = device->brightness->curr;
curr              950 drivers/acpi/acpi_video.c 	br->curr = level = max_level;
curr             1713 drivers/acpi/acpi_video.c 					video_device->brightness->curr);
curr               82 drivers/acpi/arm64/iort.c 	struct iort_fwnode *curr;
curr               86 drivers/acpi/arm64/iort.c 	list_for_each_entry(curr, &iort_fwnode_list, list) {
curr               87 drivers/acpi/arm64/iort.c 		if (curr->iort_node == node) {
curr               88 drivers/acpi/arm64/iort.c 			fwnode = curr->fwnode;
curr              104 drivers/acpi/arm64/iort.c 	struct iort_fwnode *curr, *tmp;
curr              107 drivers/acpi/arm64/iort.c 	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
curr              108 drivers/acpi/arm64/iort.c 		if (curr->iort_node == node) {
curr              109 drivers/acpi/arm64/iort.c 			list_del(&curr->list);
curr              110 drivers/acpi/arm64/iort.c 			kfree(curr);
curr              127 drivers/acpi/arm64/iort.c 	struct iort_fwnode *curr;
curr              131 drivers/acpi/arm64/iort.c 	list_for_each_entry(curr, &iort_fwnode_list, list) {
curr              132 drivers/acpi/arm64/iort.c 		if (curr->fwnode == fwnode) {
curr              133 drivers/acpi/arm64/iort.c 			iort_node = curr->iort_node;
curr              583 drivers/acpi/ec.c 	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
curr              595 drivers/acpi/ec.c 	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
curr              607 drivers/acpi/ec.c 	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
curr              615 drivers/acpi/ec.c 	ec->curr->flags |= flag;
curr              616 drivers/acpi/ec.c 	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
curr              644 drivers/acpi/ec.c 	t = ec->curr;
curr              723 drivers/acpi/ec.c 	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
curr              724 drivers/acpi/ec.c 	ec->curr->flags = 0;
curr              800 drivers/acpi/ec.c 	ec->curr = t;
curr              811 drivers/acpi/ec.c 	ec->curr = NULL;
curr             1230 drivers/acpi/ec.c 			if (!ec->curr)
curr              177 drivers/acpi/internal.h 	struct transaction *curr;
curr             1170 drivers/acpi/processor_idle.c 	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
curr             1180 drivers/acpi/processor_idle.c 	curr = &info[1];
curr             1199 drivers/acpi/processor_idle.c 		ret = acpi_processor_evaluate_lpi(handle, curr);
curr             1204 drivers/acpi/processor_idle.c 		flatten_lpi_states(pr, curr, prev);
curr             1206 drivers/acpi/processor_idle.c 		tmp = prev, prev = curr, curr = tmp;
curr              660 drivers/atm/eni.c 	struct atm_vcc *curr;
curr              663 drivers/atm/eni.c 	while ((curr = eni_dev->fast)) {
curr              665 drivers/atm/eni.c 		if (rx_vcc(curr)) return;
curr              666 drivers/atm/eni.c 		eni_dev->fast = ENI_VCC(curr)->next;
curr              667 drivers/atm/eni.c 		ENI_VCC(curr)->next = ENI_VCC_NOS;
curr              669 drivers/atm/eni.c 		ENI_VCC(curr)->servicing--;
curr              671 drivers/atm/eni.c 	while ((curr = eni_dev->slow)) {
curr              673 drivers/atm/eni.c 		if (rx_vcc(curr)) return;
curr              674 drivers/atm/eni.c 		eni_dev->slow = ENI_VCC(curr)->next;
curr              675 drivers/atm/eni.c 		ENI_VCC(curr)->next = ENI_VCC_NOS;
curr              677 drivers/atm/eni.c 		ENI_VCC(curr)->servicing--;
curr             1267 drivers/atm/zatm.c 	unsigned long curr;
curr             1293 drivers/atm/zatm.c 	curr = rx*RX_SIZE/4;
curr             1294 drivers/atm/zatm.c 	DPRINTK("RX pool 0x%08lx\n",curr);
curr             1295 drivers/atm/zatm.c 	zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
curr             1296 drivers/atm/zatm.c 	zatm_dev->pool_base = curr;
curr             1297 drivers/atm/zatm.c 	curr += pools*POOL_SIZE/4;
curr             1298 drivers/atm/zatm.c 	DPRINTK("Shapers 0x%08lx\n",curr);
curr             1299 drivers/atm/zatm.c 	zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
curr             1300 drivers/atm/zatm.c 	curr += NR_SHAPERS*SHAPER_SIZE/4;
curr             1301 drivers/atm/zatm.c 	DPRINTK("Free    0x%08lx\n",curr);
curr             1302 drivers/atm/zatm.c 	zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
curr             1305 drivers/atm/zatm.c 	    (zatm_dev->mem-curr*4)/VC_SIZE);
curr               60 drivers/auxdisplay/img-ascii-lcd.c 	char curr[] __aligned(8);
curr               72 drivers/auxdisplay/img-ascii-lcd.c 	val = *((u64 *)&ctx->curr[0]);
curr               75 drivers/auxdisplay/img-ascii-lcd.c 	val = *((u32 *)&ctx->curr[0]);
curr               77 drivers/auxdisplay/img-ascii-lcd.c 	val = *((u32 *)&ctx->curr[4]);
curr              100 drivers/auxdisplay/img-ascii-lcd.c 				   ctx->offset + (i * 8), ctx->curr[i]);
curr              198 drivers/auxdisplay/img-ascii-lcd.c 				   ctx->curr[i]);
curr              238 drivers/auxdisplay/img-ascii-lcd.c 			ctx->curr[i] = ctx->message[ch];
curr              214 drivers/base/dd.c 	struct device_private *curr;
curr              218 drivers/base/dd.c 	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
curr              219 drivers/base/dd.c 		seq_printf(s, "%s\n", dev_name(curr->device));
curr              854 drivers/cdrom/cdrom.c 		ret = !rfd.curr;
curr             1565 drivers/cdrom/cdrom.c 		     u_char * curr, u_char requested)
curr             1567 drivers/cdrom/cdrom.c 	if (*curr == requested)
curr             1580 drivers/cdrom/cdrom.c 	*curr = requested;
curr              200 drivers/char/agp/agp.h void agp_generic_free_by_type(struct agp_memory *curr);
curr               49 drivers/char/agp/frontend.c 	struct agp_memory *curr;
curr               54 drivers/char/agp/frontend.c 	curr = agp_fe.current_controller->pool;
curr               56 drivers/char/agp/frontend.c 	while (curr != NULL) {
curr               57 drivers/char/agp/frontend.c 		if (curr->key == key)
curr               59 drivers/char/agp/frontend.c 		curr = curr->next;
curr               62 drivers/char/agp/frontend.c 	DBG("key=%d -> mem=%p", key, curr);
curr               63 drivers/char/agp/frontend.c 	return curr;
curr              217 drivers/char/agp/frontend.c 	struct agp_file_private *curr;
curr              219 drivers/char/agp/frontend.c 	curr = agp_fe.file_priv_list;
curr              221 drivers/char/agp/frontend.c 	while (curr != NULL) {
curr              222 drivers/char/agp/frontend.c 		if (curr->my_pid == pid)
curr              223 drivers/char/agp/frontend.c 			return curr;
curr              224 drivers/char/agp/frontend.c 		curr = curr->next;
curr              163 drivers/char/agp/generic.c void agp_free_memory(struct agp_memory *curr)
curr              167 drivers/char/agp/generic.c 	if (curr == NULL)
curr              170 drivers/char/agp/generic.c 	if (curr->is_bound)
curr              171 drivers/char/agp/generic.c 		agp_unbind_memory(curr);
curr              173 drivers/char/agp/generic.c 	if (curr->type >= AGP_USER_TYPES) {
curr              174 drivers/char/agp/generic.c 		agp_generic_free_by_type(curr);
curr              178 drivers/char/agp/generic.c 	if (curr->type != 0) {
curr              179 drivers/char/agp/generic.c 		curr->bridge->driver->free_by_type(curr);
curr              182 drivers/char/agp/generic.c 	if (curr->page_count != 0) {
curr              183 drivers/char/agp/generic.c 		if (curr->bridge->driver->agp_destroy_pages) {
curr              184 drivers/char/agp/generic.c 			curr->bridge->driver->agp_destroy_pages(curr);
curr              187 drivers/char/agp/generic.c 			for (i = 0; i < curr->page_count; i++) {
curr              188 drivers/char/agp/generic.c 				curr->bridge->driver->agp_destroy_page(
curr              189 drivers/char/agp/generic.c 					curr->pages[i],
curr              192 drivers/char/agp/generic.c 			for (i = 0; i < curr->page_count; i++) {
curr              193 drivers/char/agp/generic.c 				curr->bridge->driver->agp_destroy_page(
curr              194 drivers/char/agp/generic.c 					curr->pages[i],
curr              199 drivers/char/agp/generic.c 	agp_free_key(curr->key);
curr              200 drivers/char/agp/generic.c 	agp_free_page_array(curr);
curr              201 drivers/char/agp/generic.c 	kfree(curr);
curr              407 drivers/char/agp/generic.c int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
curr              411 drivers/char/agp/generic.c 	if (curr == NULL)
curr              414 drivers/char/agp/generic.c 	if (curr->is_bound) {
curr              415 drivers/char/agp/generic.c 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
curr              418 drivers/char/agp/generic.c 	if (!curr->is_flushed) {
curr              419 drivers/char/agp/generic.c 		curr->bridge->driver->cache_flush();
curr              420 drivers/char/agp/generic.c 		curr->is_flushed = true;
curr              423 drivers/char/agp/generic.c 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
curr              428 drivers/char/agp/generic.c 	curr->is_bound = true;
curr              429 drivers/char/agp/generic.c 	curr->pg_start = pg_start;
curr              431 drivers/char/agp/generic.c 	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
curr              447 drivers/char/agp/generic.c int agp_unbind_memory(struct agp_memory *curr)
curr              451 drivers/char/agp/generic.c 	if (curr == NULL)
curr              454 drivers/char/agp/generic.c 	if (!curr->is_bound) {
curr              455 drivers/char/agp/generic.c 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
curr              459 drivers/char/agp/generic.c 	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
curr              464 drivers/char/agp/generic.c 	curr->is_bound = false;
curr              465 drivers/char/agp/generic.c 	curr->pg_start = 0;
curr              466 drivers/char/agp/generic.c 	spin_lock(&curr->bridge->mapped_lock);
curr              467 drivers/char/agp/generic.c 	list_del(&curr->mapped_list);
curr              468 drivers/char/agp/generic.c 	spin_unlock(&curr->bridge->mapped_lock);
curr             1155 drivers/char/agp/generic.c void agp_generic_free_by_type(struct agp_memory *curr)
curr             1157 drivers/char/agp/generic.c 	agp_free_page_array(curr);
curr             1158 drivers/char/agp/generic.c 	agp_free_key(curr->key);
curr             1159 drivers/char/agp/generic.c 	kfree(curr);
curr              276 drivers/char/agp/intel-gtt.c static void intel_i810_free_by_type(struct agp_memory *curr)
curr              278 drivers/char/agp/intel-gtt.c 	agp_free_key(curr->key);
curr              279 drivers/char/agp/intel-gtt.c 	if (curr->type == AGP_PHYS_MEMORY) {
curr              280 drivers/char/agp/intel-gtt.c 		if (curr->page_count == 4)
curr              281 drivers/char/agp/intel-gtt.c 			i8xx_destroy_pages(curr->pages[0]);
curr              283 drivers/char/agp/intel-gtt.c 			agp_bridge->driver->agp_destroy_page(curr->pages[0],
curr              285 drivers/char/agp/intel-gtt.c 			agp_bridge->driver->agp_destroy_page(curr->pages[0],
curr              288 drivers/char/agp/intel-gtt.c 		agp_free_page_array(curr);
curr              290 drivers/char/agp/intel-gtt.c 	kfree(curr);
curr               60 drivers/char/ipmi/ipmi_si_hotmod.c 		     const char **curr)
curr               65 drivers/char/ipmi/ipmi_si_hotmod.c 	s = strchr(*curr, ',');
curr               73 drivers/char/ipmi/ipmi_si_hotmod.c 		if (strcmp(*curr, v[i].name) == 0) {
curr               75 drivers/char/ipmi/ipmi_si_hotmod.c 			*curr = s;
curr               80 drivers/char/ipmi/ipmi_si_hotmod.c 	pr_warn("Invalid hotmod %s '%s'\n", name, *curr);
curr               84 drivers/char/ipmi/ipmi_si_hotmod.c static int check_hotmod_int_op(const char *curr, const char *option,
curr               89 drivers/char/ipmi/ipmi_si_hotmod.c 	if (strcmp(curr, name) == 0) {
curr               91 drivers/char/ipmi/ipmi_si_hotmod.c 			pr_warn("No option given for '%s'\n", curr);
curr               96 drivers/char/ipmi/ipmi_si_hotmod.c 			pr_warn("Bad option given for '%s'\n", curr);
curr              104 drivers/char/ipmi/ipmi_si_hotmod.c static int parse_hotmod_str(const char *curr, enum hotmod_op *op,
curr              112 drivers/char/ipmi/ipmi_si_hotmod.c 	rv = parse_str(hotmod_ops, &ival, "operation", &curr);
curr              117 drivers/char/ipmi/ipmi_si_hotmod.c 	rv = parse_str(hotmod_si, &ival, "interface type", &curr);
curr              122 drivers/char/ipmi/ipmi_si_hotmod.c 	rv = parse_str(hotmod_as, &ival, "address space", &curr);
curr              127 drivers/char/ipmi/ipmi_si_hotmod.c 	s = strchr(curr, ',');
curr              132 drivers/char/ipmi/ipmi_si_hotmod.c 	rv = kstrtoul(curr, 0, &h->addr);
curr              134 drivers/char/ipmi/ipmi_si_hotmod.c 		pr_warn("Invalid hotmod address '%s': %d\n", curr, rv);
curr              139 drivers/char/ipmi/ipmi_si_hotmod.c 		curr = s;
curr              140 drivers/char/ipmi/ipmi_si_hotmod.c 		s = strchr(curr, ',');
curr              145 drivers/char/ipmi/ipmi_si_hotmod.c 		o = strchr(curr, '=');
curr              150 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing);
curr              155 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize);
curr              160 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift);
curr              165 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = check_hotmod_int_op(curr, o, "irq", &h->irq);
curr              170 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr);
curr              176 drivers/char/ipmi/ipmi_si_hotmod.c 		pr_warn("Invalid hotmod option '%s'\n", curr);
curr              188 drivers/char/ipmi/ipmi_si_hotmod.c 	char *str = kstrdup(val, GFP_KERNEL), *curr, *next;
curr              205 drivers/char/ipmi/ipmi_si_hotmod.c 	for (curr = str; curr; curr = next) {
curr              208 drivers/char/ipmi/ipmi_si_hotmod.c 		next = strchr(curr, ':');
curr              215 drivers/char/ipmi/ipmi_si_hotmod.c 		rv = parse_hotmod_str(curr, &op, &h);
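
The ipmi_si_hotmod.c entries above parse a comma-separated option string by splitting on ',' with strchr, splitting each token on '=', and converting values with checked numeric parsing. A userspace sketch of the same tokenizing loop, using strtoul in place of kstrtoul; the option names are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_opts(char *curr)
{
	char *next, *eq;

	for (; curr; curr = next) {
		/* Terminate this token and remember where the next starts. */
		next = strchr(curr, ',');
		if (next)
			*next++ = '\0';

		eq = strchr(curr, '=');
		if (!eq) {
			fprintf(stderr, "no '=' in '%s'\n", curr);
			return -1;
		}
		*eq++ = '\0';

		char *endp;
		unsigned long val = strtoul(eq, &endp, 0);

		if (*endp) {
			fprintf(stderr, "bad value for '%s'\n", curr);
			return -1;
		}
		printf("%s -> %lu\n", curr, val);
	}
	return 0;
}

int main(void)
{
	char opts[] = "rsp=4,irq=0x10,ipmb=0x20";

	return parse_opts(opts);
}
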
curr              165 drivers/clk/clk-max9485.c 	const struct max9485_rate *curr, *prev = NULL;
curr              167 drivers/clk/clk-max9485.c 	for (curr = max9485_rates; curr->out != 0; curr++) {
curr              169 drivers/clk/clk-max9485.c 		if (curr->out == rate)
curr              176 drivers/clk/clk-max9485.c 		if (curr->out > rate) {
curr              184 drivers/clk/clk-max9485.c 				return curr->out;
curr              190 drivers/clk/clk-max9485.c 			mid = prev->out + ((curr->out - prev->out) / 2);
curr              192 drivers/clk/clk-max9485.c 			return (mid > rate) ? prev->out : curr->out;
curr              195 drivers/clk/clk-max9485.c 		prev = curr;
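
The clk-max9485.c entries above implement nearest-match rounding over an ascending rate table: an exact hit returns immediately, a request below the smallest entry rounds up, and anything between two entries is compared against their midpoint. A compilable sketch of that selection, with a hypothetical rates[] table using the same zero sentinel the driver's loop tests for:

#include <stdio.h>

/* Hypothetical ascending table of supported output rates (Hz). */
static const unsigned long rates[] = {
	8192000, 11289600, 12288000, 16384000, 22579200, 24576000, 0
};

/* Round to the nearest supported rate, in the style of round_rate(). */
static unsigned long round_rate(unsigned long rate)
{
	const unsigned long *curr, *prev = NULL;

	for (curr = rates; *curr != 0; curr++) {
		/* Exact matches are taken as-is. */
		if (*curr == rate)
			return rate;

		if (*curr > rate) {
			/* Below the smallest entry: round up. */
			if (!prev)
				return *curr;

			/* Otherwise pick whichever neighbour is closer. */
			unsigned long mid = *prev + (*curr - *prev) / 2;

			return (mid > rate) ? *prev : *curr;
		}
		prev = curr;
	}

	/* Above the largest entry: round down. */
	return *prev;
}

int main(void)
{
	printf("%lu\n", round_rate(12000000));	/* -> 12288000 */
	return 0;
}
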
curr             1128 drivers/crypto/amcc/crypto4xx_core.c 	unsigned int i, curr = 0;
curr             1149 drivers/crypto/amcc/crypto4xx_core.c 		if ((max - curr) >= 8) {
curr             1152 drivers/crypto/amcc/crypto4xx_core.c 			curr += 8;
curr             1155 drivers/crypto/amcc/crypto4xx_core.c 			memcpy(data, &val, max - curr);
curr             1158 drivers/crypto/amcc/crypto4xx_core.c 	} while (curr < max);
curr             1160 drivers/crypto/amcc/crypto4xx_core.c 	return curr;
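
The crypto4xx_core.c entries above fill a caller's buffer from a 64-bit source eight bytes at a time and finish with a short memcpy for the final partial chunk, so the requested length is met exactly. A sketch of the chunk-plus-tail loop; next_word() is a hypothetical stand-in for the hardware register read:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical 64-bit source (stands in for the PRNG register). */
static uint64_t next_word(void)
{
	static uint64_t x = 0x123456789abcdef0ULL;

	x = x * 6364136223846793005ULL + 1;	/* LCG step, demo only */
	return x;
}

/* Fill "max" bytes, 8 at a time, with a short tail copy at the end. */
static unsigned int fill_bytes(uint8_t *data, unsigned int max)
{
	unsigned int curr = 0;

	do {
		uint64_t val = next_word();

		if (max - curr >= 8) {
			memcpy(data, &val, 8);
			data += 8;
			curr += 8;
		} else {
			/* Partial tail: copy only the bytes still needed. */
			memcpy(data, &val, max - curr);
			curr = max;
		}
	} while (curr < max);

	return curr;
}

int main(void)
{
	uint8_t buf[20];

	printf("filled %u bytes\n", fill_bytes(buf, sizeof(buf)));
	return 0;
}
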
curr              224 drivers/crypto/cavium/cpt/cptvf_main.c 		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
curr              230 drivers/crypto/cavium/cpt/cptvf_main.c 			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
curr              231 drivers/crypto/cavium/cpt/cptvf_main.c 			if (!curr)
curr              236 drivers/crypto/cavium/cpt/cptvf_main.c 			curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
curr              238 drivers/crypto/cavium/cpt/cptvf_main.c 							      &curr->dma_addr,
curr              240 drivers/crypto/cavium/cpt/cptvf_main.c 			if (!curr->head) {
curr              243 drivers/crypto/cavium/cpt/cptvf_main.c 				kfree(curr);
curr              247 drivers/crypto/cavium/cpt/cptvf_main.c 			curr->size = c_size;
curr              249 drivers/crypto/cavium/cpt/cptvf_main.c 				hlist_add_head(&curr->nextchunk,
curr              251 drivers/crypto/cavium/cpt/cptvf_main.c 				first = curr;
curr              253 drivers/crypto/cavium/cpt/cptvf_main.c 				hlist_add_behind(&curr->nextchunk,
curr              260 drivers/crypto/cavium/cpt/cptvf_main.c 				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
curr              262 drivers/crypto/cavium/cpt/cptvf_main.c 			last = curr;
curr              267 drivers/crypto/cavium/cpt/cptvf_main.c 		curr = first;
curr              268 drivers/crypto/cavium/cpt/cptvf_main.c 		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
curr              269 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->qhead = curr;
curr              263 drivers/dma/img-mdc-dma.c 	struct mdc_hw_list_desc *curr, *next;
curr              266 drivers/dma/img-mdc-dma.c 	curr = mdesc->list;
curr              268 drivers/dma/img-mdc-dma.c 	while (curr) {
curr              269 drivers/dma/img-mdc-dma.c 		next = curr->next_desc;
curr              270 drivers/dma/img-mdc-dma.c 		next_phys = curr->node_addr;
curr              271 drivers/dma/img-mdc-dma.c 		dma_pool_free(mdma->desc_pool, curr, curr_phys);
curr              272 drivers/dma/img-mdc-dma.c 		curr = next;
curr              292 drivers/dma/img-mdc-dma.c 	struct mdc_hw_list_desc *curr, *prev = NULL;
curr              307 drivers/dma/img-mdc-dma.c 		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
curr              308 drivers/dma/img-mdc-dma.c 		if (!curr)
curr              313 drivers/dma/img-mdc-dma.c 			prev->next_desc = curr;
curr              316 drivers/dma/img-mdc-dma.c 			mdesc->list = curr;
curr              321 drivers/dma/img-mdc-dma.c 		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
curr              324 drivers/dma/img-mdc-dma.c 		prev = curr;
curr              374 drivers/dma/img-mdc-dma.c 	struct mdc_hw_list_desc *curr, *prev = NULL;
curr              401 drivers/dma/img-mdc-dma.c 			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
curr              403 drivers/dma/img-mdc-dma.c 			if (!curr)
curr              408 drivers/dma/img-mdc-dma.c 				mdesc->list = curr;
curr              411 drivers/dma/img-mdc-dma.c 				prev->next_desc = curr;
curr              418 drivers/dma/img-mdc-dma.c 				mdc_list_desc_config(mchan, curr, dir,
curr              423 drivers/dma/img-mdc-dma.c 				mdc_list_desc_config(mchan, curr, dir,
curr              429 drivers/dma/img-mdc-dma.c 			prev = curr;
curr              456 drivers/dma/img-mdc-dma.c 	struct mdc_hw_list_desc *curr, *prev = NULL;
curr              481 drivers/dma/img-mdc-dma.c 			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
curr              483 drivers/dma/img-mdc-dma.c 			if (!curr)
curr              488 drivers/dma/img-mdc-dma.c 				mdesc->list = curr;
curr              491 drivers/dma/img-mdc-dma.c 				prev->next_desc = curr;
curr              498 drivers/dma/img-mdc-dma.c 				mdc_list_desc_config(mchan, curr, dir, buf,
curr              502 drivers/dma/img-mdc-dma.c 				mdc_list_desc_config(mchan, curr, dir,
curr              507 drivers/dma/img-mdc-dma.c 			prev = curr;
curr              773 drivers/dma/mmp_pdma.c 	u32 curr, residue = 0;
curr              785 drivers/dma/mmp_pdma.c 		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
curr              787 drivers/dma/mmp_pdma.c 		curr = readl(chan->phy->base + DSADR(chan->phy->idx));
curr              810 drivers/dma/mmp_pdma.c 		} else if (curr >= start && curr <= end) {
curr              811 drivers/dma/mmp_pdma.c 			residue += end - curr;
curr             1127 drivers/dma/pxa_dma.c 	u32 curr, start, len, end, residue = 0;
curr             1147 drivers/dma/pxa_dma.c 		curr = phy_readl_relaxed(chan->phy, DSADR);
curr             1149 drivers/dma/pxa_dma.c 		curr = phy_readl_relaxed(chan->phy, DTADR);
curr             1181 drivers/dma/pxa_dma.c 		} else if (curr >= start && curr <= end) {
curr             1182 drivers/dma/pxa_dma.c 			residue += end - curr;
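
The mmp_pdma.c and pxa_dma.c entries above compute DMA residue the same way: read the hardware's current transfer address, then walk the descriptors, counting nothing for ones already completed, the untransferred tail (end - curr) for the one in flight, and the full length for ones not yet started. A simplified sketch over a plain descriptor array:

#include <stdio.h>

/* Hypothetical descriptor: one contiguous transfer region. */
struct desc { unsigned long start, len; };

static unsigned long residue(const struct desc *d, int n, unsigned long curr)
{
	unsigned long res = 0;
	int i, passed = 0;

	for (i = 0; i < n; i++) {
		unsigned long end = d[i].start + d[i].len;

		if (curr >= d[i].start && curr <= end) {
			res += end - curr;	/* in-flight descriptor */
			passed = 1;
		} else if (passed) {
			res += d[i].len;	/* not yet started */
		}
		/* Descriptors before the cursor contribute nothing. */
	}
	return res;
}

int main(void)
{
	struct desc ring[] = { { 0x1000, 0x100 }, { 0x2000, 0x100 } };

	/* Cursor halfway through the first region: 0x80 + 0x100 left. */
	printf("residue=0x%lx\n", residue(ring, 2, 0x1080));
	return 0;
}
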
curr              229 drivers/gpio/gpio-mockup.c 	int rv, val, curr, irq, irq_type;
curr              256 drivers/gpio/gpio-mockup.c 		curr = __gpio_mockup_get(chip, priv->offset);
curr              257 drivers/gpio/gpio-mockup.c 		if (curr == val)
curr             1402 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 	dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0],
curr              803 drivers/gpu/drm/i915/i915_cmd_parser.c 			u32 curr = desc->cmd.value & desc->cmd.mask;
curr              805 drivers/gpu/drm/i915/i915_cmd_parser.c 			if (curr < previous) {
curr              809 drivers/gpu/drm/i915/i915_cmd_parser.c 					  i, j, curr, previous);
curr              813 drivers/gpu/drm/i915/i915_cmd_parser.c 			previous = curr;
curr              829 drivers/gpu/drm/i915/i915_cmd_parser.c 		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
curr              831 drivers/gpu/drm/i915/i915_cmd_parser.c 		if (curr < previous) {
curr              835 drivers/gpu/drm/i915/i915_cmd_parser.c 				  i, curr, previous);
curr              839 drivers/gpu/drm/i915/i915_cmd_parser.c 		previous = curr;
curr               25 drivers/gpu/drm/i915/i915_scatterlist.h 	unsigned int curr;
curr               31 drivers/gpu/drm/i915/i915_scatterlist.h 		s.max = s.curr = s.sgp->offset;
curr               78 drivers/gpu/drm/i915/i915_scatterlist.h 	     ((__dmap) = (__iter).dma + (__iter).curr);			\
curr               79 drivers/gpu/drm/i915/i915_scatterlist.h 	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
curr               91 drivers/gpu/drm/i915/i915_scatterlist.h 	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
curr               92 drivers/gpu/drm/i915/i915_scatterlist.h 	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
curr               72 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 	struct dpu_hw_blk *curr;
curr               77 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 		list_for_each_entry(curr, &dpu_hw_blk_list, list) {
curr               78 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 			if ((curr->type != type) ||
curr               79 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 					(id >= 0 && curr->id != id) ||
curr               81 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 						atomic_read(&curr->refcount)))
curr               84 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c 			hw_blk = curr;
curr              446 drivers/hv/channel.c 	struct list_head *curr;
curr              486 drivers/hv/channel.c 	list_for_each(curr, &msginfo->submsglist) {
curr              487 drivers/hv/channel.c 		submsginfo = (struct vmbus_channel_msginfo *)curr;
curr              577 drivers/hwmon/ina3221.c 	HWMON_CHANNEL_INFO(curr,
curr              334 drivers/hwmon/lochnagar-hwmon.c 	HWMON_CHANNEL_INFO(curr,  HWMON_C_INPUT | HWMON_C_LABEL,
curr              131 drivers/hwmon/ltc4215.c 	const unsigned int curr = voltage / 4;
curr              133 drivers/hwmon/ltc4215.c 	return curr;
curr              148 drivers/hwmon/ltc4215.c 	const unsigned int curr = ltc4215_get_current(dev);
curr              150 drivers/hwmon/ltc4215.c 	return snprintf(buf, PAGE_SIZE, "%u\n", curr);
curr              156 drivers/hwmon/ltc4215.c 	const unsigned int curr = ltc4215_get_current(dev);
curr              160 drivers/hwmon/ltc4215.c 	const unsigned int power = abs(output_voltage * curr);
curr              211 drivers/hwmon/ltc4245.c 	unsigned int curr;
curr              231 drivers/hwmon/ltc4245.c 		curr = voltage / 50; /* sense resistor 50 mOhm */
curr              235 drivers/hwmon/ltc4245.c 		curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */
curr              239 drivers/hwmon/ltc4245.c 		curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */
curr              243 drivers/hwmon/ltc4245.c 		curr = voltage / 100; /* sense resistor 100 mOhm */
curr              248 drivers/hwmon/ltc4245.c 		curr = 0;
curr              252 drivers/hwmon/ltc4245.c 	return curr;
curr              317 drivers/hwmon/ltc4245.c 	unsigned long curr;
curr              323 drivers/hwmon/ltc4245.c 		curr = ltc4245_get_current(dev, ltc4245_curr_regs[channel]);
curr              325 drivers/hwmon/ltc4245.c 		*val = abs(curr * voltage);
curr              404 drivers/hwmon/ltc4245.c 	HWMON_CHANNEL_INFO(curr,
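
The ltc4245.c entries above convert a measured sense voltage to current as I = V / R_sense, scaling the numerator before dividing (for example (voltage * 10) / 35 for a 3.5 mOhm resistor) so integer arithmetic keeps precision. A trivial restatement with hypothetical units, microvolts in and milliamps out:

#include <stdio.h>

/* I[mA] = V[uV] / R[mOhm]: the unit prefixes cancel to milliamps. */
static unsigned int to_current_ma(unsigned int uvolts, unsigned int mohms)
{
	return uvolts / mohms;
}

int main(void)
{
	/* 50 mOhm sense resistor, 2500 uV drop -> 50 mA. */
	printf("%u mA\n", to_current_ma(2500, 50));
	/* 3.5 mOhm: scale by 10 before dividing, as the driver does. */
	printf("%u mA\n", (2500 * 10) / 35);
	return 0;
}
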
curr             1187 drivers/hwmon/pmbus/pmbus_core.c 	struct pmbus_sensor *curr;
curr             1191 drivers/hwmon/pmbus/pmbus_core.c 			curr = pmbus_add_sensor(data, name, l->attr, index,
curr             1195 drivers/hwmon/pmbus/pmbus_core.c 			if (!curr)
curr             1200 drivers/hwmon/pmbus/pmbus_core.c 					attr->compare ?  l->low ? curr : base
curr             1202 drivers/hwmon/pmbus/pmbus_core.c 					attr->compare ? l->low ? base : curr
curr              511 drivers/i2c/busses/i2c-ocores.c 	u32 curr, wr;
curr              517 drivers/i2c/busses/i2c-ocores.c 		curr = ioread32be(i2c->base + (rreg << i2c->reg_shift));
curr              519 drivers/i2c/busses/i2c-ocores.c 			wr = (curr & 0xff00) | value;
curr              521 drivers/i2c/busses/i2c-ocores.c 			wr = (((u32)value) << 8) | (curr & 0xff);
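
The i2c-ocores.c entries above update one byte lane of a wider big-endian register with a read-modify-write: read the current word, mask off the lane being replaced, and merge in the new byte. A standalone sketch of the lane merge:

#include <stdint.h>
#include <stdio.h>

/* Replace one 8-bit lane of a 16-bit-wide register value. */
static uint32_t set_lane(uint32_t curr, uint8_t value, int high_lane)
{
	if (high_lane)
		return ((uint32_t)value << 8) | (curr & 0xff);
	return (curr & 0xff00) | value;
}

int main(void)
{
	uint32_t reg = 0x1234;

	reg = set_lane(reg, 0xab, 0);	/* -> 0x12ab */
	reg = set_lane(reg, 0xcd, 1);	/* -> 0xcdab */
	printf("0x%x\n", reg);
	return 0;
}
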
curr              332 drivers/iio/adc/twl4030-madc.c 	int temp, curr, volt, res, ret;
curr              341 drivers/iio/adc/twl4030-madc.c 	curr = ((val & TWL4030_BCI_ITHSENS) + 1) * 10;
curr              343 drivers/iio/adc/twl4030-madc.c 	res = volt * 1000 / curr;
curr              100 drivers/infiniband/core/addr.c 	const struct nlattr *head, *curr;
curr              109 drivers/infiniband/core/addr.c 	nla_for_each_attr(curr, head, len, rem) {
curr              110 drivers/infiniband/core/addr.c 		if (curr->nla_type == LS_NLA_TYPE_DGID)
curr              111 drivers/infiniband/core/addr.c 			memcpy(&gid, nla_data(curr), nla_len(curr));
curr               13 drivers/infiniband/core/counters.c static int __counter_set_mode(struct rdma_counter_mode *curr,
curr               19 drivers/infiniband/core/counters.c 	     (curr->mode != RDMA_COUNTER_MODE_NONE)))
curr               22 drivers/infiniband/core/counters.c 	curr->mode = new_mode;
curr               23 drivers/infiniband/core/counters.c 	curr->mask = new_mask;
curr              338 drivers/infiniband/core/nldev.c 			       const char *name, u64 curr)
curr              349 drivers/infiniband/core/nldev.c 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
curr              373 drivers/infiniband/core/nldev.c 	int ret, i, curr;
curr              385 drivers/infiniband/core/nldev.c 		curr = rdma_restrack_count(device, i);
curr              386 drivers/infiniband/core/nldev.c 		ret = fill_res_info_entry(msg, names[i], curr);
curr              928 drivers/infiniband/core/sa_query.c 	const struct nlattr *head, *curr;
curr              949 drivers/infiniband/core/sa_query.c 		nla_for_each_attr(curr, head, len, rem) {
curr              950 drivers/infiniband/core/sa_query.c 			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
curr              951 drivers/infiniband/core/sa_query.c 				rec = nla_data(curr);
curr             13040 drivers/infiniband/hw/hfi1/chip.c 	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
curr             1016 drivers/infiniband/hw/hfi1/hfi.h 	unsigned int curr;       /* current temperature */
curr               59 drivers/infiniband/hw/hfi1/opfn.c 	    priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
curr               88 drivers/infiniband/hw/hfi1/opfn.c 	priv->opfn.curr = capcode;	/* A new request is now in progress */
curr              105 drivers/infiniband/hw/hfi1/opfn.c 	priv->opfn.curr = STL_VERBS_EXTD_NONE;
curr              193 drivers/infiniband/hw/hfi1/opfn.c 	if (!priv->opfn.curr || capcode != priv->opfn.curr)
curr              208 drivers/infiniband/hw/hfi1/opfn.c 	priv->opfn.curr = STL_VERBS_EXTD_NONE;
curr              238 drivers/infiniband/hw/hfi1/opfn.c 	priv->opfn.curr = STL_VERBS_EXTD_NONE;
curr               68 drivers/infiniband/hw/hfi1/opfn.h 	enum hfi1_opfn_codes curr;
curr              756 drivers/infiniband/hw/hfi1/pcie.c #define eq_value(pre, curr, post) \
curr              759 drivers/infiniband/hw/hfi1/pcie.c 	| (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
curr             1633 drivers/infiniband/hw/hfi1/rc.c 				opfn_conn_reply(qp, priv->opfn.curr);
curr              621 drivers/infiniband/hw/hfi1/sysfs.c 		idx += temp2str(temp.curr, buf, PAGE_SIZE, idx);
curr              202 drivers/infiniband/hw/hfi1/trace_tid.h 		__field(u8, curr)
curr              211 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->curr = priv->opfn.curr;
curr              219 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->curr
curr              287 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		int curr;
curr              295 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		curr = --tail;
curr              297 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 			if (curr < 0)
curr              298 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 				curr = cq->ibcq.cqe - 1;
curr              301 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 			curr_cqe = get_cqe(cq, curr);
curr              303 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 				if (curr != tail) {
curr              313 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 			curr--;
curr              106 drivers/input/mouse/hgpk.c static int approx_half(int curr, int prev)
curr              110 drivers/input/mouse/hgpk.c 	if (curr < 5 || prev < 5)
curr              116 drivers/input/mouse/hgpk.c 	return belowhalf < curr && curr <= abovehalf;
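
The hgpk.c entries above decide whether a new delta is "roughly half" the previous one by bracketing it in a band around prev/2 and rejecting inputs too small to judge. A sketch of the band test; the 4/7 and 4/5 bounds here are hypothetical stand-ins for the driver's actual tuning constants:

#include <stdbool.h>
#include <stdio.h>

static bool approx_half(int curr, int prev)
{
	int below, above;

	if (curr < 5 || prev < 5)
		return false;	/* too small to judge reliably */

	below = prev * 4 / 7;	/* lower bound, ~0.57 * prev */
	above = prev * 4 / 5;	/* upper bound,  0.80 * prev */

	return below < curr && curr <= above;
}

int main(void)
{
	printf("%d %d\n", approx_half(30, 50), approx_half(10, 50));
	return 0;
}
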
curr              183 drivers/input/serio/hp_sdc.c 	hp_sdc_transaction *curr;
curr              190 drivers/input/serio/hp_sdc.c 	curr = hp_sdc.tq[hp_sdc.rcurr];
curr              193 drivers/input/serio/hp_sdc.c 	curr->seq[curr->idx++] = status;
curr              194 drivers/input/serio/hp_sdc.c 	curr->seq[curr->idx++] = data;
curr              200 drivers/input/serio/hp_sdc.c 		if (curr->seq[curr->actidx] & HP_SDC_ACT_SEMAPHORE)
curr              201 drivers/input/serio/hp_sdc.c 			if (curr->act.semaphore)
curr              202 drivers/input/serio/hp_sdc.c 				up(curr->act.semaphore);
curr              204 drivers/input/serio/hp_sdc.c 		if (curr->seq[curr->actidx] & HP_SDC_ACT_CALLBACK)
curr              205 drivers/input/serio/hp_sdc.c 			if (curr->act.irqhook)
curr              206 drivers/input/serio/hp_sdc.c 				curr->act.irqhook(irq, dev_id, status, data);
curr              208 drivers/input/serio/hp_sdc.c 		curr->actidx = curr->idx;
curr              209 drivers/input/serio/hp_sdc.c 		curr->idx++;
curr              313 drivers/input/serio/hp_sdc.c 			hp_sdc_transaction *curr;
curr              316 drivers/input/serio/hp_sdc.c 			curr = hp_sdc.tq[hp_sdc.rcurr];
curr              323 drivers/input/serio/hp_sdc.c 			curr->idx += hp_sdc.rqty;
curr              325 drivers/input/serio/hp_sdc.c 			tmp = curr->seq[curr->actidx];
curr              326 drivers/input/serio/hp_sdc.c 			curr->seq[curr->actidx] |= HP_SDC_ACT_DEAD;
curr              328 drivers/input/serio/hp_sdc.c 				if (curr->act.semaphore)
curr              329 drivers/input/serio/hp_sdc.c 					up(curr->act.semaphore);
curr              335 drivers/input/serio/hp_sdc.c 				if (curr->act.irqhook)
curr              336 drivers/input/serio/hp_sdc.c 					curr->act.irqhook(0, NULL, 0, 0);
curr              339 drivers/input/serio/hp_sdc.c 			curr->actidx = curr->idx;
curr              340 drivers/input/serio/hp_sdc.c 			curr->idx++;
curr              350 drivers/input/serio/hp_sdc.c 	hp_sdc_transaction *curr;
curr              412 drivers/input/serio/hp_sdc.c 	curr = hp_sdc.tq[curridx];
curr              413 drivers/input/serio/hp_sdc.c 	idx = curr->actidx;
curr              415 drivers/input/serio/hp_sdc.c 	if (curr->actidx >= curr->endidx) {
curr              424 drivers/input/serio/hp_sdc.c 	act = curr->seq[idx];
curr              427 drivers/input/serio/hp_sdc.c 	if (curr->idx >= curr->endidx) {
curr              429 drivers/input/serio/hp_sdc.c 			kfree(curr);
curr              439 drivers/input/serio/hp_sdc.c 		if (curr->idx != idx) {
curr              444 drivers/input/serio/hp_sdc.c 		hp_sdc_status_out8(curr->seq[idx]);
curr              445 drivers/input/serio/hp_sdc.c 		curr->idx++;
curr              451 drivers/input/serio/hp_sdc.c 			curr->idx++;
curr              457 drivers/input/serio/hp_sdc.c 		qty = curr->seq[idx];
curr              459 drivers/input/serio/hp_sdc.c 		if (curr->idx - idx < qty) {
curr              460 drivers/input/serio/hp_sdc.c 			hp_sdc_data_out8(curr->seq[curr->idx]);
curr              461 drivers/input/serio/hp_sdc.c 			curr->idx++;
curr              463 drivers/input/serio/hp_sdc.c 			if (curr->idx - idx >= qty &&
curr              475 drivers/input/serio/hp_sdc.c 		mask = curr->seq[idx];
curr              476 drivers/input/serio/hp_sdc.c 		if (idx != curr->idx) {
curr              486 drivers/input/serio/hp_sdc.c 		w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0];
curr              487 drivers/input/serio/hp_sdc.c 		w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1];
curr              488 drivers/input/serio/hp_sdc.c 		w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2];
curr              489 drivers/input/serio/hp_sdc.c 		w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3];
curr              509 drivers/input/serio/hp_sdc.c 			curr->idx = idx;
curr              523 drivers/input/serio/hp_sdc.c 				curr->idx = idx + 1;
curr              545 drivers/input/serio/hp_sdc.c 		postcmd = curr->seq[idx];
curr              546 drivers/input/serio/hp_sdc.c 		curr->idx++;
curr              550 drivers/input/serio/hp_sdc.c 			hp_sdc.rqty = curr->seq[curr->idx];
curr              552 drivers/input/serio/hp_sdc.c 			curr->idx++;
curr              566 drivers/input/serio/hp_sdc.c 		up(curr->act.semaphore);
curr              568 drivers/input/serio/hp_sdc.c 		curr->act.irqhook(0,NULL,0,0);
curr              570 drivers/input/serio/hp_sdc.c 	if (curr->idx >= curr->endidx) { /* This transaction is over. */
curr              572 drivers/input/serio/hp_sdc.c 			kfree(curr);
curr              575 drivers/input/serio/hp_sdc.c 		curr->actidx = idx + 1;
curr              576 drivers/input/serio/hp_sdc.c 		curr->idx = idx + 2;
curr              451 drivers/iommu/io-pgtable-arm-v7s.c 					   arm_v7s_iopte curr,
curr              467 drivers/iommu/io-pgtable-arm-v7s.c 	old = cmpxchg_relaxed(ptep, curr, new);
curr              353 drivers/iommu/io-pgtable-arm.c 					     arm_lpae_iopte curr,
curr              369 drivers/iommu/io-pgtable-arm.c 	old = cmpxchg64_relaxed(ptep, curr, new);
curr              376 drivers/iommu/io-pgtable-arm.c 	if (old == curr)
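
The io-pgtable entries above install a next-level table pointer with a relaxed cmpxchg and detect a racing walker by looking at the value that came back: if the swap failed, another CPU got there first and its entry is used instead. A simplified C11-atomics sketch that races against an empty (NULL) slot rather than the page table's provisional value:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Install-once with compare-and-swap: try to fill an empty slot; if
 * another thread won the race, keep (and use) the entry already there.
 */
static void *install_table(_Atomic(void *) *slot, void *mine)
{
	void *expected = NULL;

	if (atomic_compare_exchange_strong(slot, &expected, mine))
		return mine;		/* we installed it */

	return expected;		/* racing installer won */
}

int main(void)
{
	_Atomic(void *) slot = NULL;
	int a, b;

	printf("%s\n", install_table(&slot, &a) == &a ? "a won" : "b won");
	printf("%s\n", install_table(&slot, &b) == &a ? "a kept" : "?");
	return 0;
}
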
curr              184 drivers/iommu/iova.c 	struct rb_node *curr, *prev;
curr              199 drivers/iommu/iova.c 	curr = __get_cached_rbnode(iovad, limit_pfn);
curr              200 drivers/iommu/iova.c 	curr_iova = rb_entry(curr, struct iova, node);
curr              204 drivers/iommu/iova.c 		prev = curr;
curr              205 drivers/iommu/iova.c 		curr = rb_prev(curr);
curr              206 drivers/iommu/iova.c 		curr_iova = rb_entry(curr, struct iova, node);
curr              207 drivers/iommu/iova.c 	} while (curr && new_pfn <= curr_iova->pfn_hi);
curr               96 drivers/leds/leds-lp55xx-common.c 	unsigned long curr;
curr               98 drivers/leds/leds-lp55xx-common.c 	if (kstrtoul(buf, 0, &curr))
curr              101 drivers/leds/leds-lp55xx-common.c 	if (curr > led->max_current)
curr              108 drivers/leds/leds-lp55xx-common.c 	chip->cfg->set_led_current(led, (u8)curr);
curr               27 drivers/leds/trigger/ledtrig-pattern.c 	struct led_pattern *curr;
curr               41 drivers/leds/trigger/ledtrig-pattern.c 	data->curr = data->next;
curr               42 drivers/leds/trigger/ledtrig-pattern.c 	if (!data->is_indefinite && data->curr == data->patterns)
curr               62 drivers/leds/trigger/ledtrig-pattern.c 	if (data->delta_t == 0 || data->curr->delta_t < UPDATE_INTERVAL)
curr               63 drivers/leds/trigger/ledtrig-pattern.c 		return data->curr->brightness;
curr               65 drivers/leds/trigger/ledtrig-pattern.c 	step_brightness = abs(data->next->brightness - data->curr->brightness);
curr               66 drivers/leds/trigger/ledtrig-pattern.c 	step_brightness = data->delta_t * step_brightness / data->curr->delta_t;
curr               68 drivers/leds/trigger/ledtrig-pattern.c 	if (data->next->brightness > data->curr->brightness)
curr               69 drivers/leds/trigger/ledtrig-pattern.c 		return data->curr->brightness + step_brightness;
curr               71 drivers/leds/trigger/ledtrig-pattern.c 		return data->curr->brightness - step_brightness;
curr               82 drivers/leds/trigger/ledtrig-pattern.c 		if (data->curr->brightness == data->next->brightness) {
curr               85 drivers/leds/trigger/ledtrig-pattern.c 					   data->curr->brightness);
curr               87 drivers/leds/trigger/ledtrig-pattern.c 				  jiffies + msecs_to_jiffies(data->curr->delta_t));
curr              102 drivers/leds/trigger/ledtrig-pattern.c 			if (data->delta_t > data->curr->delta_t) {
curr              137 drivers/leds/trigger/ledtrig-pattern.c 	data->curr = data->patterns;
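
The ledtrig-pattern.c entries above interpolate brightness linearly between the current and next pattern steps: the brightness gap is scaled by how far into the current step's interval the timer has advanced, then applied in the direction of the ramp. A compilable sketch with a hypothetical step struct:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pattern step: target brightness held for delta_t ms. */
struct step { int brightness; int delta_t; };

static int compute_brightness(const struct step *curr,
			      const struct step *next, int elapsed)
{
	int delta;

	if (elapsed == 0 || curr->delta_t == 0)
		return curr->brightness;

	/* Scale the gap by the fraction of the interval elapsed. */
	delta = abs(next->brightness - curr->brightness);
	delta = elapsed * delta / curr->delta_t;

	return next->brightness > curr->brightness ?
		curr->brightness + delta : curr->brightness - delta;
}

int main(void)
{
	struct step a = { 0, 100 }, b = { 255, 100 };

	/* Halfway through a 100 ms ramp from 0 to 255 -> 127. */
	printf("%d\n", compute_brightness(&a, &b, 50));
	return 0;
}
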
curr               79 drivers/media/common/saa7146/saa7146_fops.c 	if (NULL == q->curr) {
curr               80 drivers/media/common/saa7146/saa7146_fops.c 		q->curr = buf;
curr               98 drivers/media/common/saa7146/saa7146_fops.c 	DEB_EE("q->curr:%p\n", q->curr);
curr              100 drivers/media/common/saa7146/saa7146_fops.c 	BUG_ON(!q->curr);
curr              103 drivers/media/common/saa7146/saa7146_fops.c 	if (NULL == q->curr) {
curr              108 drivers/media/common/saa7146/saa7146_fops.c 	q->curr->vb.state = state;
curr              109 drivers/media/common/saa7146/saa7146_fops.c 	q->curr->vb.ts = ktime_get_ns();
curr              110 drivers/media/common/saa7146/saa7146_fops.c 	wake_up(&q->curr->vb.done);
curr              112 drivers/media/common/saa7146/saa7146_fops.c 	q->curr = NULL;
curr              131 drivers/media/common/saa7146/saa7146_fops.c 		q->curr = buf;
curr              176 drivers/media/common/saa7146/saa7146_fops.c 	if (q->curr) {
curr              177 drivers/media/common/saa7146/saa7146_fops.c 		DEB_D("timeout on %p\n", q->curr);
curr              339 drivers/media/common/saa7146/saa7146_vbi.c 	if (vv->vbi_dmaq.curr)
curr              442 drivers/media/common/saa7146/saa7146_vbi.c 	if (vv->vbi_dmaq.curr) {
curr              443 drivers/media/common/saa7146/saa7146_vbi.c 		DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr);
curr              446 drivers/media/common/saa7146/saa7146_vbi.c 		vv->vbi_dmaq.curr->vb.field_count = vv->vbi_fieldcount;
curr              421 drivers/media/common/saa7146/saa7146_video.c 	if (q->curr)
curr             1233 drivers/media/common/saa7146/saa7146_video.c 	if( NULL != q->curr ) {
curr             1168 drivers/media/pci/bt8xx/bttv-driver.c 		if (btv->curr.frame_irq) {
curr             1661 drivers/media/pci/bt8xx/bttv-driver.c 	if (!btv->curr.frame_irq) {
curr             3394 drivers/media/pci/bt8xx/bttv-driver.c 		btv->curr.top
curr             3395 drivers/media/pci/bt8xx/bttv-driver.c 		? (unsigned long long)btv->curr.top->top.dma : 0,
curr             3396 drivers/media/pci/bt8xx/bttv-driver.c 		btv->curr.bottom
curr             3397 drivers/media/pci/bt8xx/bttv-driver.c 		? (unsigned long long)btv->curr.bottom->bottom.dma : 0);
curr             3548 drivers/media/pci/bt8xx/bttv-driver.c 		      struct bttv_buffer_set *curr, unsigned int state)
curr             3553 drivers/media/pci/bt8xx/bttv-driver.c 		if (NULL != wakeup->top && curr->top != wakeup->top) {
curr             3563 drivers/media/pci/bt8xx/bttv-driver.c 		if (NULL != wakeup->top && curr->top != wakeup->top) {
curr             3572 drivers/media/pci/bt8xx/bttv-driver.c 		if (NULL != wakeup->bottom && curr->bottom != wakeup->bottom) {
curr             3617 drivers/media/pci/bt8xx/bttv-driver.c 	old  = btv->curr;
curr             3619 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr = new;
curr             3651 drivers/media/pci/bt8xx/bttv-driver.c 	struct bttv_buffer *wakeup = btv->curr.top;
curr             3657 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr.top_irq = 0;
curr             3658 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr.top = NULL;
curr             3689 drivers/media/pci/bt8xx/bttv-driver.c 	if ((btv->curr.top    && is_active(&btv->curr.top->top,       rc)) ||
curr             3690 drivers/media/pci/bt8xx/bttv-driver.c 	    (btv->curr.bottom && is_active(&btv->curr.bottom->bottom, rc))) {
curr             3699 drivers/media/pci/bt8xx/bttv-driver.c 	old = btv->curr;
curr             3700 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr = new;
curr             4279 drivers/media/pci/bt8xx/bttv-driver.c 	btv->state.video = btv->curr;
curr             4282 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr = idle;
curr             4338 drivers/media/pci/bt8xx/bttv-driver.c 	btv->curr = btv->state.video;
curr             4341 drivers/media/pci/bt8xx/bttv-driver.c 	bttv_buffer_activate_video(btv, &btv->curr);
curr              459 drivers/media/pci/bt8xx/bttv-risc.c 	if (NULL != btv->curr.top)      btv->cap_ctl |= 0x02;
curr              460 drivers/media/pci/bt8xx/bttv-risc.c 	if (NULL != btv->curr.bottom)   btv->cap_ctl |= 0x01;
curr              471 drivers/media/pci/bt8xx/bttv-risc.c 		 btv->curr.top     ? (unsigned long long)btv->curr.top->top.dma        : 0,
curr              473 drivers/media/pci/bt8xx/bttv-risc.c 		 btv->curr.bottom  ? (unsigned long long)btv->curr.bottom->bottom.dma  : 0);
curr              481 drivers/media/pci/bt8xx/bttv-risc.c 	if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) {
curr              460 drivers/media/pci/bt8xx/bttvp.h 	struct bttv_buffer_set  curr;       /* active buffers      */
curr              138 drivers/media/pci/ngene/ngene.h 	u64 curr;
curr              272 drivers/media/pci/saa7134/saa7134-core.c 	if (NULL == q->curr) {
curr              274 drivers/media/pci/saa7134/saa7134-core.c 			q->curr = buf;
curr              281 drivers/media/pci/saa7134/saa7134-core.c 			q->curr = buf;
curr              295 drivers/media/pci/saa7134/saa7134-core.c 	core_dbg("buffer_finish %p\n", q->curr);
curr              298 drivers/media/pci/saa7134/saa7134-core.c 	q->curr->vb2.vb2_buf.timestamp = ktime_get_ns();
curr              299 drivers/media/pci/saa7134/saa7134-core.c 	q->curr->vb2.sequence = q->seq_nr++;
curr              300 drivers/media/pci/saa7134/saa7134-core.c 	vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
curr              301 drivers/media/pci/saa7134/saa7134-core.c 	q->curr = NULL;
curr              310 drivers/media/pci/saa7134/saa7134-core.c 	BUG_ON(NULL != q->curr);
curr              320 drivers/media/pci/saa7134/saa7134-core.c 		q->curr = buf;
curr              347 drivers/media/pci/saa7134/saa7134-core.c 	if (q->curr) {
curr              348 drivers/media/pci/saa7134/saa7134-core.c 		core_dbg("timeout on %p\n", q->curr);
curr              390 drivers/media/pci/saa7134/saa7134-core.c 	if (dev->video_q.curr) {
curr              399 drivers/media/pci/saa7134/saa7134-core.c 	if (dev->video_q.curr && dev->fmt->planar) {
curr              412 drivers/media/pci/saa7134/saa7134-core.c 	if (dev->vbi_q.curr) {
curr              430 drivers/media/pci/saa7134/saa7134-core.c 	if (dev->ts_q.curr) {
curr             1383 drivers/media/pci/saa7134/saa7134-core.c 	buf  = q->curr;
curr               73 drivers/media/pci/saa7134/saa7134-ts.c 	dmaq->curr = NULL;
curr              142 drivers/media/pci/saa7134/saa7134-ts.c 		if (dmaq->curr) {
curr              143 drivers/media/pci/saa7134/saa7134-ts.c 			vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
curr              145 drivers/media/pci/saa7134/saa7134-ts.c 			dmaq->curr = NULL;
curr              312 drivers/media/pci/saa7134/saa7134-ts.c 	if (dev->ts_q.curr) {
curr              154 drivers/media/pci/saa7134/saa7134-vbi.c 	dmaq->curr = NULL;
curr              194 drivers/media/pci/saa7134/saa7134-vbi.c 	if (dev->vbi_q.curr) {
curr              197 drivers/media/pci/saa7134/saa7134-vbi.c 			dev->vbi_q.curr->top_seen = 1;
curr              200 drivers/media/pci/saa7134/saa7134-vbi.c 		if (!dev->vbi_q.curr->top_seen)
curr              908 drivers/media/pci/saa7134/saa7134-video.c 	dmaq->curr = NULL;
curr              994 drivers/media/pci/saa7134/saa7134-video.c 		if (dmaq->curr) {
curr              995 drivers/media/pci/saa7134/saa7134-video.c 			vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
curr              997 drivers/media/pci/saa7134/saa7134-video.c 			dmaq->curr = NULL;
curr             2253 drivers/media/pci/saa7134/saa7134-video.c 	if (dev->video_q.curr) {
curr             2258 drivers/media/pci/saa7134/saa7134-video.c 				dev->video_q.curr->top_seen = 1;
curr             2261 drivers/media/pci/saa7134/saa7134-video.c 			if (!dev->video_q.curr->top_seen)
curr              478 drivers/media/pci/saa7134/saa7134.h 	struct saa7134_buf         *curr;
curr               22 drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c 	u32 curr;
curr               27 drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c 	if (stream->curr >= stream->size)
curr               29 drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c 	return stream->addr[stream->curr++];
curr               65 drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c 	stream.curr = 0;
curr              193 drivers/media/platform/omap3isp/ispstat.c 		struct ispstat_buffer *curr = &stat->buf[i];
curr              199 drivers/media/platform/omap3isp/ispstat.c 		if (curr == stat->locked_buf || curr == stat->active_buf)
curr              203 drivers/media/platform/omap3isp/ispstat.c 		if (!look_empty && curr->empty)
curr              207 drivers/media/platform/omap3isp/ispstat.c 		if (curr->empty) {
curr              208 drivers/media/platform/omap3isp/ispstat.c 			found = curr;
curr              214 drivers/media/platform/omap3isp/ispstat.c 		    (s32)curr->frame_number - (s32)found->frame_number < 0)
curr              215 drivers/media/platform/omap3isp/ispstat.c 			found = curr;
curr              211 drivers/media/platform/rcar_jpu.c 	struct jpu_ctx		*curr;
curr              293 drivers/media/platform/rcar_jpu.c 	void *curr;
curr              580 drivers/media/platform/rcar_jpu.c 	if (buf->curr >= buf->end)
curr              583 drivers/media/platform/rcar_jpu.c 	return *(u8 *)buf->curr++;
curr              588 drivers/media/platform/rcar_jpu.c 	if (buf->end - buf->curr < 2)
curr              591 drivers/media/platform/rcar_jpu.c 	*word = get_unaligned_be16(buf->curr);
curr              592 drivers/media/platform/rcar_jpu.c 	buf->curr += 2;
curr              599 drivers/media/platform/rcar_jpu.c 	buf->curr += min((unsigned long)(buf->end - buf->curr), len);
curr              610 drivers/media/platform/rcar_jpu.c 	jpeg_buffer.curr = buffer;
curr             1370 drivers/media/platform/rcar_jpu.c 	jpu->curr = ctx;
curr             1564 drivers/media/platform/rcar_jpu.c 	jpu->curr = NULL;
curr              402 drivers/media/platform/rockchip/rga/rga-hw.c 	struct rga_ctx *ctx = rga->curr;
curr               43 drivers/media/platform/rockchip/rga/rga.c 	rga->curr = ctx;
curr               67 drivers/media/platform/rockchip/rga/rga.c 		struct rga_ctx *ctx = rga->curr;
curr               71 drivers/media/platform/rockchip/rga/rga.c 		rga->curr = NULL;
curr               81 drivers/media/platform/rockchip/rga/rga.h 	struct rga_ctx *curr;
curr              506 drivers/media/platform/s5p-g2d/g2d.c 	dev->curr = ctx;
curr              542 drivers/media/platform/s5p-g2d/g2d.c 	struct g2d_ctx *ctx = dev->curr;
curr              566 drivers/media/platform/s5p-g2d/g2d.c 	dev->curr = NULL;
curr               27 drivers/media/platform/s5p-g2d/g2d.h 	struct g2d_ctx		*curr;
curr              775 drivers/media/platform/s5p-jpeg/jpeg-core.c 	jpeg_buffer.curr = 0;
curr              783 drivers/media/platform/s5p-jpeg/jpeg-core.c 	jpeg_buffer.curr = 0;
curr              813 drivers/media/platform/s5p-jpeg/jpeg-core.c 		jpeg_buffer.curr = 0;
curr              816 drivers/media/platform/s5p-jpeg/jpeg-core.c 		while (jpeg_buffer.curr < jpeg_buffer.size) {
curr              870 drivers/media/platform/s5p-jpeg/jpeg-core.c 	jpeg_buffer.curr = 0;
curr              903 drivers/media/platform/s5p-jpeg/jpeg-core.c 		jpeg_buffer.curr = 0;
curr              906 drivers/media/platform/s5p-jpeg/jpeg-core.c 		while (jpeg_buffer.size - jpeg_buffer.curr >= 65) {
curr             1041 drivers/media/platform/s5p-jpeg/jpeg-core.c 	if (buf->curr >= buf->size)
curr             1044 drivers/media/platform/s5p-jpeg/jpeg-core.c 	return ((unsigned char *)buf->data)[buf->curr++];
curr             1124 drivers/media/platform/s5p-jpeg/jpeg-core.c 	jpeg_buffer.curr = 0;
curr             1149 drivers/media/platform/s5p-jpeg/jpeg-core.c 			sof = jpeg_buffer.curr; /* after 0xffc0 */
curr             1182 drivers/media/platform/s5p-jpeg/jpeg-core.c 			dqt[n_dqt] = jpeg_buffer.curr; /* after 0xffdb */
curr             1195 drivers/media/platform/s5p-jpeg/jpeg-core.c 			dht[n_dht] = jpeg_buffer.curr; /* after 0xffc4 */
curr             1201 drivers/media/platform/s5p-jpeg/jpeg-core.c 			sos = jpeg_buffer.curr - 2; /* 0xffda */
curr              251 drivers/media/platform/s5p-jpeg/jpeg-core.h 	unsigned long curr;
curr             1149 drivers/message/fusion/mptlan.c 	u32 curr, buckets, count, max;
curr             1154 drivers/message/fusion/mptlan.c 	curr = atomic_read(&priv->buckets_out);
curr             1155 drivers/message/fusion/mptlan.c 	buckets = (priv->max_buckets_out - curr);
curr             1159 drivers/message/fusion/mptlan.c 			__func__, buckets, curr));
curr              467 drivers/misc/habanalabs/device.c 	ktime_t zero_ktime, curr = ktime_get();
curr              494 drivers/misc/habanalabs/device.c 				ktime_sub(curr, ts->idle_to_busy_ts));
curr              519 drivers/misc/habanalabs/device.c 					ktime_sub(curr, ts->busy_to_idle_ts));
curr              540 drivers/misc/habanalabs/device.c 					ktime_sub(curr, ts->idle_to_busy_ts));
curr             1558 drivers/misc/habanalabs/habanalabs.h long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
curr               15 drivers/misc/habanalabs/sysfs.c long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
curr               23 drivers/misc/habanalabs/sysfs.c 	if (curr)
curr               21 drivers/misc/mic/scif/scif_rma_list.c 	struct scif_window *curr = NULL;
curr               28 drivers/misc/mic/scif/scif_rma_list.c 		curr = list_entry(head->prev, struct scif_window, list);
curr               29 drivers/misc/mic/scif/scif_rma_list.c 		if (curr->va_for_temp < window->va_for_temp) {
curr               35 drivers/misc/mic/scif/scif_rma_list.c 		curr = list_entry(item, struct scif_window, list);
curr               36 drivers/misc/mic/scif/scif_rma_list.c 		if (curr->va_for_temp > window->va_for_temp)
curr               38 drivers/misc/mic/scif/scif_rma_list.c 		prev = curr;
curr               51 drivers/misc/mic/scif/scif_rma_list.c 	struct scif_window *curr = NULL, *prev = NULL;
curr               56 drivers/misc/mic/scif/scif_rma_list.c 		curr = list_entry(item, struct scif_window, list);
curr               57 drivers/misc/mic/scif/scif_rma_list.c 		if (curr->offset > window->offset)
curr               59 drivers/misc/mic/scif/scif_rma_list.c 		prev = curr;
curr              226 drivers/mmc/host/sdhci-st.c 	unsigned long curr, value;
curr              231 drivers/mmc/host/sdhci-st.c 		curr = jiffies;
curr              237 drivers/mmc/host/sdhci-st.c 	} while (!time_after_eq(curr, finish));
curr             4100 drivers/mmc/host/sdhci.c 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
curr             4101 drivers/mmc/host/sdhci.c 		if (curr > 0) {
curr             4104 drivers/mmc/host/sdhci.c 			curr = curr/1000;  /* convert to mA */
curr             4105 drivers/mmc/host/sdhci.c 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
curr             4107 drivers/mmc/host/sdhci.c 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
curr             4109 drivers/mmc/host/sdhci.c 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
curr             4110 drivers/mmc/host/sdhci.c 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
curr             4111 drivers/mmc/host/sdhci.c 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
curr             1552 drivers/net/bonding/bond_3ad.c 						struct aggregator *curr)
curr             1579 drivers/net/bonding/bond_3ad.c 		return curr;
curr             1581 drivers/net/bonding/bond_3ad.c 	if (!curr->is_individual && best->is_individual)
curr             1582 drivers/net/bonding/bond_3ad.c 		return curr;
curr             1584 drivers/net/bonding/bond_3ad.c 	if (curr->is_individual && !best->is_individual)
curr             1587 drivers/net/bonding/bond_3ad.c 	if (__agg_has_partner(curr) && !__agg_has_partner(best))
curr             1588 drivers/net/bonding/bond_3ad.c 		return curr;
curr             1590 drivers/net/bonding/bond_3ad.c 	if (!__agg_has_partner(curr) && __agg_has_partner(best))
curr             1593 drivers/net/bonding/bond_3ad.c 	switch (__get_agg_selection_mode(curr->lag_ports)) {
curr             1595 drivers/net/bonding/bond_3ad.c 		if (__agg_active_ports(curr) > __agg_active_ports(best))
curr             1596 drivers/net/bonding/bond_3ad.c 			return curr;
curr             1598 drivers/net/bonding/bond_3ad.c 		if (__agg_active_ports(curr) < __agg_active_ports(best))
curr             1604 drivers/net/bonding/bond_3ad.c 		if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
curr             1605 drivers/net/bonding/bond_3ad.c 			return curr;
curr             1611 drivers/net/bonding/bond_3ad.c 				     curr->slave->bond->dev->name,
curr             1612 drivers/net/bonding/bond_3ad.c 				     curr->slave->dev->name,
curr             1613 drivers/net/bonding/bond_3ad.c 				     __get_agg_selection_mode(curr->lag_ports));
curr              891 drivers/net/bonding/bond_alb.c 		struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
curr              894 drivers/net/bonding/bond_alb.c 		if (curr->vlan_id == vlan_id)
curr              725 drivers/net/bonding/bond_main.c 	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
curr              728 drivers/net/bonding/bond_main.c 		if (!curr || curr->link != BOND_LINK_UP)
curr              730 drivers/net/bonding/bond_main.c 		return curr;
curr              738 drivers/net/bonding/bond_main.c 	if (!curr || curr->link != BOND_LINK_UP)
curr              746 drivers/net/bonding/bond_main.c 		if (prim->speed < curr->speed)
curr              747 drivers/net/bonding/bond_main.c 			return curr;
curr              748 drivers/net/bonding/bond_main.c 		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
curr              749 drivers/net/bonding/bond_main.c 			return curr;
curr              752 drivers/net/bonding/bond_main.c 		return curr;
curr              756 drivers/net/bonding/bond_main.c 		return curr;
curr               61 drivers/net/bonding/bond_procfs.c 	struct slave *curr, *primary;
curr               64 drivers/net/bonding/bond_procfs.c 	curr = rcu_dereference(bond->curr_active_slave);
curr               97 drivers/net/bonding/bond_procfs.c 			   (curr) ? curr->dev->name : "None");
curr             1068 drivers/net/can/c_can/c_can.c 	u16 curr, last = priv->last_status;
curr             1073 drivers/net/can/c_can/c_can.c 		priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
curr             1079 drivers/net/can/c_can/c_can.c 		curr = last;
curr             1083 drivers/net/can/c_can/c_can.c 	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
curr             1088 drivers/net/can/c_can/c_can.c 	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
curr             1093 drivers/net/can/c_can/c_can.c 	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
curr             1100 drivers/net/can/c_can/c_can.c 	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
curr             1105 drivers/net/can/c_can/c_can.c 	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
curr             1110 drivers/net/can/c_can/c_can.c 	if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
curr             1116 drivers/net/can/c_can/c_can.c 	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
curr              265 drivers/net/ethernet/atheros/ag71xx.c 	unsigned int curr;
curr              642 drivers/net/ethernet/atheros/ag71xx.c 	while (ring->dirty + n != ring->curr) {
curr              691 drivers/net/ethernet/atheros/ag71xx.c 	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
curr              822 drivers/net/ethernet/atheros/ag71xx.c 	ag->tx_ring.curr = 0;
curr              962 drivers/net/ethernet/atheros/ag71xx.c 	while (ring->curr != ring->dirty) {
curr             1007 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr = 0;
curr             1092 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr = 0;
curr             1106 drivers/net/ethernet/atheros/ag71xx.c 	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
curr             1293 drivers/net/ethernet/atheros/ag71xx.c 		i = (ring->curr + ndesc) & ring_mask;
curr             1348 drivers/net/ethernet/atheros/ag71xx.c 	i = ring->curr & ring_mask;
curr             1357 drivers/net/ethernet/atheros/ag71xx.c 	i = (ring->curr + n - 1) & ring_mask;
curr             1366 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr += n;
curr             1375 drivers/net/ethernet/atheros/ag71xx.c 	if (ring->curr - ring->dirty >= ring_size - ring_min) {
curr             1451 drivers/net/ethernet/atheros/ag71xx.c 		  limit, ring->curr, ring->dirty);
curr             1456 drivers/net/ethernet/atheros/ag71xx.c 		unsigned int i = ring->curr & ring_mask;
curr             1464 drivers/net/ethernet/atheros/ag71xx.c 		if ((ring->dirty + ring_size) == ring->curr) {
curr             1502 drivers/net/ethernet/atheros/ag71xx.c 		ring->curr++;
curr             1512 drivers/net/ethernet/atheros/ag71xx.c 		  ring->curr, ring->dirty, done);
curr              847 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				       u16 cp_cons, u16 curr)
curr              851 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
curr              859 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					      u16 agg_id, u16 curr)
curr              863 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return &tpa_info->agg_arr[curr];
curr              394 drivers/net/ethernet/intel/ice/ice_lib.c static int ice_get_free_slot(void *array, int size, int curr)
curr              399 drivers/net/ethernet/intel/ice/ice_lib.c 	if (curr < (size - 1) && !tmp_array[curr + 1]) {
curr              400 drivers/net/ethernet/intel/ice/ice_lib.c 		next = curr + 1;
curr             1061 drivers/net/ethernet/marvell/pxa168_eth.c 	int curr;
curr             1064 drivers/net/ethernet/marvell/pxa168_eth.c 	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
curr             1065 drivers/net/ethernet/marvell/pxa168_eth.c 		if (pep->rx_skb[curr]) {
curr             1066 drivers/net/ethernet/marvell/pxa168_eth.c 			dev_kfree_skb(pep->rx_skb[curr]);
curr              137 drivers/net/ethernet/mellanox/mlx4/en_port.c 	__be64 *curr = start;
curr              143 drivers/net/ethernet/mellanox/mlx4/en_port.c 		ret += be64_to_cpu(*curr);
curr              144 drivers/net/ethernet/mellanox/mlx4/en_port.c 		curr += offset;
curr               66 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c 	const u32 *curr = (const u32 *)input_data;
curr               75 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c 		one = *curr++ ^ crc;
curr               76 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c 		two = *curr++;
curr               90 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c 	curr_char = (const u8 *)curr;
curr               33 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr               35 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		INIT_LIST_HEAD(&curr->bound_ports_list);
curr               36 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		curr->id = i;
curr               47 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr               49 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
curr              656 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr              658 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		if (curr->ref_count && curr->to_dev == to_dev)
curr              659 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			return curr;
curr              677 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr              679 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		if (curr->ref_count && curr->id == span_id)
curr              680 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			return curr;
curr              719 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr              721 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		list_for_each_entry(p, &curr->bound_ports_list, list)
curr              808 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			struct mlxsw_sp_span_entry *curr =
curr              811 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
curr              966 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
curr              969 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		if (!curr->ref_count)
curr              972 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		err = curr->ops->parms(curr->to_dev, &sparms);
curr              976 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
curr              977 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			mlxsw_sp_span_entry_deconfigure(curr);
curr              978 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
curr             2895 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct sk_buff *segs, *curr;
curr             2905 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		curr = segs;
curr             2907 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		curr->next = NULL;
curr             2908 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		status = myri10ge_xmit(curr, dev);
curr             2910 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			dev_kfree_skb_any(curr);
curr             2912 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				curr = segs;
curr             2914 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				curr->next = NULL;
curr              715 drivers/net/ethernet/sun/sungem.c 	int cluster_start, curr, count, kick;
curr              717 drivers/net/ethernet/sun/sungem.c 	cluster_start = curr = (gp->rx_new & ~(4 - 1));
curr              721 drivers/net/ethernet/sun/sungem.c 	while (curr != limit) {
curr              722 drivers/net/ethernet/sun/sungem.c 		curr = NEXT_RX(curr);
curr              730 drivers/net/ethernet/sun/sungem.c 				if (cluster_start == curr)
curr              733 drivers/net/ethernet/sun/sungem.c 			kick = curr;
curr             1286 drivers/net/ethernet/sun/sunvnet_common.c 		struct sk_buff *curr = segs;
curr             1289 drivers/net/ethernet/sun/sunvnet_common.c 		curr->next = NULL;
curr             1290 drivers/net/ethernet/sun/sunvnet_common.c 		if (port->tso && curr->len > dev->mtu) {
curr             1291 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(curr)->gso_size = gso_size;
curr             1292 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(curr)->gso_type = gso_type;
curr             1293 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(curr)->gso_segs =
curr             1294 drivers/net/ethernet/sun/sunvnet_common.c 				DIV_ROUND_UP(curr->len - hlen, gso_size);
curr             1296 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(curr)->gso_size = 0;
curr             1299 drivers/net/ethernet/sun/sunvnet_common.c 		skb_push(curr, maclen);
curr             1300 drivers/net/ethernet/sun/sunvnet_common.c 		skb_reset_mac_header(curr);
curr             1301 drivers/net/ethernet/sun/sunvnet_common.c 		memcpy(skb_mac_header(curr), skb_mac_header(skb),
curr             1303 drivers/net/ethernet/sun/sunvnet_common.c 		curr->csum_start = skb_transport_header(curr) - curr->head;
curr             1304 drivers/net/ethernet/sun/sunvnet_common.c 		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
curr             1305 drivers/net/ethernet/sun/sunvnet_common.c 			curr->csum_offset = offsetof(struct tcphdr, check);
curr             1306 drivers/net/ethernet/sun/sunvnet_common.c 		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
curr             1307 drivers/net/ethernet/sun/sunvnet_common.c 			curr->csum_offset = offsetof(struct udphdr, check);
curr             1310 drivers/net/ethernet/sun/sunvnet_common.c 			status = sunvnet_start_xmit_common(curr, dev,
curr             1313 drivers/net/ethernet/sun/sunvnet_common.c 			dev_kfree_skb_any(curr);
curr             1837 drivers/net/ethernet/ti/netcp_ethss.c 	u32 curr, delta;
curr             1844 drivers/net/ethernet/ti/netcp_ethss.c 	curr = readl(p_stats_entry);
curr             1845 drivers/net/ethernet/ti/netcp_ethss.c 	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
curr             1846 drivers/net/ethernet/ti/netcp_ethss.c 	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
curr              543 drivers/net/ethernet/via/via-velocity.c 	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
curr             1561 drivers/net/ethernet/via/via-velocity.c 	} while (dirty != vptr->rx.curr);
curr             1659 drivers/net/ethernet/via/via-velocity.c 		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
curr             2091 drivers/net/ethernet/via/via-velocity.c 	int rd_curr = vptr->rx.curr;
curr             2128 drivers/net/ethernet/via/via-velocity.c 	vptr->rx.curr = rd_curr;
curr             2540 drivers/net/ethernet/via/via-velocity.c 	index = vptr->tx.curr[qnum];
curr             2598 drivers/net/ethernet/via/via-velocity.c 	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
curr             1446 drivers/net/ethernet/via/via-velocity.h 		int curr[TX_QUEUE_NO];
curr             1457 drivers/net/ethernet/via/via-velocity.h 		int curr;
curr             1506 drivers/net/wan/cosa.c 	int i=0, id=0, prev=0, curr=0;
curr             1524 drivers/net/wan/cosa.c 	for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
curr             1525 drivers/net/wan/cosa.c 		if ((curr = get_wait_data(cosa)) == -1) {
curr             1528 drivers/net/wan/cosa.c 		curr &= 0xff;
curr             1529 drivers/net/wan/cosa.c 		if (curr != '\r' && curr != '\n' && curr != 0x2e)
curr             1530 drivers/net/wan/cosa.c 			idstring[id++] = curr;
curr             1531 drivers/net/wan/cosa.c 		if (curr == 0x2e && prev == '\n')
curr              871 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	uint i, curr;
curr              881 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	curr =
curr              887 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	if (!forceall && (i == curr))
curr              100 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
curr              106 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
curr              107 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
curr              132 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		curr++;
curr              138 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
curr               35 drivers/net/wireless/marvell/mwifiex/sta_event.c 	u8 *curr;
curr               43 drivers/net/wireless/marvell/mwifiex/sta_event.c 	curr = event->data;
curr               50 drivers/net/wireless/marvell/mwifiex/sta_event.c 	tlv_mgmt_frame = (void *)curr;
curr               59 drivers/net/wireless/marvell/mwifiex/sta_event.c 		curr += (sizeof(*tlv_mgmt_frame) + 12);
curr               67 drivers/net/wireless/marvell/mwifiex/sta_event.c 		ele_hdr = (struct ieee_types_header *)curr;
curr              114 drivers/net/wireless/marvell/mwifiex/sta_event.c 		curr += (ele_len + sizeof(*ele_hdr));
curr               30 drivers/net/wireless/marvell/mwifiex/uap_event.c 	u8 *curr;
curr               39 drivers/net/wireless/marvell/mwifiex/uap_event.c 	curr = event->data;
curr               47 drivers/net/wireless/marvell/mwifiex/uap_event.c 		tlv_hdr = (struct mwifiex_ie_types_data *)curr;
curr               66 drivers/net/wireless/marvell/mwifiex/uap_event.c 			wmm_param_ie = (void *)(curr + 2);
curr               84 drivers/net/wireless/marvell/mwifiex/uap_event.c 		curr += (tlv_len + sizeof(tlv_hdr->header));
curr              914 drivers/net/wireless/marvell/mwifiex/wmm.c 	u8 *curr = (u8 *) &resp->params.get_wmm_status;
curr              929 drivers/net/wireless/marvell/mwifiex/wmm.c 		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
curr              962 drivers/net/wireless/marvell/mwifiex/wmm.c 				(struct ieee_types_wmm_parameter *) (curr +
curr              988 drivers/net/wireless/marvell/mwifiex/wmm.c 		curr += (tlv_len + sizeof(tlv_hdr->header));
curr              903 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	u8 *curr = (u8 *)fw;
curr              949 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		memcpy(data, curr, size);
curr              954 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			 count, (void *)curr, &paddr, size);
curr              966 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		curr += size;
curr              112 drivers/net/wireless/st/cw1200/scan.c 	priv->scan.curr = priv->scan.begin;
curr              139 drivers/net/wireless/st/cw1200/scan.c 	bool first_run = (priv->scan.begin == priv->scan.curr &&
curr              168 drivers/net/wireless/st/cw1200/scan.c 	if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
curr              198 drivers/net/wireless/st/cw1200/scan.c 		struct ieee80211_channel *first = *priv->scan.curr;
curr              199 drivers/net/wireless/st/cw1200/scan.c 		for (it = priv->scan.curr + 1, i = 1;
curr              221 drivers/net/wireless/st/cw1200/scan.c 		scan.num_channels = it - priv->scan.curr;
curr              232 drivers/net/wireless/st/cw1200/scan.c 		scan.ch = kcalloc(it - priv->scan.curr,
curr              240 drivers/net/wireless/st/cw1200/scan.c 			scan.ch[i].number = priv->scan.curr[i]->hw_value;
curr              241 drivers/net/wireless/st/cw1200/scan.c 			if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) {
curr              259 drivers/net/wireless/st/cw1200/scan.c 		priv->scan.curr = it;
curr              265 drivers/net/wireless/st/cw1200/scan.c 	priv->scan.curr = priv->scan.end;
curr              349 drivers/net/wireless/st/cw1200/scan.c 			priv->scan.curr = priv->scan.end;
curr               27 drivers/net/wireless/st/cw1200/scan.h 	struct ieee80211_channel **curr;
curr              262 drivers/pci/controller/pcie-rockchip-host.c 	int curr;
curr              274 drivers/pci/controller/pcie-rockchip-host.c 	curr = regulator_get_current_limit(rockchip->vpcie3v3);
curr              275 drivers/pci/controller/pcie-rockchip-host.c 	if (curr <= 0)
curr              279 drivers/pci/controller/pcie-rockchip-host.c 	curr = curr / 1000; /* convert to mA */
curr              280 drivers/pci/controller/pcie-rockchip-host.c 	power = (curr * 3300) / 1000; /* milliwatt */
curr              193 drivers/pci/hotplug/cpqphp_core.c 						void __iomem *curr)
curr              200 drivers/pci/hotplug/cpqphp_core.c 	if (!smbios_table || !curr)
curr              206 drivers/pci/hotplug/cpqphp_core.c 	p_temp = curr;
curr              207 drivers/pci/hotplug/cpqphp_core.c 	p_temp += readb(curr + SMBIOS_GENERIC_LENGTH);
curr               35 drivers/pci/hotplug/ibmphp_res.c static struct bus_node * __init alloc_error_bus(struct ebda_pci_rsrc *curr, u8 busno, int flag)
curr               39 drivers/pci/hotplug/ibmphp_res.c 	if (!(curr) && !(flag)) {
curr               51 drivers/pci/hotplug/ibmphp_res.c 		newbus->busno = curr->bus_num;
curr               56 drivers/pci/hotplug/ibmphp_res.c static struct resource_node * __init alloc_resources(struct ebda_pci_rsrc *curr)
curr               60 drivers/pci/hotplug/ibmphp_res.c 	if (!curr) {
curr               69 drivers/pci/hotplug/ibmphp_res.c 	rs->busno = curr->bus_num;
curr               70 drivers/pci/hotplug/ibmphp_res.c 	rs->devfunc = curr->dev_fun;
curr               71 drivers/pci/hotplug/ibmphp_res.c 	rs->start = curr->start_addr;
curr               72 drivers/pci/hotplug/ibmphp_res.c 	rs->end = curr->end_addr;
curr               73 drivers/pci/hotplug/ibmphp_res.c 	rs->len = curr->end_addr - curr->start_addr + 1;
curr               77 drivers/pci/hotplug/ibmphp_res.c static int __init alloc_bus_range(struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus)
curr               88 drivers/pci/hotplug/ibmphp_res.c 		newbus->busno = curr->bus_num;
curr              110 drivers/pci/hotplug/ibmphp_res.c 	newrange->start = curr->start_addr;
curr              111 drivers/pci/hotplug/ibmphp_res.c 	newrange->end = curr->end_addr;
curr              182 drivers/pci/hotplug/ibmphp_res.c 	struct ebda_pci_rsrc *curr;
curr              192 drivers/pci/hotplug/ibmphp_res.c 	list_for_each_entry(curr, &ibmphp_ebda_pci_rsrc_head,
curr              194 drivers/pci/hotplug/ibmphp_res.c 		if (!(curr->rsrc_type & PCIDEVMASK)) {
curr              201 drivers/pci/hotplug/ibmphp_res.c 		if (curr->rsrc_type & PRIMARYBUSMASK) {
curr              203 drivers/pci/hotplug/ibmphp_res.c 			if ((curr->rsrc_type & RESTYPE) == MMASK) {
curr              206 drivers/pci/hotplug/ibmphp_res.c 					rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
curr              212 drivers/pci/hotplug/ibmphp_res.c 					bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1);
curr              215 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&bus_cur, &newrange, curr, MEM, 0);
curr              220 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
curr              228 drivers/pci/hotplug/ibmphp_res.c 			} else if ((curr->rsrc_type & RESTYPE) == PFMASK) {
curr              232 drivers/pci/hotplug/ibmphp_res.c 					rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
curr              238 drivers/pci/hotplug/ibmphp_res.c 					bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1);
curr              241 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&bus_cur, &newrange, curr, PFMEM, 0);
curr              246 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
curr              253 drivers/pci/hotplug/ibmphp_res.c 			} else if ((curr->rsrc_type & RESTYPE) == IOMASK) {
curr              257 drivers/pci/hotplug/ibmphp_res.c 					rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
curr              263 drivers/pci/hotplug/ibmphp_res.c 					bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1);
curr              265 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&bus_cur, &newrange, curr, IO, 0);
curr              270 drivers/pci/hotplug/ibmphp_res.c 						rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
curr              284 drivers/pci/hotplug/ibmphp_res.c 			if ((curr->rsrc_type & RESTYPE) == MMASK) {
curr              286 drivers/pci/hotplug/ibmphp_res.c 				new_mem = alloc_resources(curr);
curr              298 drivers/pci/hotplug/ibmphp_res.c 					newbus = alloc_error_bus(curr, 0, 0);
curr              307 drivers/pci/hotplug/ibmphp_res.c 			} else if ((curr->rsrc_type & RESTYPE) == PFMASK) {
curr              309 drivers/pci/hotplug/ibmphp_res.c 				new_pfmem = alloc_resources(curr);
curr              315 drivers/pci/hotplug/ibmphp_res.c 					newbus = alloc_error_bus(curr, 0, 0);
curr              324 drivers/pci/hotplug/ibmphp_res.c 			} else if ((curr->rsrc_type & RESTYPE) == IOMASK) {
curr              326 drivers/pci/hotplug/ibmphp_res.c 				new_io = alloc_resources(curr);
curr              339 drivers/pci/hotplug/ibmphp_res.c 					newbus = alloc_error_bus(curr, 0, 0);
curr              261 drivers/perf/arm_smmuv3_pmu.c static bool smmu_pmu_check_global_filter(struct perf_event *curr,
curr              264 drivers/perf/arm_smmuv3_pmu.c 	if (get_filter_enable(new) != get_filter_enable(curr))
curr              270 drivers/perf/arm_smmuv3_pmu.c 	return get_filter_span(new) == get_filter_span(curr) &&
curr              271 drivers/perf/arm_smmuv3_pmu.c 	       get_filter_stream_id(new) == get_filter_stream_id(curr);
curr              323 drivers/perf/arm_smmuv3_pmu.c static bool smmu_pmu_events_compatible(struct perf_event *curr,
curr              326 drivers/perf/arm_smmuv3_pmu.c 	if (new->pmu != curr->pmu)
curr              330 drivers/perf/arm_smmuv3_pmu.c 	    !smmu_pmu_check_global_filter(curr, new))
curr             1601 drivers/pinctrl/intel/pinctrl-intel.c 	u32 curr, updated;
curr             1603 drivers/pinctrl/intel/pinctrl-intel.c 	curr = readl(hostown);
curr             1604 drivers/pinctrl/intel/pinctrl-intel.c 	updated = (curr & ~mask) | (value & mask);
curr             1607 drivers/pinctrl/intel/pinctrl-intel.c 	return curr;
curr              199 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	struct mvebu_mpp_ctrl_setting *curr;
curr              206 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	curr = mvebu_pinctrl_find_setting_by_val(pctl, grp, config);
curr              208 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	if (curr) {
curr              209 drivers/pinctrl/mvebu/pinctrl-mvebu.c 		seq_printf(s, "current: %s", curr->name);
curr              210 drivers/pinctrl/mvebu/pinctrl-mvebu.c 		if (curr->subname)
curr              211 drivers/pinctrl/mvebu/pinctrl-mvebu.c 			seq_printf(s, "(%s)", curr->subname);
curr              212 drivers/pinctrl/mvebu/pinctrl-mvebu.c 		if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
curr              214 drivers/pinctrl/mvebu/pinctrl-mvebu.c 			if (curr->flags & MVEBU_SETTING_GPI)
curr              216 drivers/pinctrl/mvebu/pinctrl-mvebu.c 			if (curr->flags & MVEBU_SETTING_GPO)
curr              227 drivers/pinctrl/mvebu/pinctrl-mvebu.c 			if (curr == &grp->settings[n])
curr              429 drivers/platform/x86/asus-laptop.c static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
curr              442 drivers/platform/x86/asus-laptop.c 		delta = abs(curr - (short)val);
curr              669 drivers/platform/x86/fujitsu-laptop.c 	int curr;
curr              671 drivers/platform/x86/fujitsu-laptop.c 	curr = call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0);
curr              674 drivers/platform/x86/fujitsu-laptop.c 				      curr | ECO_LED_ON);
curr              677 drivers/platform/x86/fujitsu-laptop.c 				      curr & ~ECO_LED_ON);
curr               27 drivers/pnp/interface.c 	char *curr;		/* current position in buffer */
curr               44 drivers/pnp/interface.c 	res = vsnprintf(buffer->curr, buffer->len - buffer->size, fmt, args);
curr               50 drivers/pnp/interface.c 	buffer->curr += res;
curr              222 drivers/pnp/interface.c 	buffer->curr = buffer->buffer;
curr              241 drivers/pnp/interface.c 	ret = (buffer->curr - buf);
curr              265 drivers/pnp/interface.c 	buffer->curr = buffer->buffer;
curr              297 drivers/pnp/interface.c 	ret = (buffer->curr - buf);
curr              204 drivers/power/supply/ab8500_btemp.c 	int curr;
curr              218 drivers/power/supply/ab8500_btemp.c 			curr = BAT_CTRL_7U_ENA;
curr              220 drivers/power/supply/ab8500_btemp.c 			curr = BAT_CTRL_20U_ENA;
curr              242 drivers/power/supply/ab8500_btemp.c 			FORCE_BAT_CTRL_CMP_HIGH | curr);
curr             1004 drivers/power/supply/ab8500_charger.c static int ab8500_current_to_regval(struct ab8500_charger *di, int curr)
curr             1008 drivers/power/supply/ab8500_charger.c 	if (curr < di->bm->chg_output_curr[0])
curr             1012 drivers/power/supply/ab8500_charger.c 		if (curr < di->bm->chg_output_curr[i])
curr             1018 drivers/power/supply/ab8500_charger.c 	if (curr == di->bm->chg_output_curr[i])
curr             1024 drivers/power/supply/ab8500_charger.c static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr)
curr             1028 drivers/power/supply/ab8500_charger.c 	if (curr < di->bm->chg_input_curr[0])
curr             1032 drivers/power/supply/ab8500_charger.c 		if (curr < di->bm->chg_input_curr[i])
curr             1038 drivers/power/supply/ab8500_charger.c 	if (curr == di->bm->chg_input_curr[i])
curr             2633 drivers/power/supply/ab8500_charger.c 	int ret, curr;
curr             2649 drivers/power/supply/ab8500_charger.c 	curr = di->bm->chg_input_curr[
curr             2652 drivers/power/supply/ab8500_charger.c 	if (di->max_usb_in_curr.calculated_max != curr) {
curr             2654 drivers/power/supply/ab8500_charger.c 		di->max_usb_in_curr.calculated_max = curr;
curr              361 drivers/power/supply/ab8500_fg.c static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
curr              366 drivers/power/supply/ab8500_fg.c 	if (curr > -di->bm->fg_params->high_curr_threshold)
curr              324 drivers/power/supply/axp288_fuel_gauge.c 	int pwr_stat, fg_res, curr, ret;
curr              359 drivers/power/supply/axp288_fuel_gauge.c 	ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &curr);
curr              364 drivers/power/supply/axp288_fuel_gauge.c 	if (curr == 0) {
curr              954 drivers/power/supply/bq24190_charger.c 	int curr, ret;
curr              959 drivers/power/supply/bq24190_charger.c 			ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
curr              971 drivers/power/supply/bq24190_charger.c 		curr /= 5;
curr              973 drivers/power/supply/bq24190_charger.c 	val->intval = curr;
curr              990 drivers/power/supply/bq24190_charger.c 	int ret, curr = val->intval;
curr             1000 drivers/power/supply/bq24190_charger.c 		curr *= 5;
curr             1005 drivers/power/supply/bq24190_charger.c 			ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
curr             1646 drivers/power/supply/bq27xxx_battery.c 	int curr;
curr             1649 drivers/power/supply/bq27xxx_battery.c 	curr = bq27xxx_read(di, BQ27XXX_REG_AI, false);
curr             1650 drivers/power/supply/bq27xxx_battery.c 	if (curr < 0) {
curr             1652 drivers/power/supply/bq27xxx_battery.c 		return curr;
curr             1659 drivers/power/supply/bq27xxx_battery.c 			curr = -curr;
curr             1662 drivers/power/supply/bq27xxx_battery.c 		val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
curr             1665 drivers/power/supply/bq27xxx_battery.c 		val->intval = (int)((s16)curr) * 1000;
curr              558 drivers/power/supply/charger-manager.c 	u64 curr = ktime_to_ms(ktime_get());
curr              567 drivers/power/supply/charger-manager.c 		duration = curr - cm->charging_start_time;
curr              577 drivers/power/supply/charger-manager.c 		duration = curr - cm->charging_end_time;
curr              527 drivers/power/supply/pm2301_charger.c static int pm2xxx_current_to_regval(int curr)
curr              531 drivers/power/supply/pm2301_charger.c 	if (curr < pm2xxx_charger_current_map[0])
curr              535 drivers/power/supply/pm2301_charger.c 		if (curr < pm2xxx_charger_current_map[i])
curr              540 drivers/power/supply/pm2301_charger.c 	if (curr == pm2xxx_charger_current_map[i])
curr              546 drivers/power/supply/pm2301_charger.c static int pm2xxx_voltage_to_regval(int curr)
curr              550 drivers/power/supply/pm2301_charger.c 	if (curr < pm2xxx_charger_voltage_map[0])
curr              554 drivers/power/supply/pm2301_charger.c 		if (curr < pm2xxx_charger_voltage_map[i])
curr              559 drivers/power/supply/pm2301_charger.c 	if (curr == pm2xxx_charger_voltage_map[i])
curr              397 drivers/power/supply/power_supply_core.c 	int curr;
curr              407 drivers/power/supply/power_supply_core.c 	curr = class_for_each_device(power_supply_class, NULL, psy,
curr              409 drivers/power/supply/power_supply_core.c 	if (curr <= 0)
curr              410 drivers/power/supply/power_supply_core.c 		return (curr == 0) ? -ENODEV : curr;
curr              412 drivers/power/supply/power_supply_core.c 	val.intval = curr;
curr              257 drivers/power/supply/power_supply_hwmon.c 	HWMON_CHANNEL_INFO(curr,
curr              525 drivers/power/supply/rt9455_charger.c 	int curr;
curr              531 drivers/power/supply/rt9455_charger.c 				   &curr);
curr              537 drivers/power/supply/rt9455_charger.c 	val->intval = curr;
curr              401 drivers/power/supply/twl4030_charger.c 	int v, curr;
curr              412 drivers/power/supply/twl4030_charger.c 	curr = twl4030_charger_get_current();
curr              414 drivers/power/supply/twl4030_charger.c 	dev_dbg(bci->dev, "v=%d cur=%d limit=%d target=%d\n", v, curr,
curr              742 drivers/power/supply/twl4030_charger.c 	int curr;
curr              746 drivers/power/supply/twl4030_charger.c 	curr = twl4030bci_read_adc_val(TWL4030_BCIICHG);
curr              747 drivers/power/supply/twl4030_charger.c 	if (curr < 0)
curr              748 drivers/power/supply/twl4030_charger.c 		return curr;
curr              754 drivers/power/supply/twl4030_charger.c 	return regval2ua(curr, bcictl1 & TWL4030_CGAIN);
curr              755 drivers/scsi/aic7xxx/aic79xx.h 	struct ahd_transinfo curr;
curr             1944 drivers/scsi/aic7xxx/aic79xx_core.c 			tinfo = &targ_info->curr;
curr             3149 drivers/scsi/aic7xxx/aic79xx_core.c 			if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
curr             3165 drivers/scsi/aic7xxx/aic79xx_core.c 				tinfo->curr.transport_version = 2;
curr             3648 drivers/scsi/aic7xxx/aic79xx_core.c 			memset(&tstate->transinfo[i].curr, 0,
curr             3649 drivers/scsi/aic7xxx/aic79xx_core.c 			      sizeof(tstate->transinfo[i].curr));
curr             3855 drivers/scsi/aic7xxx/aic79xx_core.c 			tinfo->curr.width = AHD_WIDTH_UNKNOWN;
curr             3856 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.period = AHD_PERIOD_UNKNOWN;
curr             3857 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
curr             3859 drivers/scsi/aic7xxx/aic79xx_core.c 	if (tinfo->curr.period != tinfo->goal.period
curr             3860 drivers/scsi/aic7xxx/aic79xx_core.c 	 || tinfo->curr.width != tinfo->goal.width
curr             3861 drivers/scsi/aic7xxx/aic79xx_core.c 	 || tinfo->curr.offset != tinfo->goal.offset
curr             3862 drivers/scsi/aic7xxx/aic79xx_core.c 	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
curr             3918 drivers/scsi/aic7xxx/aic79xx_core.c 	old_period = tinfo->curr.period;
curr             3919 drivers/scsi/aic7xxx/aic79xx_core.c 	old_offset = tinfo->curr.offset;
curr             3920 drivers/scsi/aic7xxx/aic79xx_core.c 	old_ppr	   = tinfo->curr.ppr_options;
curr             3929 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.period = period;
curr             3930 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.offset = offset;
curr             3931 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.ppr_options = ppr_options;
curr             3988 drivers/scsi/aic7xxx/aic79xx_core.c 		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
curr             4049 drivers/scsi/aic7xxx/aic79xx_core.c 	oldwidth = tinfo->curr.width;
curr             4054 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.width = width;
curr             4067 drivers/scsi/aic7xxx/aic79xx_core.c 		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
curr             4548 drivers/scsi/aic7xxx/aic79xx_core.c 	dowide = tinfo->curr.width != tinfo->goal.width;
curr             4549 drivers/scsi/aic7xxx/aic79xx_core.c 	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
curr             4594 drivers/scsi/aic7xxx/aic79xx_core.c 					  : tinfo->curr.width,
curr             5173 drivers/scsi/aic7xxx/aic79xx_core.c 					    tinfo->curr.width, devinfo->role);
curr             5567 drivers/scsi/aic7xxx/aic79xx_core.c 			tinfo->curr.transport_version = 2;
curr             5591 drivers/scsi/aic7xxx/aic79xx_core.c 		if (tinfo->goal.offset != tinfo->curr.offset) {
curr             7426 drivers/scsi/aic7xxx/aic79xx_core.c 		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
curr             7639 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.protocol_version = 2;
curr             7640 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.transport_version = 2;
curr             7753 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.protocol_version = 2;
curr             7754 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo->curr.transport_version = 2;
curr             9002 drivers/scsi/aic7xxx/aic79xx_core.c 		tinfo = &targ_info->curr;
curr              845 drivers/scsi/aic7xxx/aic79xx_osm.c 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
curr             1578 drivers/scsi/aic7xxx/aic79xx_osm.c 	 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
curr             1608 drivers/scsi/aic7xxx/aic79xx_osm.c 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
curr             1706 drivers/scsi/aic7xxx/aic79xx_osm.c 		if (tinfo->curr.period != tinfo->goal.period
curr             1707 drivers/scsi/aic7xxx/aic79xx_osm.c 		 || tinfo->curr.width != tinfo->goal.width
curr             1708 drivers/scsi/aic7xxx/aic79xx_osm.c 		 || tinfo->curr.offset != tinfo->goal.offset
curr             1709 drivers/scsi/aic7xxx/aic79xx_osm.c 		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
curr             1731 drivers/scsi/aic7xxx/aic79xx_osm.c 		if (tinfo->curr.period == spi_period(starget)
curr             1732 drivers/scsi/aic7xxx/aic79xx_osm.c 		    && tinfo->curr.width == spi_width(starget)
curr             1733 drivers/scsi/aic7xxx/aic79xx_osm.c 		    && tinfo->curr.offset == spi_offset(starget)
curr             1734 drivers/scsi/aic7xxx/aic79xx_osm.c 		 && tinfo->curr.ppr_options == target_ppr_options)
curr             1738 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_period(starget) = tinfo->curr.period;
curr             1739 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_width(starget) = tinfo->curr.width;
curr             1740 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_offset(starget) = tinfo->curr.offset;
curr             1741 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
curr             1742 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
curr             1743 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
curr             1744 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
curr             1745 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_pcomp_en(starget) =  tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
curr             1746 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_rti(starget) =  tinfo->curr.ppr_options &  MSG_EXT_PPR_RTI ? 1 : 0;
curr             1747 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
curr             1748 drivers/scsi/aic7xxx/aic79xx_osm.c 		spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
curr              177 drivers/scsi/aic7xxx/aic79xx_proc.c 	ahd_format_transinfo(m, &tinfo->curr);
curr              701 drivers/scsi/aic7xxx/aic7xxx.h 	struct ahc_transinfo curr;
curr             1072 drivers/scsi/aic7xxx/aic7xxx_core.c 			tinfo = &targ_info->curr;
curr             1877 drivers/scsi/aic7xxx/aic7xxx_core.c 				tinfo->curr.transport_version = 2;
curr             2163 drivers/scsi/aic7xxx/aic7xxx_core.c 			memset(&tstate->transinfo[i].curr, 0,
curr             2164 drivers/scsi/aic7xxx/aic7xxx_core.c 			      sizeof(tstate->transinfo[i].curr));
curr             2458 drivers/scsi/aic7xxx/aic7xxx_core.c 			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
curr             2459 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
curr             2460 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
curr             2462 drivers/scsi/aic7xxx/aic7xxx_core.c 	if (tinfo->curr.period != tinfo->goal.period
curr             2463 drivers/scsi/aic7xxx/aic7xxx_core.c 	 || tinfo->curr.width != tinfo->goal.width
curr             2464 drivers/scsi/aic7xxx/aic7xxx_core.c 	 || tinfo->curr.offset != tinfo->goal.offset
curr             2465 drivers/scsi/aic7xxx/aic7xxx_core.c 	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
curr             2521 drivers/scsi/aic7xxx/aic7xxx_core.c 	old_period = tinfo->curr.period;
curr             2522 drivers/scsi/aic7xxx/aic7xxx_core.c 	old_offset = tinfo->curr.offset;
curr             2523 drivers/scsi/aic7xxx/aic7xxx_core.c 	old_ppr	   = tinfo->curr.ppr_options;
curr             2576 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.period = period;
curr             2577 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.offset = offset;
curr             2578 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.ppr_options = ppr_options;
curr             2633 drivers/scsi/aic7xxx/aic7xxx_core.c 	oldwidth = tinfo->curr.width;
curr             2648 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.width = width;
curr             2713 drivers/scsi/aic7xxx/aic7xxx_core.c 		pending_hscb->scsioffset = tinfo->curr.offset;
curr             2988 drivers/scsi/aic7xxx/aic7xxx_core.c 	dowide = tinfo->curr.width != tinfo->goal.width;
curr             2989 drivers/scsi/aic7xxx/aic7xxx_core.c 	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
curr             3035 drivers/scsi/aic7xxx/aic7xxx_core.c 					  : tinfo->curr.width,
curr             4034 drivers/scsi/aic7xxx/aic7xxx_core.c 		tinfo->curr.transport_version = 2;
curr             4057 drivers/scsi/aic7xxx/aic7xxx_core.c 		if (tinfo->goal.offset != tinfo->curr.offset) {
curr             5546 drivers/scsi/aic7xxx/aic7xxx_core.c 			tinfo->curr.protocol_version = 2;
curr             5547 drivers/scsi/aic7xxx/aic7xxx_core.c 			tinfo->curr.transport_version = 2;
curr             1484 drivers/scsi/aic7xxx/aic7xxx_osm.c 	hscb->scsioffset = tinfo->curr.offset;
curr             1628 drivers/scsi/aic7xxx/aic7xxx_osm.c 		if (tinfo->curr.period != tinfo->goal.period
curr             1629 drivers/scsi/aic7xxx/aic7xxx_osm.c 		 || tinfo->curr.width != tinfo->goal.width
curr             1630 drivers/scsi/aic7xxx/aic7xxx_osm.c 		 || tinfo->curr.offset != tinfo->goal.offset
curr             1631 drivers/scsi/aic7xxx/aic7xxx_osm.c 		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
curr             1652 drivers/scsi/aic7xxx/aic7xxx_osm.c 		if (tinfo->curr.period == spi_period(starget)
curr             1653 drivers/scsi/aic7xxx/aic7xxx_osm.c 		    && tinfo->curr.width == spi_width(starget)
curr             1654 drivers/scsi/aic7xxx/aic7xxx_osm.c 		    && tinfo->curr.offset == spi_offset(starget)
curr             1655 drivers/scsi/aic7xxx/aic7xxx_osm.c 		 && tinfo->curr.ppr_options == target_ppr_options)
curr             1659 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_period(starget) = tinfo->curr.period;
curr             1660 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_width(starget) = tinfo->curr.width;
curr             1661 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_offset(starget) = tinfo->curr.offset;
curr             1662 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
curr             1663 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
curr             1664 drivers/scsi/aic7xxx/aic7xxx_osm.c 		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
curr              157 drivers/scsi/aic7xxx/aic7xxx_proc.c 	ahc_format_transinfo(m, &tinfo->curr);
curr              608 drivers/scsi/esas2r/esas2r.h 			struct atto_vda_sge *curr;
curr              614 drivers/scsi/esas2r/esas2r.h 			struct atto_physical_region_description *curr;
curr             1186 drivers/scsi/esas2r/esas2r.h 			sgc->sge.a64.curr = first;
curr             1192 drivers/scsi/esas2r/esas2r.h 			sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
curr              222 drivers/scsi/esas2r/esas2r_io.c 		if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
curr              237 drivers/scsi/esas2r/esas2r_io.c 			sgelen = (u8)((u8 *)sgc->sge.a64.curr
curr              247 drivers/scsi/esas2r/esas2r_io.c 			sgc->sge.a64.curr =
curr              306 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.a64.last = sgc->sge.a64.curr;
curr              309 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
curr              310 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.a64.curr->address = cpu_to_le32(addr);
curr              311 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.a64.curr++;
curr              336 drivers/scsi/esas2r/esas2r_io.c 			((u8 *)(sgc->sge.a64.curr) -
curr              418 drivers/scsi/esas2r/esas2r_io.c 				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
curr              420 drivers/scsi/esas2r/esas2r_io.c 				sgc->sge.prd.curr->address = cpu_to_le64(addr);
curr              467 drivers/scsi/esas2r/esas2r_io.c 			sgc->sge.prd.chain = sgc->sge.prd.curr;
curr              478 drivers/scsi/esas2r/esas2r_io.c 			sgc->sge.prd.curr =
curr              487 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
curr              488 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.prd.curr->address = cpu_to_le64(addr);
curr              492 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.prd.curr++;
curr              614 drivers/scsi/esas2r/esas2r_io.c 		(struct atto_physical_region_description *)sgc->sge.a64.curr;
curr              625 drivers/scsi/esas2r/esas2r_io.c 		sgc->sge.prd.curr = curr_iblk_chn;
curr              883 drivers/scsi/lpfc/lpfc_bsg.c 	struct list_head head, *curr, *next;
curr              893 drivers/scsi/lpfc/lpfc_bsg.c 	list_for_each_safe(curr, next, &head) {
curr              894 drivers/scsi/lpfc/lpfc_bsg.c 		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
curr             2960 drivers/scsi/lpfc/lpfc_bsg.c 	struct list_head head, *curr, *next;
curr             2998 drivers/scsi/lpfc/lpfc_bsg.c 	list_for_each_safe(curr, next, &head) {
curr             2999 drivers/scsi/lpfc/lpfc_bsg.c 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
curr             3000 drivers/scsi/lpfc/lpfc_bsg.c 		list_del(curr);
curr             3118 drivers/scsi/lpfc/lpfc_bsg.c 	struct lpfc_dmabuf  *curr;
curr             3254 drivers/scsi/lpfc/lpfc_bsg.c 	list_for_each_entry(curr, &head, list) {
curr             3255 drivers/scsi/lpfc/lpfc_bsg.c 		segment_len = ((struct lpfc_dmabufext *)curr)->size;
curr             3257 drivers/scsi/lpfc/lpfc_bsg.c 			ctreq = curr->virt;
curr             3270 drivers/scsi/lpfc/lpfc_bsg.c 		memcpy(curr->virt + segment_offset,
curr              129 drivers/scsi/xen-scsifront.c 	struct task_struct *curr;
curr              659 drivers/scsi/xen-scsifront.c 	if (info && current == info->curr) {
curr              677 drivers/scsi/xen-scsifront.c 	if (info && current == info->curr) {
curr              984 drivers/scsi/xen-scsifront.c 	BUG_ON(info->curr);
curr              985 drivers/scsi/xen-scsifront.c 	info->curr = current;
curr             1050 drivers/scsi/xen-scsifront.c 	info->curr = NULL;
curr              140 drivers/soc/aspeed/aspeed-p2a-ctrl.c 		const struct region *curr = &ctrl->config->regions[i];
curr              144 drivers/soc/aspeed/aspeed-p2a-ctrl.c 		if (curr->max < base)
curr              149 drivers/soc/aspeed/aspeed-p2a-ctrl.c 		if (curr->min > end)
curr              165 drivers/soc/aspeed/aspeed-p2a-ctrl.c 		regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
curr              991 drivers/staging/greybus/audio_topology.c 	struct gb_audio_control *curr;
curr             1014 drivers/staging/greybus/audio_topology.c 	curr = w->ctl;
curr             1017 drivers/staging/greybus/audio_topology.c 						   curr);
curr             1021 drivers/staging/greybus/audio_topology.c 				curr->name, curr->iface);
curr             1031 drivers/staging/greybus/audio_topology.c 		control->id = curr->id;
curr             1032 drivers/staging/greybus/audio_topology.c 		control->name = curr->name;
curr             1035 drivers/staging/greybus/audio_topology.c 		if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
curr             1037 drivers/staging/greybus/audio_topology.c 				&curr->info.value.enumerated;
curr             1051 drivers/staging/greybus/audio_topology.c 		curr = (void *)curr + csize;
curr             1152 drivers/staging/greybus/audio_topology.c 	struct gb_audio_control *curr;
curr             1162 drivers/staging/greybus/audio_topology.c 	curr = controls;
curr             1165 drivers/staging/greybus/audio_topology.c 						   curr);
curr             1168 drivers/staging/greybus/audio_topology.c 				curr->name, curr->iface);
curr             1178 drivers/staging/greybus/audio_topology.c 		control->id = curr->id;
curr             1180 drivers/staging/greybus/audio_topology.c 		strlcpy(temp_name, curr->name, NAME_SIZE);
curr             1181 drivers/staging/greybus/audio_topology.c 		snprintf(curr->name, NAME_SIZE, "GB %d %s", module->dev_id,
curr             1183 drivers/staging/greybus/audio_topology.c 		control->name = curr->name;
curr             1184 drivers/staging/greybus/audio_topology.c 		if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
curr             1186 drivers/staging/greybus/audio_topology.c 				&curr->info.value.enumerated;
curr             1200 drivers/staging/greybus/audio_topology.c 		dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
curr             1201 drivers/staging/greybus/audio_topology.c 			curr->name, curr->info.type);
curr             1202 drivers/staging/greybus/audio_topology.c 		curr = (void *)curr + csize;
curr             1222 drivers/staging/greybus/audio_topology.c 	struct gb_audio_widget *curr;
curr             1231 drivers/staging/greybus/audio_topology.c 	curr = widgets;
curr             1234 drivers/staging/greybus/audio_topology.c 						 curr, &w_size);
curr             1237 drivers/staging/greybus/audio_topology.c 				curr->name, curr->type);
curr             1247 drivers/staging/greybus/audio_topology.c 		widget->id = curr->id;
curr             1248 drivers/staging/greybus/audio_topology.c 		widget->name = curr->name;
curr             1250 drivers/staging/greybus/audio_topology.c 		curr = (void *)curr + w_size;
curr             1271 drivers/staging/greybus/audio_topology.c 	struct gb_audio_route *curr;
curr             1280 drivers/staging/greybus/audio_topology.c 	curr = routes;
curr             1284 drivers/staging/greybus/audio_topology.c 			gbaudio_map_widgetid(module, curr->destination_id);
curr             1287 drivers/staging/greybus/audio_topology.c 				curr->source_id, curr->destination_id,
curr             1288 drivers/staging/greybus/audio_topology.c 				curr->control_id, curr->index);
curr             1293 drivers/staging/greybus/audio_topology.c 			gbaudio_map_widgetid(module, curr->source_id);
curr             1296 drivers/staging/greybus/audio_topology.c 				curr->source_id, curr->destination_id,
curr             1297 drivers/staging/greybus/audio_topology.c 				curr->control_id, curr->index);
curr             1303 drivers/staging/greybus/audio_topology.c 					      curr->control_id,
curr             1304 drivers/staging/greybus/audio_topology.c 					      curr->index);
curr             1305 drivers/staging/greybus/audio_topology.c 	if ((curr->control_id != GBAUDIO_INVALID_ID) &&
curr             1308 drivers/staging/greybus/audio_topology.c 				curr->source_id, curr->destination_id,
curr             1309 drivers/staging/greybus/audio_topology.c 				curr->control_id, curr->index);
curr             1317 drivers/staging/greybus/audio_topology.c 		curr++;
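
Two cursor styles appear in the greybus block: the routes loop uses plain curr++ over fixed-size records, while the control and widget loops advance by a per-record byte size, curr = (void *)curr + csize. A bounds-checked sketch of the latter, with an assumed record header:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct record {
	uint32_t id;
	uint32_t size;	/* total bytes of this record, header included */
};

static void walk_records(const void *buf, size_t len)
{
	const char *p = buf, *end = p + len;

	while ((size_t)(end - p) >= sizeof(struct record)) {
		const struct record *curr = (const struct record *)p;

		if (curr->size < sizeof(*curr) ||
		    curr->size > (size_t)(end - p))
			break;		/* malformed record: stop the walk */
		printf("record %u, %u bytes\n",
		       (unsigned)curr->id, (unsigned)curr->size);
		p += curr->size;	/* byte-sized advance */
	}
}
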
curr             1483 drivers/staging/media/allegro-dvt/allegro-core.c 	char *curr;
curr             1536 drivers/staging/media/allegro-dvt/allegro-core.c 	curr = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
curr             1539 drivers/staging/media/allegro-dvt/allegro-core.c 		len = allegro_h264_write_sps(channel, curr, free);
curr             1546 drivers/staging/media/allegro-dvt/allegro-core.c 		curr += len;
curr             1554 drivers/staging/media/allegro-dvt/allegro-core.c 		len = allegro_h264_write_pps(channel, curr, free);
curr             1561 drivers/staging/media/allegro-dvt/allegro-core.c 		curr += len;
curr             1568 drivers/staging/media/allegro-dvt/allegro-core.c 	len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
curr             1574 drivers/staging/media/allegro-dvt/allegro-core.c 	curr += len;
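
The allegro lines thread a write cursor and a remaining-space counter through successive header writers: each returns the bytes it produced, and curr and free move in lockstep. A sketch of just that bookkeeping, with a stand-in writer (the real SPS/PPS generators are not reproduced here):

#include <string.h>
#include <stddef.h>

/* Stand-in for allegro_h264_write_sps()/_pps(): returns bytes
 * written, or -1 if the blob does not fit in the space left. */
static long write_blob(char *dst, size_t free, const char *src, size_t n)
{
	if (n > free)
		return -1;
	memcpy(dst, src, n);
	return (long)n;
}

static size_t emit_headers(char *curr, size_t free)
{
	size_t total = 0;
	long len;

	len = write_blob(curr, free, "SPS", 3);
	if (len < 0)
		return total;
	curr += len;			/* advance the cursor... */
	free -= (size_t)len;		/* ...and shrink the space left */
	total += (size_t)len;

	len = write_blob(curr, free, "PPS", 3);
	if (len > 0)
		total += (size_t)len;
	return total;
}
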
curr              190 drivers/staging/media/imx/imx-media-vdic.c 						  struct imx_media_buffer *curr)
curr              200 drivers/staging/media/imx/imx-media-vdic.c 	priv->curr_in_buf = curr;
curr              201 drivers/staging/media/imx/imx-media-vdic.c 	prev = priv->prev_in_buf ? priv->prev_in_buf : curr;
curr              204 drivers/staging/media/imx/imx-media-vdic.c 	curr_vb = &curr->vbuf.vb2_buf;
curr             1195 drivers/staging/unisys/visornic/visornic_main.c 	struct sk_buff *skb, *prev, *curr;
curr             1295 drivers/staging/unisys/visornic/visornic_main.c 			curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
curr             1296 drivers/staging/unisys/visornic/visornic_main.c 			curr->next = NULL;
curr             1299 drivers/staging/unisys/visornic/visornic_main.c 				skb_shinfo(skb)->frag_list = curr;
curr             1301 drivers/staging/unisys/visornic/visornic_main.c 				prev->next = curr;
curr             1302 drivers/staging/unisys/visornic/visornic_main.c 			prev = curr;
curr             1309 drivers/staging/unisys/visornic/visornic_main.c 			curr->len = currsize;
curr             1310 drivers/staging/unisys/visornic/visornic_main.c 			curr->tail += currsize;
curr             1311 drivers/staging/unisys/visornic/visornic_main.c 			curr->data_len = 0;
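
visornic stitches receive buffers into one packet: the first extra buffer hangs off skb_shinfo(skb)->frag_list, later ones are chained through prev->next, and each gets its len/tail fixed up. The chaining step alone, as a sketch with trimmed-down types:

#include <stddef.h>

struct frag { struct frag *next; };
struct pkt  { struct frag *frag_list; };

static void append_frag(struct pkt *skb, struct frag **prev,
			struct frag *curr)
{
	curr->next = NULL;
	if (!skb->frag_list)
		skb->frag_list = curr;	/* first chained buffer */
	else
		(*prev)->next = curr;	/* link behind the previous one */
	*prev = curr;
}
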
curr              529 drivers/staging/vt6655/device_main.c 	dma_addr_t      curr = priv->rd0_pool_dma;
curr              535 drivers/staging/vt6655/device_main.c 	     i++, curr += sizeof(struct vnt_rx_desc)) {
curr              550 drivers/staging/vt6655/device_main.c 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
curr              575 drivers/staging/vt6655/device_main.c 	dma_addr_t      curr = priv->rd1_pool_dma;
curr              581 drivers/staging/vt6655/device_main.c 	     i++, curr += sizeof(struct vnt_rx_desc)) {
curr              596 drivers/staging/vt6655/device_main.c 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
curr              645 drivers/staging/vt6655/device_main.c 	dma_addr_t  curr;
curr              649 drivers/staging/vt6655/device_main.c 	curr = priv->td0_pool_dma;
curr              651 drivers/staging/vt6655/device_main.c 	     i++, curr += sizeof(struct vnt_tx_desc)) {
curr              663 drivers/staging/vt6655/device_main.c 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
curr              684 drivers/staging/vt6655/device_main.c 	dma_addr_t  curr;
curr              689 drivers/staging/vt6655/device_main.c 	curr = priv->td1_pool_dma;
curr              691 drivers/staging/vt6655/device_main.c 	     i++, curr += sizeof(struct vnt_tx_desc)) {
curr              703 drivers/staging/vt6655/device_main.c 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
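
All four vt6655 loops initialize a DMA descriptor ring the same way: curr shadows the bus address of the slot being set up, each slot's next_desc points at the following slot, and the last one points back at the pool base to close the ring. A condensed sketch (the driver stores these little-endian via cpu_to_le32()):

#include <stdint.h>

struct desc { uint32_t next_desc; /* bus address of the next slot */ };

static void init_ring(struct desc *ring, uint32_t pool_dma, unsigned int n)
{
	uint32_t curr = pool_dma;
	unsigned int i;

	for (i = 0; i < n; i++, curr += sizeof(struct desc))
		ring[i].next_desc = (i == n - 1)
			? pool_dma			 /* wrap: close the ring */
			: curr + sizeof(struct desc);	 /* bus address of slot i+1 */
}
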
curr               52 drivers/usb/chipidea/udc.h 	__le32 curr;
curr              639 drivers/usb/gadget/udc/goku_udc.c 	u32				curr, master;
curr              660 drivers/usb/gadget/udc/goku_udc.c 		curr = readl(&regs->in_dma_current);
curr              662 drivers/usb/gadget/udc/goku_udc.c 		writel(curr, &regs->in_dma_end);
curr              663 drivers/usb/gadget/udc/goku_udc.c 		writel(curr, &regs->in_dma_start);
curr              675 drivers/usb/gadget/udc/goku_udc.c 		curr = readl(&regs->out_dma_current);
curr              677 drivers/usb/gadget/udc/goku_udc.c 		writel(curr, &regs->out_dma_end);
curr              678 drivers/usb/gadget/udc/goku_udc.c 		writel(curr, &regs->out_dma_start);
curr              687 drivers/usb/gadget/udc/goku_udc.c 	req->req.actual = (curr - req->req.dma) + 1;
curr              306 drivers/usb/isp1760/isp1760-hcd.c 	int i, curr;
curr              317 drivers/usb/isp1760/isp1760-hcd.c 	curr = i;
curr              319 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].start = payload_addr;
curr              320 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
curr              321 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].free = 1;
curr              322 drivers/usb/isp1760/isp1760-hcd.c 		payload_addr += priv->memory_pool[curr + i].size;
curr              325 drivers/usb/isp1760/isp1760-hcd.c 	curr = i;
curr              327 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].start = payload_addr;
curr              328 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
curr              329 drivers/usb/isp1760/isp1760-hcd.c 		priv->memory_pool[curr + i].free = 1;
curr              330 drivers/usb/isp1760/isp1760-hcd.c 		payload_addr += priv->memory_pool[curr + i].size;
curr              769 drivers/usb/typec/tcpm/tcpm.c 			unsigned int curr = pdo_max_current(pdo);
curr              771 drivers/usb/typec/tcpm/tcpm.c 			if (curr >= 3000)
curr              773 drivers/usb/typec/tcpm/tcpm.c 			else if (curr >= 1500)
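
The tcpm test buckets a source PDO's advertised current into the three USB Type-C levels (3.0 A, 1.5 A, default). In isolation, with illustrative enum names:

enum src_current { CUR_DEFAULT, CUR_1_5A, CUR_3_0A };

static enum src_current classify_current(unsigned int curr)
{
	if (curr >= 3000)	/* milliamps */
		return CUR_3_0A;
	if (curr >= 1500)
		return CUR_1_5A;
	return CUR_DEFAULT;
}
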
curr              317 drivers/video/fbdev/matrox/matroxfb_Ti3026.c 		Bpp = minfo->curr.final_bppShift;
curr               85 drivers/video/fbdev/matrox/matroxfb_accel.c #define curr_ydstorg(x)	((x)->curr.ydstorg.pixels)
curr              325 drivers/video/fbdev/matrox/matroxfb_base.c 	pos = (minfo->fbcon.var.yoffset * minfo->fbcon.var.xres_virtual + minfo->fbcon.var.xoffset) * minfo->curr.final_bppShift / 32;
curr              326 drivers/video/fbdev/matrox/matroxfb_base.c 	pos += minfo->curr.ydstorg.chunks;
curr              658 drivers/video/fbdev/matrox/matroxfb_base.c 	if (regno >= minfo->curr.cmap_len)
curr              726 drivers/video/fbdev/matrox/matroxfb_base.c 	fix->smem_start = minfo->video.base + minfo->curr.ydstorg.bytes;
curr              727 drivers/video/fbdev/matrox/matroxfb_base.c 	fix->smem_len = minfo->video.len_usable - minfo->curr.ydstorg.bytes;
curr              774 drivers/video/fbdev/matrox/matroxfb_base.c 		minfo->curr.cmap_len = cmap_len;
curr              776 drivers/video/fbdev/matrox/matroxfb_base.c 		minfo->curr.ydstorg.bytes = ydstorg;
curr              777 drivers/video/fbdev/matrox/matroxfb_base.c 		minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2);
curr              779 drivers/video/fbdev/matrox/matroxfb_base.c 			minfo->curr.ydstorg.pixels = ydstorg;
curr              781 drivers/video/fbdev/matrox/matroxfb_base.c 			minfo->curr.ydstorg.pixels = (ydstorg * 8) / var->bits_per_pixel;
curr              782 drivers/video/fbdev/matrox/matroxfb_base.c 		minfo->curr.final_bppShift = matroxfb_get_final_bppShift(minfo, var->bits_per_pixel);
curr              811 drivers/video/fbdev/matrox/matroxfb_base.c 			pos = (var->yoffset * var->xres_virtual + var->xoffset) * minfo->curr.final_bppShift / 32;
curr              812 drivers/video/fbdev/matrox/matroxfb_base.c 			pos += minfo->curr.ydstorg.chunks;
curr              350 drivers/video/fbdev/matrox/matroxfb_base.h 	struct matroxfb_par	curr;
curr              247 drivers/video/fbdev/matrox/matroxfb_misc.c 	divider = minfo->curr.final_bppShift;
curr              277 drivers/video/fbdev/matrox/matroxfb_misc.c 	wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64;
curr              257 drivers/video/fbdev/pm2fb.c 	s32 curr;
curr              266 drivers/video/fbdev/pm2fb.c 					curr = (clk > f) ? clk - f : f - clk;
curr              267 drivers/video/fbdev/pm2fb.c 					if (curr < delta) {
curr              268 drivers/video/fbdev/pm2fb.c 						delta = curr;
curr             1105 drivers/virtio/virtio_ring.c 	u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
curr             1144 drivers/virtio/virtio_ring.c 	curr = id;
curr             1166 drivers/virtio/virtio_ring.c 				vq->packed.desc_extra[curr].addr = addr;
curr             1167 drivers/virtio/virtio_ring.c 				vq->packed.desc_extra[curr].len = sg->length;
curr             1168 drivers/virtio/virtio_ring.c 				vq->packed.desc_extra[curr].flags =
curr             1171 drivers/virtio/virtio_ring.c 			prev = curr;
curr             1172 drivers/virtio/virtio_ring.c 			curr = vq->packed.desc_state[curr].next;
curr             1191 drivers/virtio/virtio_ring.c 	vq->free_head = curr;
curr             1286 drivers/virtio/virtio_ring.c 	unsigned int i, curr;
curr             1298 drivers/virtio/virtio_ring.c 		curr = id;
curr             1301 drivers/virtio/virtio_ring.c 				&vq->packed.desc_extra[curr]);
curr             1302 drivers/virtio/virtio_ring.c 			curr = vq->packed.desc_state[curr].next;
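
The packed virtqueue keeps its descriptors on an array-backed chain: desc_state[curr].next holds the index of the following slot, so both the fill loop and the detach loop chase indices rather than pointers, and vq->free_head ends up at whatever curr reaches. The traversal in miniature:

#include <stdint.h>

struct desc_state { uint16_t next; };

/* Follow len links starting at id; the result becomes the new
 * free-list head, as in vq->free_head = curr above. */
static uint16_t walk_chain(const struct desc_state *state,
			   uint16_t id, unsigned int len)
{
	uint16_t curr = id;
	unsigned int i;

	for (i = 0; i < len; i++)
		curr = state[curr].next;
	return curr;
}
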
curr               43 drivers/visorbus/visorchipset.c 	u8 *curr;
curr              587 drivers/visorbus/visorchipset.c 	ctx->curr = (char *)&phdr + phdr->name_offset;
curr              589 drivers/visorbus/visorchipset.c 	return parser_string_get(ctx->curr, phdr->name_length);
curr              356 fs/afs/dir.c   	unsigned offset, next, curr;
curr              362 fs/afs/dir.c   	curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
curr              376 fs/afs/dir.c   			if (offset >= curr)
curr              390 fs/afs/dir.c   		       (offset < curr ? "skip" : "fill"),
curr              419 fs/afs/dir.c   		if (offset < curr)
curr              699 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *next;
curr              731 fs/btrfs/delayed-inode.c 		curr = next;
curr              732 fs/btrfs/delayed-inode.c 		next = __btrfs_next_delayed_item(curr);
curr              736 fs/btrfs/delayed-inode.c 		if (!btrfs_is_continuous_delayed_item(curr, next))
curr              778 fs/btrfs/delayed-inode.c 	list_for_each_entry_safe(curr, next, &head, tree_list) {
curr              780 fs/btrfs/delayed-inode.c 		write_extent_buffer(leaf, &curr->data,
curr              782 fs/btrfs/delayed-inode.c 				    curr->data_len);
curr              785 fs/btrfs/delayed-inode.c 		btrfs_delayed_item_release_metadata(root, curr);
curr              787 fs/btrfs/delayed-inode.c 		list_del(&curr->tree_list);
curr              788 fs/btrfs/delayed-inode.c 		btrfs_release_delayed_item(curr);
curr              840 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *prev;
curr              845 fs/btrfs/delayed-inode.c 	curr = __btrfs_first_delayed_insertion_item(node);
curr              846 fs/btrfs/delayed-inode.c 	if (!curr)
curr              849 fs/btrfs/delayed-inode.c 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
curr              855 fs/btrfs/delayed-inode.c 	prev = curr;
curr              856 fs/btrfs/delayed-inode.c 	curr = __btrfs_next_delayed_item(prev);
curr              857 fs/btrfs/delayed-inode.c 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
curr              860 fs/btrfs/delayed-inode.c 		btrfs_batch_insert_items(root, path, curr);
curr              879 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *next;
curr              906 fs/btrfs/delayed-inode.c 		curr = next;
curr              907 fs/btrfs/delayed-inode.c 		next = __btrfs_next_delayed_item(curr);
curr              911 fs/btrfs/delayed-inode.c 		if (!btrfs_is_continuous_delayed_item(curr, next))
curr              927 fs/btrfs/delayed-inode.c 	list_for_each_entry_safe(curr, next, &head, tree_list) {
curr              928 fs/btrfs/delayed-inode.c 		btrfs_delayed_item_release_metadata(root, curr);
curr              929 fs/btrfs/delayed-inode.c 		list_del(&curr->tree_list);
curr              930 fs/btrfs/delayed-inode.c 		btrfs_release_delayed_item(curr);
curr              942 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *prev;
curr              948 fs/btrfs/delayed-inode.c 	curr = __btrfs_first_delayed_deletion_item(node);
curr              949 fs/btrfs/delayed-inode.c 	if (!curr)
curr              953 fs/btrfs/delayed-inode.c 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
curr              962 fs/btrfs/delayed-inode.c 		prev = curr;
curr              963 fs/btrfs/delayed-inode.c 		curr = __btrfs_next_delayed_item(prev);
curr              967 fs/btrfs/delayed-inode.c 		if (curr) {
curr              974 fs/btrfs/delayed-inode.c 	btrfs_batch_delete_items(trans, root, path, curr);
curr             1641 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *next;
curr             1643 fs/btrfs/delayed-inode.c 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
curr             1644 fs/btrfs/delayed-inode.c 		list_del(&curr->readdir_list);
curr             1645 fs/btrfs/delayed-inode.c 		if (refcount_dec_and_test(&curr->refs))
curr             1646 fs/btrfs/delayed-inode.c 			kfree(curr);
curr             1649 fs/btrfs/delayed-inode.c 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
curr             1650 fs/btrfs/delayed-inode.c 		list_del(&curr->readdir_list);
curr             1651 fs/btrfs/delayed-inode.c 		if (refcount_dec_and_test(&curr->refs))
curr             1652 fs/btrfs/delayed-inode.c 			kfree(curr);
curr             1665 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr;
curr             1668 fs/btrfs/delayed-inode.c 	list_for_each_entry(curr, del_list, readdir_list) {
curr             1669 fs/btrfs/delayed-inode.c 		if (curr->key.offset > index)
curr             1671 fs/btrfs/delayed-inode.c 		if (curr->key.offset == index) {
curr             1687 fs/btrfs/delayed-inode.c 	struct btrfs_delayed_item *curr, *next;
curr             1702 fs/btrfs/delayed-inode.c 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
curr             1703 fs/btrfs/delayed-inode.c 		list_del(&curr->readdir_list);
curr             1705 fs/btrfs/delayed-inode.c 		if (curr->key.offset < ctx->pos) {
curr             1706 fs/btrfs/delayed-inode.c 			if (refcount_dec_and_test(&curr->refs))
curr             1707 fs/btrfs/delayed-inode.c 				kfree(curr);
curr             1711 fs/btrfs/delayed-inode.c 		ctx->pos = curr->key.offset;
curr             1713 fs/btrfs/delayed-inode.c 		di = (struct btrfs_dir_item *)curr->data;
curr             1723 fs/btrfs/delayed-inode.c 		if (refcount_dec_and_test(&curr->refs))
curr             1724 fs/btrfs/delayed-inode.c 			kfree(curr);
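
The btrfs readdir teardown loops pair list removal with reference dropping: an item is only kfree()d once refcount_dec_and_test() reports the last reference gone. The same shape as a sketch, with C11 atomics standing in for refcount_t:

#include <stdatomic.h>
#include <stdlib.h>

struct item {
	atomic_int  refs;
	struct item *next;
};

static void put_all(struct item *head)
{
	struct item *curr = head, *next;

	while (curr) {
		next = curr->next;			/* grab before a possible free */
		if (atomic_fetch_sub(&curr->refs, 1) == 1)
			free(curr);			/* dropped the last reference */
		curr = next;
	}
}
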
curr              158 fs/btrfs/scrub.c 	int			curr;
curr              546 fs/btrfs/scrub.c 	if (sctx->curr != -1) {
curr              547 fs/btrfs/scrub.c 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
curr              587 fs/btrfs/scrub.c 	sctx->curr = -1;
curr             2024 fs/btrfs/scrub.c 	if (sctx->curr == -1)
curr             2027 fs/btrfs/scrub.c 	sbio = sctx->bios[sctx->curr];
curr             2028 fs/btrfs/scrub.c 	sctx->curr = -1;
curr             2044 fs/btrfs/scrub.c 	while (sctx->curr == -1) {
curr             2046 fs/btrfs/scrub.c 		sctx->curr = sctx->first_free;
curr             2047 fs/btrfs/scrub.c 		if (sctx->curr != -1) {
curr             2048 fs/btrfs/scrub.c 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
curr             2049 fs/btrfs/scrub.c 			sctx->bios[sctx->curr]->next_free = -1;
curr             2050 fs/btrfs/scrub.c 			sctx->bios[sctx->curr]->page_count = 0;
curr             2057 fs/btrfs/scrub.c 	sbio = sctx->bios[sctx->curr];
curr             2703 fs/btrfs/scrub.c 	struct scrub_page *curr, *next;
curr             2714 fs/btrfs/scrub.c 	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
curr             2715 fs/btrfs/scrub.c 		list_del_init(&curr->list);
curr             2716 fs/btrfs/scrub.c 		scrub_page_put(curr);
curr             7649 fs/btrfs/volumes.c 	struct btrfs_device *curr, *next;
curr             7662 fs/btrfs/volumes.c 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
curr             7664 fs/btrfs/volumes.c 		list_del_init(&curr->post_commit_list);
curr             7665 fs/btrfs/volumes.c 		curr->commit_total_bytes = curr->disk_total_bytes;
curr             7666 fs/btrfs/volumes.c 		curr->commit_bytes_used = curr->bytes_used;
curr              100 fs/char_dev.c  	struct char_device_struct *cd, *curr, *prev = NULL;
curr              134 fs/char_dev.c  	for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
curr              135 fs/char_dev.c  		if (curr->major < major)
curr              138 fs/char_dev.c  		if (curr->major > major)
curr              141 fs/char_dev.c  		if (curr->baseminor + curr->minorct <= baseminor)
curr              144 fs/char_dev.c  		if (curr->baseminor >= baseminor + minorct)
curr              156 fs/char_dev.c  		cd->next = curr;
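
The char_dev scan keeps chrdevs[i] sorted by major: the prev/curr pair walks until curr is the first entry that must follow the new range (after the minor-range overlap checks), and cd is spliced in between. The splice, reduced to its list mechanics (a sketch of the pattern, not the full registration logic):

#include <stddef.h>

struct cdev_ent { unsigned int major; struct cdev_ent *next; };

static void insert_sorted(struct cdev_ent **head, struct cdev_ent *cd)
{
	struct cdev_ent *curr, *prev = NULL;

	for (curr = *head; curr; prev = curr, curr = curr->next)
		if (curr->major >= cd->major)
			break;		/* cd goes before curr */

	cd->next = curr;
	if (prev)
		prev->next = cd;
	else
		*head = cd;
}
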
curr              478 fs/coredump.c  	struct core_thread *curr, *next;
curr              489 fs/coredump.c  	while ((curr = next) != NULL) {
curr              490 fs/coredump.c  		next = curr->next;
curr              491 fs/coredump.c  		task = curr->task;
curr              497 fs/coredump.c  		curr->task = NULL;
curr               31 fs/erofs/zpvec.h 	struct page *curr, *next;
curr               40 fs/erofs/zpvec.h 	if (!ctor->curr)
curr               46 fs/erofs/zpvec.h 		kunmap(ctor->curr);
curr               78 fs/erofs/zpvec.h 	ctor->curr = next;
curr               81 fs/erofs/zpvec.h 		kmap_atomic(ctor->curr) : kmap(ctor->curr);
curr               93 fs/erofs/zpvec.h 	ctor->curr = ctor->next = NULL;
curr             1150 fs/ext2/balloc.c 			int curr = my_rsv->rsv_end -
curr             1153 fs/ext2/balloc.c 			if (curr < *count)
curr             1155 fs/ext2/balloc.c 							*count - curr);
curr               31 fs/hfs/bitmap.c 	__be32 *curr, *end;
curr               40 fs/hfs/bitmap.c 	curr = bitmap + (offset / 32);
curr               44 fs/hfs/bitmap.c 	val = *curr;
curr               56 fs/hfs/bitmap.c 	while (++curr < end) {
curr               57 fs/hfs/bitmap.c 		val = *curr;
curr               70 fs/hfs/bitmap.c 	start = (curr - bitmap) * 32 + i;
curr               85 fs/hfs/bitmap.c 	*curr++ = cpu_to_be32(n);
curr               88 fs/hfs/bitmap.c 		n = be32_to_cpu(*curr);
curr               95 fs/hfs/bitmap.c 		*curr++ = cpu_to_be32(0xffffffff);
curr              107 fs/hfs/bitmap.c 	*curr = cpu_to_be32(n);
curr              108 fs/hfs/bitmap.c 	*max = (curr - bitmap) * 32 + i - start;
curr              195 fs/hfs/bitmap.c 	__be32 *curr;
curr              210 fs/hfs/bitmap.c 	curr = HFS_SB(sb)->bitmap + (start / 32);
curr              220 fs/hfs/bitmap.c 			*curr &= cpu_to_be32(mask);
curr              223 fs/hfs/bitmap.c 		*curr++ &= cpu_to_be32(mask);
curr              229 fs/hfs/bitmap.c 		*curr++ = 0;
curr              235 fs/hfs/bitmap.c 		*curr &= cpu_to_be32(mask);
curr               25 fs/hfsplus/bitmap.c 	__be32 *pptr, *curr, *end;
curr               43 fs/hfsplus/bitmap.c 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
curr               52 fs/hfsplus/bitmap.c 	val = *curr;
curr               61 fs/hfsplus/bitmap.c 	curr++;
curr               65 fs/hfsplus/bitmap.c 		while (curr < end) {
curr               66 fs/hfsplus/bitmap.c 			val = *curr;
curr               75 fs/hfsplus/bitmap.c 			curr++;
curr               87 fs/hfsplus/bitmap.c 		curr = pptr = kmap(page);
curr               98 fs/hfsplus/bitmap.c 	start = offset + (curr - pptr) * 32 + i;
curr              115 fs/hfsplus/bitmap.c 	*curr++ = cpu_to_be32(n);
curr              118 fs/hfsplus/bitmap.c 		while (curr < end) {
curr              119 fs/hfsplus/bitmap.c 			n = be32_to_cpu(*curr);
curr              126 fs/hfsplus/bitmap.c 			*curr++ = cpu_to_be32(0xffffffff);
curr              139 fs/hfsplus/bitmap.c 		curr = pptr;
curr              152 fs/hfsplus/bitmap.c 	*curr = cpu_to_be32(n);
curr              155 fs/hfsplus/bitmap.c 	*max = offset + (curr - pptr) * 32 + i - start;
curr              169 fs/hfsplus/bitmap.c 	__be32 *pptr, *curr, *end;
curr              189 fs/hfsplus/bitmap.c 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
curr              200 fs/hfsplus/bitmap.c 			*curr++ &= cpu_to_be32(mask);
curr              203 fs/hfsplus/bitmap.c 		*curr++ &= cpu_to_be32(mask);
curr              209 fs/hfsplus/bitmap.c 		while (curr < end) {
curr              212 fs/hfsplus/bitmap.c 			*curr++ = 0;
curr              223 fs/hfsplus/bitmap.c 		curr = pptr;
curr              230 fs/hfsplus/bitmap.c 		*curr &= cpu_to_be32(mask);
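
Both HFS and HFS+ scan their allocation bitmaps a 32-bit word at a time: fully allocated words (0xffffffff) are skipped outright, the first zero bit inside the first non-full word is found by shifting from the most-significant end, and the bit position is reconstructed as (curr - bitmap) * 32 + i. Without the big-endian conversion and page-cache handling:

#include <stdint.h>
#include <stddef.h>

static size_t find_first_zero_bit32(const uint32_t *bitmap, size_t nwords)
{
	const uint32_t *curr = bitmap, *end = bitmap + nwords;
	uint32_t val;
	int i = 0;

	while (curr < end && *curr == 0xffffffffu)
		curr++;				/* word fully allocated */
	if (curr == end)
		return nwords * 32;		/* nothing free */

	val = *curr;
	while (val & 0x80000000u) {		/* probe from the MSB down */
		val <<= 1;
		i++;
	}
	return (size_t)(curr - bitmap) * 32 + (size_t)i;
}
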
curr             2954 fs/io_uring.c  static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
curr             2957 fs/io_uring.c  	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
curr             2963 fs/io_uring.c  	return autoremove_wake_function(curr, mode, wake_flags, key);
curr              421 fs/nilfs2/alloc.c 				       unsigned long curr, unsigned long max)
curr              425 fs/nilfs2/alloc.c 		     curr % nilfs_palloc_groups_per_desc_block(inode),
curr              426 fs/nilfs2/alloc.c 		     max - curr + 1);
curr               53 fs/nilfs2/cpfile.c 				  __u64 curr,
curr               58 fs/nilfs2/cpfile.c 		     nilfs_cpfile_get_offset(cpfile, curr),
curr               59 fs/nilfs2/cpfile.c 		     max - curr);
curr              483 fs/nilfs2/cpfile.c 	__u64 curr = *cnop, next;
curr              490 fs/nilfs2/cpfile.c 	if (curr == 0) {
curr              496 fs/nilfs2/cpfile.c 		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
curr              499 fs/nilfs2/cpfile.c 		if (curr == 0) {
curr              503 fs/nilfs2/cpfile.c 	} else if (unlikely(curr == ~(__u64)0)) {
curr              508 fs/nilfs2/cpfile.c 	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
curr              509 fs/nilfs2/cpfile.c 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
curr              517 fs/nilfs2/cpfile.c 		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
curr              518 fs/nilfs2/cpfile.c 		curr = ~(__u64)0; /* Terminator */
curr              541 fs/nilfs2/cpfile.c 		curr = next;
curr              546 fs/nilfs2/cpfile.c 	*cnop = curr;
curr              623 fs/nilfs2/cpfile.c 	__u64 curr, prev;
curr              657 fs/nilfs2/cpfile.c 	curr = 0;
curr              662 fs/nilfs2/cpfile.c 		curr = prev;
curr              666 fs/nilfs2/cpfile.c 			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
curr              674 fs/nilfs2/cpfile.c 			cpfile, curr, curr_bh, kaddr);
curr              692 fs/nilfs2/cpfile.c 		cpfile, curr, curr_bh, kaddr);
curr              698 fs/nilfs2/cpfile.c 	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
curr               64 fs/nilfs2/sufile.c nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
curr               69 fs/nilfs2/sufile.c 		     nilfs_sufile_get_offset(sufile, curr),
curr               70 fs/nilfs2/sufile.c 		     max - curr + 1);
curr              406 fs/ocfs2/suballoc.c 	u16 curr, best;
curr              408 fs/ocfs2/suballoc.c 	best = curr = 0;
curr              409 fs/ocfs2/suballoc.c 	while (curr < le16_to_cpu(cl->cl_count)) {
curr              411 fs/ocfs2/suballoc.c 		    le32_to_cpu(cl->cl_recs[curr].c_total))
curr              412 fs/ocfs2/suballoc.c 			best = curr;
curr              413 fs/ocfs2/suballoc.c 		curr++;
curr             1384 fs/ocfs2/suballoc.c 	u16 curr, best;
curr             1388 fs/ocfs2/suballoc.c 	best = curr = 0;
curr             1389 fs/ocfs2/suballoc.c 	while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
curr             1390 fs/ocfs2/suballoc.c 		if (le32_to_cpu(cl->cl_recs[curr].c_free) >
curr             1392 fs/ocfs2/suballoc.c 			best = curr;
curr             1393 fs/ocfs2/suballoc.c 		curr++;
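
Both ocfs2 loops are the same argmax scan over the chain list: curr sweeps the records while best remembers the index with the largest free (or total) count. Standalone:

#include <stdint.h>

static uint16_t most_free_chain(const uint32_t *c_free, uint16_t count)
{
	uint16_t curr, best = 0;

	for (curr = 0; curr < count; curr++)
		if (c_free[curr] > c_free[best])
			best = curr;
	return best;	/* index of the record with the most free bits */
}
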
curr              925 fs/udf/inode.c 		int curr = *c;
curr              926 fs/udf/inode.c 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
curr              928 fs/udf/inode.c 		int8_t etype = (laarr[curr].extLength >> 30);
curr              933 fs/udf/inode.c 			laarr[curr + 2] = laarr[curr + 1];
curr              934 fs/udf/inode.c 			laarr[curr + 1] = laarr[curr];
curr              936 fs/udf/inode.c 			laarr[curr + 3] = laarr[curr + 1];
curr              937 fs/udf/inode.c 			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
curr              943 fs/udf/inode.c 						&laarr[curr].extLocation,
curr              945 fs/udf/inode.c 				laarr[curr].extLength =
curr              948 fs/udf/inode.c 				laarr[curr].extLocation.logicalBlockNum = 0;
curr              949 fs/udf/inode.c 				laarr[curr].extLocation.
curr              952 fs/udf/inode.c 				laarr[curr].extLength = (etype << 30) |
curr              954 fs/udf/inode.c 			curr++;
curr              959 fs/udf/inode.c 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
curr              961 fs/udf/inode.c 			laarr[curr].extLocation.partitionReferenceNum =
curr              963 fs/udf/inode.c 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
curr              965 fs/udf/inode.c 		curr++;
curr              969 fs/udf/inode.c 				laarr[curr].extLocation.logicalBlockNum +=
curr              971 fs/udf/inode.c 			laarr[curr].extLength = (etype << 30) |
curr              973 fs/udf/inode.c 			curr++;
curr             1661 fs/udf/super.c 	struct udf_vds_record *curr;
curr             1717 fs/udf/super.c 			curr = get_volume_descriptor_record(ident, bh, &data);
curr             1718 fs/udf/super.c 			if (IS_ERR(curr)) {
curr             1720 fs/udf/super.c 				return PTR_ERR(curr);
curr             1723 fs/udf/super.c 			if (!curr)
curr             1725 fs/udf/super.c 			if (vdsn >= curr->volDescSeqNum) {
curr             1726 fs/udf/super.c 				curr->volDescSeqNum = vdsn;
curr             1727 fs/udf/super.c 				curr->block = block;
curr             1488 fs/xfs/libxfs/xfs_da_btree.c 	struct xfs_da_blkinfo	*curr;
curr             1526 fs/xfs/libxfs/xfs_da_btree.c 		curr = blk->bp->b_addr;
curr             1527 fs/xfs/libxfs/xfs_da_btree.c 		magic = be16_to_cpu(curr->magic);
curr               15 include/acpi/video.h 	int curr;
curr              103 include/drm/bridge/dw_hdmi.h 	u16 curr[DW_HDMI_RES_MAX];
curr              569 include/linux/lockdep.h extern void print_irqtrace_events(struct task_struct *curr);
curr              571 include/linux/lockdep.h static inline void print_irqtrace_events(struct task_struct *curr)
curr               56 include/linux/page_ext.h static inline struct page_ext *page_ext_next(struct page_ext *curr)
curr               58 include/linux/page_ext.h 	void *next = curr;
curr               57 include/linux/sched/cputime.h extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
curr               28 include/linux/skmsg.h 	u32				curr;
curr              549 include/linux/xarray.h 	void *curr;
curr              552 include/linux/xarray.h 	curr = __xa_store(xa, index, entry, gfp);
curr              555 include/linux/xarray.h 	return curr;
curr              575 include/linux/xarray.h 	void *curr;
curr              578 include/linux/xarray.h 	curr = __xa_store(xa, index, entry, gfp);
curr              581 include/linux/xarray.h 	return curr;
curr              650 include/linux/xarray.h 	void *curr;
curr              653 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
curr              656 include/linux/xarray.h 	return curr;
curr              677 include/linux/xarray.h 	void *curr;
curr              680 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
curr              683 include/linux/xarray.h 	return curr;
curr              704 include/linux/xarray.h 	void *curr;
curr              707 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
curr              710 include/linux/xarray.h 	return curr;
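
The xarray inline wrappers above all share one shape: take the appropriate lock flavor, call the __-prefixed worker, unlock, and return the entry that previously occupied the slot. A pthread sketch of that convention (xa_store()/__xa_store() are the real API; these bodies are illustrative only):

#include <pthread.h>

struct xa { pthread_mutex_t lock; void *slot; };

static void *__store(struct xa *xa, void *entry)
{
	void *old = xa->slot;	/* caller holds xa->lock */

	xa->slot = entry;
	return old;
}

static void *store(struct xa *xa, void *entry)
{
	void *curr;

	pthread_mutex_lock(&xa->lock);
	curr = __store(xa, entry);
	pthread_mutex_unlock(&xa->lock);
	return curr;		/* the previously stored entry */
}
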
curr               74 include/media/drv-intf/saa7146_vv.h 	struct saa7146_buf	*curr;
curr               17 include/sound/info.h 	unsigned int curr;	/* current position in buffer */
curr              762 include/uapi/linux/cdrom.h 	__u8 curr		: 1;
curr              764 include/uapi/linux/cdrom.h 	__u8 curr		: 1;
curr              789 include/uapi/linux/cdrom.h 	__u8 curr		: 1;
curr              791 include/uapi/linux/cdrom.h 	__u8 curr		: 1;
curr              919 include/uapi/linux/cdrom.h 	__u8 curr:1;
curr              921 include/uapi/linux/cdrom.h 	__u8 curr:1;
curr              271 ipc/sem.c      		struct sem *curr;
curr              272 ipc/sem.c      		curr = &sma->sems[q->sops[0].sem_num];
curr              274 ipc/sem.c      		list_add_tail(&q->list, &curr->pending_alter);
curr              630 ipc/sem.c      	struct sem *curr;
curr              640 ipc/sem.c      		curr = &sma->sems[idx];
curr              642 ipc/sem.c      		result = curr->semval;
curr              661 ipc/sem.c      		curr->semval = result;
curr              702 ipc/sem.c      	struct sem *curr;
curr              722 ipc/sem.c      		curr = &sma->sems[idx];
curr              724 ipc/sem.c      		result = curr->semval;
curr              746 ipc/sem.c      		curr = &sma->sems[sop->sem_num];
curr              748 ipc/sem.c      		result = curr->semval;
curr              755 ipc/sem.c      		curr->semval += sem_op;
curr              756 ipc/sem.c      		ipc_update_pid(&curr->sempid, q->pid);
curr             1329 ipc/sem.c      	struct sem *curr;
curr             1369 ipc/sem.c      	curr = &sma->sems[semnum];
curr             1375 ipc/sem.c      	curr->semval = val;
curr             1376 ipc/sem.c      	ipc_update_pid(&curr->sempid, task_tgid(current));
curr             1390 ipc/sem.c      	struct sem *curr;
curr             1524 ipc/sem.c      	curr = &sma->sems[semnum];
curr             1528 ipc/sem.c      		err = curr->semval;
curr             1531 ipc/sem.c      		err = pid_vnr(curr->sempid);
curr             2122 ipc/sem.c      		struct sem *curr;
curr             2124 ipc/sem.c      		curr = &sma->sems[idx];
curr             2133 ipc/sem.c      						&curr->pending_alter);
curr             2136 ipc/sem.c      			list_add_tail(&queue.list, &curr->pending_const);
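
The semop paths above read curr->semval, compute the would-be result, and only commit (updating semval and sempid) when the operation can proceed. The core check as a sketch:

#include <stdbool.h>

struct sem { int semval; };

/* Apply one operation if it would not drive the count negative;
 * a false return is where the kernel would queue the task. */
static bool sem_apply(struct sem *curr, int sem_op)
{
	int result = curr->semval + sem_op;

	if (result < 0)
		return false;
	curr->semval = result;
	return true;
}
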
curr              337 kernel/bpf/core.c 				s32 end_new, s32 curr, const bool probe_pass)
curr              343 kernel/bpf/core.c 	if (curr < pos && curr + imm + 1 >= end_old)
curr              345 kernel/bpf/core.c 	else if (curr >= end_new && curr + imm + 1 < end_new)
curr              355 kernel/bpf/core.c 				s32 end_new, s32 curr, const bool probe_pass)
curr              361 kernel/bpf/core.c 	if (curr < pos && curr + off + 1 >= end_old)
curr              363 kernel/bpf/core.c 	else if (curr >= end_new && curr + off + 1 < end_new)
curr              976 kernel/events/uprobes.c 	struct map_info *curr = NULL;
curr             1007 kernel/events/uprobes.c 		info->next = curr;
curr             1008 kernel/events/uprobes.c 		curr = info;
curr             1018 kernel/events/uprobes.c 	prev = curr;
curr             1019 kernel/events/uprobes.c 	while (curr) {
curr             1020 kernel/events/uprobes.c 		mmput(curr->mm);
curr             1021 kernel/events/uprobes.c 		curr = curr->next;
curr             1027 kernel/events/uprobes.c 			curr = ERR_PTR(-ENOMEM);
curr             1038 kernel/events/uprobes.c 	return curr;
curr              329 kernel/futex.c static void compat_exit_robust_list(struct task_struct *curr);
curr              331 kernel/futex.c static inline void compat_exit_robust_list(struct task_struct *curr) { }
curr              916 kernel/futex.c static void exit_pi_state_list(struct task_struct *curr)
curr              918 kernel/futex.c 	struct list_head *next, *head = &curr->pi_state_list;
curr              930 kernel/futex.c 	raw_spin_lock_irq(&curr->pi_lock);
curr              948 kernel/futex.c 			raw_spin_unlock_irq(&curr->pi_lock);
curr              950 kernel/futex.c 			raw_spin_lock_irq(&curr->pi_lock);
curr              953 kernel/futex.c 		raw_spin_unlock_irq(&curr->pi_lock);
curr              957 kernel/futex.c 		raw_spin_lock(&curr->pi_lock);
curr              970 kernel/futex.c 		WARN_ON(pi_state->owner != curr);
curr              975 kernel/futex.c 		raw_spin_unlock(&curr->pi_lock);
curr              982 kernel/futex.c 		raw_spin_lock_irq(&curr->pi_lock);
curr              984 kernel/futex.c 	raw_spin_unlock_irq(&curr->pi_lock);
curr              987 kernel/futex.c static inline void exit_pi_state_list(struct task_struct *curr) { }
curr             3566 kernel/futex.c static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
curr             3616 kernel/futex.c 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
curr             3694 kernel/futex.c static void exit_robust_list(struct task_struct *curr)
curr             3696 kernel/futex.c 	struct robust_list_head __user *head = curr->robust_list;
curr             3737 kernel/futex.c 						curr, pi, HANDLE_DEATH_LIST))
curr             3755 kernel/futex.c 				   curr, pip, HANDLE_DEATH_PENDING);
curr             3994 kernel/futex.c static void compat_exit_robust_list(struct task_struct *curr)
curr             3996 kernel/futex.c 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
curr             4041 kernel/futex.c 			if (handle_futex_death(uaddr, curr, pi,
curr             4061 kernel/futex.c 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
curr              567 kernel/gcov/fs.c 	char *curr;
curr              577 kernel/gcov/fs.c 	for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
curr              578 kernel/gcov/fs.c 		if (curr == next)
curr              581 kernel/gcov/fs.c 		if (strcmp(curr, ".") == 0)
curr              583 kernel/gcov/fs.c 		if (strcmp(curr, "..") == 0) {
curr              589 kernel/gcov/fs.c 		node = get_child_by_name(parent, curr);
curr              591 kernel/gcov/fs.c 			node = new_node(parent, NULL, curr);
curr              598 kernel/gcov/fs.c 	node = new_node(parent, info, curr);
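
gcov's fs code splits a slash-separated path component by component: curr marks the start of the current component, strchr() finds the '/' that ends it, empty and "." components are skipped, and whatever remains after the loop is the leaf name. A sketch that prints components instead of creating tree nodes:

#include <stdio.h>
#include <string.h>

static void walk_components(const char *filename)
{
	const char *curr, *next;

	for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
		if (curr == next)
			continue;	/* empty component, e.g. "//" */
		printf("dir: %.*s\n", (int)(next - curr), curr);
	}
	printf("leaf: %s\n", curr);	/* what new_node() gets last */
}
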
curr             1620 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             1631 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr));
curr             1653 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             1681 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             2028 kernel/locking/lockdep.c print_bad_irq_dependency(struct task_struct *curr,
curr             2049 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr),
curr             2050 kernel/locking/lockdep.c 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
curr             2051 kernel/locking/lockdep.c 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
curr             2052 kernel/locking/lockdep.c 		curr->hardirqs_enabled,
curr             2053 kernel/locking/lockdep.c 		curr->softirqs_enabled);
curr             2082 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             2227 kernel/locking/lockdep.c static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
curr             2296 kernel/locking/lockdep.c 	print_bad_irq_dependency(curr, &this, &that,
curr             2319 kernel/locking/lockdep.c static inline int check_irq_usage(struct task_struct *curr,
curr             2352 kernel/locking/lockdep.c print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
curr             2364 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr));
curr             2371 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             2386 kernel/locking/lockdep.c check_deadlock(struct task_struct *curr, struct held_lock *next)
curr             2392 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             2393 kernel/locking/lockdep.c 		prev = curr->held_locks + i;
curr             2415 kernel/locking/lockdep.c 		print_deadlock_bug(curr, prev, next);
curr             2444 kernel/locking/lockdep.c check_prev_add(struct task_struct *curr, struct held_lock *prev,
curr             2483 kernel/locking/lockdep.c 	if (!check_irq_usage(curr, prev, next))
curr             2554 kernel/locking/lockdep.c check_prevs_add(struct task_struct *curr, struct held_lock *next)
curr             2557 kernel/locking/lockdep.c 	int depth = curr->lockdep_depth;
curr             2571 kernel/locking/lockdep.c 	if (curr->held_locks[depth].irq_context !=
curr             2572 kernel/locking/lockdep.c 			curr->held_locks[depth-1].irq_context)
curr             2576 kernel/locking/lockdep.c 		int distance = curr->lockdep_depth - depth + 1;
curr             2577 kernel/locking/lockdep.c 		hlock = curr->held_locks + depth - 1;
curr             2584 kernel/locking/lockdep.c 			int ret = check_prev_add(curr, hlock, next, distance,
curr             2608 kernel/locking/lockdep.c 		if (curr->held_locks[depth].irq_context !=
curr             2609 kernel/locking/lockdep.c 				curr->held_locks[depth-1].irq_context)
curr             2640 kernel/locking/lockdep.c static inline int get_first_held_lock(struct task_struct *curr,
curr             2646 kernel/locking/lockdep.c 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
curr             2647 kernel/locking/lockdep.c 		hlock_curr = curr->held_locks + i;
curr             2671 kernel/locking/lockdep.c print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
curr             2675 kernel/locking/lockdep.c 	int depth = curr->lockdep_depth;
curr             2676 kernel/locking/lockdep.c 	int i = get_first_held_lock(curr, hlock_next);
curr             2681 kernel/locking/lockdep.c 		hlock = curr->held_locks + i;
curr             2707 kernel/locking/lockdep.c static void print_collision(struct task_struct *curr,
curr             2720 kernel/locking/lockdep.c 	print_chain_keys_held_locks(curr, hlock_next);
curr             2736 kernel/locking/lockdep.c static int check_no_collision(struct task_struct *curr,
curr             2743 kernel/locking/lockdep.c 	i = get_first_held_lock(curr, hlock);
curr             2745 kernel/locking/lockdep.c 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
curr             2746 kernel/locking/lockdep.c 		print_collision(curr, hlock, chain);
curr             2751 kernel/locking/lockdep.c 		id = curr->held_locks[i].class_idx;
curr             2754 kernel/locking/lockdep.c 			print_collision(curr, hlock, chain);
curr             2796 kernel/locking/lockdep.c static inline int add_chain_cache(struct task_struct *curr,
curr             2824 kernel/locking/lockdep.c 	i = get_first_held_lock(curr, hlock);
curr             2825 kernel/locking/lockdep.c 	chain->depth = curr->lockdep_depth + 1 - i;
curr             2828 kernel/locking/lockdep.c 	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
curr             2834 kernel/locking/lockdep.c 			int lock_id = curr->held_locks[i].class_idx;
curr             2879 kernel/locking/lockdep.c static inline int lookup_chain_cache_add(struct task_struct *curr,
curr             2888 kernel/locking/lockdep.c 		if (!check_no_collision(curr, hlock, chain))
curr             2918 kernel/locking/lockdep.c 	if (!add_chain_cache(curr, hlock, chain_key))
curr             2924 kernel/locking/lockdep.c static int validate_chain(struct task_struct *curr,
curr             2939 kernel/locking/lockdep.c 	    lookup_chain_cache_add(curr, hlock, chain_key)) {
curr             2958 kernel/locking/lockdep.c 		int ret = check_deadlock(curr, hlock);
curr             2974 kernel/locking/lockdep.c 			if (!check_prevs_add(curr, hlock))
curr             2988 kernel/locking/lockdep.c static inline int validate_chain(struct task_struct *curr,
curr             3000 kernel/locking/lockdep.c static void check_chain_key(struct task_struct *curr)
curr             3007 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             3008 kernel/locking/lockdep.c 		hlock = curr->held_locks + i;
curr             3016 kernel/locking/lockdep.c 				curr->lockdep_depth, i,
curr             3035 kernel/locking/lockdep.c 	if (chain_key != curr->curr_chain_key) {
curr             3042 kernel/locking/lockdep.c 			curr->lockdep_depth, i,
curr             3044 kernel/locking/lockdep.c 			(unsigned long long)curr->curr_chain_key);
curr             3050 kernel/locking/lockdep.c static int mark_lock(struct task_struct *curr, struct held_lock *this,
curr             3071 kernel/locking/lockdep.c print_usage_bug(struct task_struct *curr, struct held_lock *this,
curr             3087 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr),
curr             3088 kernel/locking/lockdep.c 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
curr             3089 kernel/locking/lockdep.c 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
curr             3090 kernel/locking/lockdep.c 		trace_hardirqs_enabled(curr),
curr             3091 kernel/locking/lockdep.c 		trace_softirqs_enabled(curr));
curr             3097 kernel/locking/lockdep.c 	print_irqtrace_events(curr);
curr             3101 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             3111 kernel/locking/lockdep.c valid_state(struct task_struct *curr, struct held_lock *this,
curr             3115 kernel/locking/lockdep.c 		print_usage_bug(curr, this, bad_bit, new_bit);
curr             3126 kernel/locking/lockdep.c print_irq_inversion_bug(struct task_struct *curr,
curr             3144 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr));
curr             3173 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             3190 kernel/locking/lockdep.c check_usage_forwards(struct task_struct *curr, struct held_lock *this,
curr             3207 kernel/locking/lockdep.c 	print_irq_inversion_bug(curr, &root, target_entry,
curr             3217 kernel/locking/lockdep.c check_usage_backwards(struct task_struct *curr, struct held_lock *this,
curr             3234 kernel/locking/lockdep.c 	print_irq_inversion_bug(curr, &root, target_entry,
curr             3239 kernel/locking/lockdep.c void print_irqtrace_events(struct task_struct *curr)
curr             3241 kernel/locking/lockdep.c 	printk("irq event stamp: %u\n", curr->irq_events);
curr             3243 kernel/locking/lockdep.c 		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
curr             3244 kernel/locking/lockdep.c 		(void *)curr->hardirq_enable_ip);
curr             3246 kernel/locking/lockdep.c 		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
curr             3247 kernel/locking/lockdep.c 		(void *)curr->hardirq_disable_ip);
curr             3249 kernel/locking/lockdep.c 		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
curr             3250 kernel/locking/lockdep.c 		(void *)curr->softirq_enable_ip);
curr             3252 kernel/locking/lockdep.c 		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
curr             3253 kernel/locking/lockdep.c 		(void *)curr->softirq_disable_ip);
curr             3291 kernel/locking/lockdep.c mark_lock_irq(struct task_struct *curr, struct held_lock *this,
curr             3312 kernel/locking/lockdep.c 	if (!valid_state(curr, this, new_bit, excl_bit))
curr             3320 kernel/locking/lockdep.c 			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
curr             3327 kernel/locking/lockdep.c 		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
curr             3331 kernel/locking/lockdep.c 			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
curr             3346 kernel/locking/lockdep.c mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
curr             3351 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             3353 kernel/locking/lockdep.c 		hlock = curr->held_locks + i;
curr             3363 kernel/locking/lockdep.c 		if (!mark_lock(curr, hlock, hlock_bit))
curr             3375 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             3378 kernel/locking/lockdep.c 	curr->hardirqs_enabled = 1;
curr             3384 kernel/locking/lockdep.c 	if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
curr             3391 kernel/locking/lockdep.c 	if (curr->softirqs_enabled)
curr             3392 kernel/locking/lockdep.c 		if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
curr             3395 kernel/locking/lockdep.c 	curr->hardirq_enable_ip = ip;
curr             3396 kernel/locking/lockdep.c 	curr->hardirq_enable_event = ++curr->irq_events;
curr             3447 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             3459 kernel/locking/lockdep.c 	if (curr->hardirqs_enabled) {
curr             3463 kernel/locking/lockdep.c 		curr->hardirqs_enabled = 0;
curr             3464 kernel/locking/lockdep.c 		curr->hardirq_disable_ip = ip;
curr             3465 kernel/locking/lockdep.c 		curr->hardirq_disable_event = ++curr->irq_events;
curr             3477 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             3489 kernel/locking/lockdep.c 	if (curr->softirqs_enabled) {
curr             3498 kernel/locking/lockdep.c 	curr->softirqs_enabled = 1;
curr             3499 kernel/locking/lockdep.c 	curr->softirq_enable_ip = ip;
curr             3500 kernel/locking/lockdep.c 	curr->softirq_enable_event = ++curr->irq_events;
curr             3507 kernel/locking/lockdep.c 	if (curr->hardirqs_enabled)
curr             3508 kernel/locking/lockdep.c 		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
curr             3517 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             3528 kernel/locking/lockdep.c 	if (curr->softirqs_enabled) {
curr             3532 kernel/locking/lockdep.c 		curr->softirqs_enabled = 0;
curr             3533 kernel/locking/lockdep.c 		curr->softirq_disable_ip = ip;
curr             3534 kernel/locking/lockdep.c 		curr->softirq_disable_event = ++curr->irq_events;
curr             3545 kernel/locking/lockdep.c mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
curr             3556 kernel/locking/lockdep.c 			if (curr->hardirq_context)
curr             3557 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock,
curr             3560 kernel/locking/lockdep.c 			if (curr->softirq_context)
curr             3561 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock,
curr             3565 kernel/locking/lockdep.c 			if (curr->hardirq_context)
curr             3566 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
curr             3568 kernel/locking/lockdep.c 			if (curr->softirq_context)
curr             3569 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
curr             3575 kernel/locking/lockdep.c 			if (!mark_lock(curr, hlock,
curr             3578 kernel/locking/lockdep.c 			if (curr->softirqs_enabled)
curr             3579 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock,
curr             3583 kernel/locking/lockdep.c 			if (!mark_lock(curr, hlock,
curr             3586 kernel/locking/lockdep.c 			if (curr->softirqs_enabled)
curr             3587 kernel/locking/lockdep.c 				if (!mark_lock(curr, hlock,
curr             3595 kernel/locking/lockdep.c 	if (!mark_lock(curr, hlock, LOCK_USED))
curr             3606 kernel/locking/lockdep.c static int separate_irq_context(struct task_struct *curr,
curr             3609 kernel/locking/lockdep.c 	unsigned int depth = curr->lockdep_depth;
curr             3617 kernel/locking/lockdep.c 		prev_hlock = curr->held_locks + depth-1;
curr             3632 kernel/locking/lockdep.c static int mark_lock(struct task_struct *curr, struct held_lock *this,
curr             3669 kernel/locking/lockdep.c 		ret = mark_lock_irq(curr, this, new_bit);
curr             3682 kernel/locking/lockdep.c 		print_irqtrace_events(curr);
curr             3692 kernel/locking/lockdep.c mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
curr             3702 kernel/locking/lockdep.c static inline int separate_irq_context(struct task_struct *curr,
curr             3774 kernel/locking/lockdep.c print_lock_nested_lock_not_held(struct task_struct *curr,
curr             3789 kernel/locking/lockdep.c 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
curr             3799 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             3820 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             3860 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             3870 kernel/locking/lockdep.c 		hlock = curr->held_locks + depth - 1;
curr             3888 kernel/locking/lockdep.c 	hlock = curr->held_locks + depth;
curr             3899 kernel/locking/lockdep.c 	hlock->irq_context = task_irq_context(curr);
curr             3912 kernel/locking/lockdep.c 	if (!mark_usage(curr, hlock, check))
curr             3931 kernel/locking/lockdep.c 	chain_key = curr->curr_chain_key;
curr             3942 kernel/locking/lockdep.c 	if (separate_irq_context(curr, hlock)) {
curr             3949 kernel/locking/lockdep.c 		print_lock_nested_lock_not_held(curr, hlock, ip);
curr             3958 kernel/locking/lockdep.c 	if (!validate_chain(curr, hlock, chain_head, chain_key))
curr             3961 kernel/locking/lockdep.c 	curr->curr_chain_key = chain_key;
curr             3962 kernel/locking/lockdep.c 	curr->lockdep_depth++;
curr             3963 kernel/locking/lockdep.c 	check_chain_key(curr);
curr             3968 kernel/locking/lockdep.c 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
curr             3972 kernel/locking/lockdep.c 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
curr             3981 kernel/locking/lockdep.c 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
curr             3982 kernel/locking/lockdep.c 		max_lockdep_depth = curr->lockdep_depth;
curr             3987 kernel/locking/lockdep.c static void print_unlock_imbalance_bug(struct task_struct *curr,
curr             4002 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr));
curr             4008 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             4051 kernel/locking/lockdep.c static struct held_lock *find_held_lock(struct task_struct *curr,
curr             4059 kernel/locking/lockdep.c 	hlock = curr->held_locks + i;
curr             4086 kernel/locking/lockdep.c static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
curr             4095 kernel/locking/lockdep.c 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
curr             4123 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4132 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             4140 kernel/locking/lockdep.c 	hlock = find_held_lock(curr, lock, depth, &i);
curr             4142 kernel/locking/lockdep.c 		print_unlock_imbalance_bug(curr, lock, ip);
curr             4150 kernel/locking/lockdep.c 	curr->lockdep_depth = i;
curr             4151 kernel/locking/lockdep.c 	curr->curr_chain_key = hlock->prev_chain_key;
curr             4153 kernel/locking/lockdep.c 	if (reacquire_held_locks(curr, depth, i, &merged))
curr             4160 kernel/locking/lockdep.c 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
curr             4167 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4175 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             4183 kernel/locking/lockdep.c 	hlock = find_held_lock(curr, lock, depth, &i);
curr             4185 kernel/locking/lockdep.c 		print_unlock_imbalance_bug(curr, lock, ip);
curr             4189 kernel/locking/lockdep.c 	curr->lockdep_depth = i;
curr             4190 kernel/locking/lockdep.c 	curr->curr_chain_key = hlock->prev_chain_key;
curr             4196 kernel/locking/lockdep.c 	if (reacquire_held_locks(curr, depth, i, &merged))
curr             4207 kernel/locking/lockdep.c 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
curr             4223 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4231 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             4237 kernel/locking/lockdep.c 		print_unlock_imbalance_bug(curr, lock, ip);
curr             4245 kernel/locking/lockdep.c 	hlock = find_held_lock(curr, lock, depth, &i);
curr             4247 kernel/locking/lockdep.c 		print_unlock_imbalance_bug(curr, lock, ip);
curr             4274 kernel/locking/lockdep.c 	curr->lockdep_depth = i;
curr             4275 kernel/locking/lockdep.c 	curr->curr_chain_key = hlock->prev_chain_key;
curr             4284 kernel/locking/lockdep.c 	if (reacquire_held_locks(curr, depth, i + 1, &merged))
curr             4292 kernel/locking/lockdep.c 	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
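
The find_held_lock()/reacquire_held_locks() pairs above all perform the same move: to release or re-class a lock held somewhere in the middle of the per-task held_locks array, lockdep truncates the stack at that entry and then re-validates and re-pushes everything that sat above it. A minimal sketch of the underlying stack surgery, with a plain pointer array standing in for struct held_lock (release_middle is a hypothetical name, not a lockdep function):

    #include <assert.h>
    #include <string.h>

    #define MAX_DEPTH 48

    struct lock_stack {
            void *lock[MAX_DEPTH];
            int depth;
    };

    /* Drop lock[i] while keeping the entries above it in order, mirroring
     * "curr->lockdep_depth = i" followed by the reacquire loop. */
    static void release_middle(struct lock_stack *s, int i)
    {
            assert(i >= 0 && i < s->depth);
            memmove(&s->lock[i], &s->lock[i + 1],
                    (s->depth - i - 1) * sizeof(s->lock[0]));
            s->depth--;
    }

lockdep cannot simply memmove: each re-pushed entry goes back through full acquisition checking so chain keys and dependencies stay consistent, which is why reacquire_held_locks() exists and why the final depth is re-verified above.
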
curr             4305 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4308 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             4309 kernel/locking/lockdep.c 		struct held_lock *hlock = curr->held_locks + i;
curr             4325 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4331 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             4332 kernel/locking/lockdep.c 		struct held_lock *hlock = curr->held_locks + i;
curr             4352 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4358 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             4359 kernel/locking/lockdep.c 		struct held_lock *hlock = curr->held_locks + i;
curr             4372 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4378 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             4379 kernel/locking/lockdep.c 		struct held_lock *hlock = curr->held_locks + i;
curr             4592 kernel/locking/lockdep.c static void print_lock_contention_bug(struct task_struct *curr,
curr             4607 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr));
curr             4613 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             4622 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4628 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             4636 kernel/locking/lockdep.c 	hlock = find_held_lock(curr, lock, depth, &i);
curr             4638 kernel/locking/lockdep.c 		print_lock_contention_bug(curr, lock, ip);
curr             4663 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             4670 kernel/locking/lockdep.c 	depth = curr->lockdep_depth;
curr             4678 kernel/locking/lockdep.c 	hlock = find_held_lock(curr, lock, depth, &i);
curr             4680 kernel/locking/lockdep.c 		print_lock_contention_bug(curr, lock, _RET_IP_);
curr             5249 kernel/locking/lockdep.c print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
curr             5263 kernel/locking/lockdep.c 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
curr             5265 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr             5285 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             5294 kernel/locking/lockdep.c 	for (i = 0; i < curr->lockdep_depth; i++) {
curr             5295 kernel/locking/lockdep.c 		hlock = curr->held_locks + i;
curr             5301 kernel/locking/lockdep.c 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
curr             5376 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             5378 kernel/locking/lockdep.c 	if (unlikely(curr->lockdep_depth)) {
curr             5387 kernel/locking/lockdep.c 				curr->comm, curr->pid);
curr             5388 kernel/locking/lockdep.c 		lockdep_print_held_locks(curr);
curr             5400 kernel/locking/lockdep.c 	struct task_struct *curr = current;
curr             5439 kernel/locking/lockdep.c 	lockdep_print_held_locks(curr);
curr              109 kernel/locking/mutex.c 	unsigned long owner, curr = (unsigned long)current;
curr              117 kernel/locking/mutex.c 			if (likely(task != curr))
curr              137 kernel/locking/mutex.c 		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
curr              168 kernel/locking/mutex.c 	unsigned long curr = (unsigned long)current;
curr              171 kernel/locking/mutex.c 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
curr              179 kernel/locking/mutex.c 	unsigned long curr = (unsigned long)current;
curr              181 kernel/locking/mutex.c 	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
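
The three mutex.c hits above are the lock-word fast paths: lock->owner holds the owning task pointer with flag bits packed into its low bits, so trylock is one acquire cmpxchg from 0 to current and unlock is one release cmpxchg back to 0. A stand-alone C11 sketch of the same protocol, with a thread id standing in for the task pointer (my_trylock/my_unlock are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uintptr_t owner;         /* 0 == unlocked */

    static bool my_trylock(uintptr_t self)
    {
            uintptr_t zero = 0;
            /* acquire: critical-section accesses cannot float above this */
            return atomic_compare_exchange_strong_explicit(&owner, &zero, self,
                            memory_order_acquire, memory_order_relaxed);
    }

    static bool my_unlock(uintptr_t self)
    {
            uintptr_t expect = self;
            /* release: critical-section stores become visible before the 0 */
            return atomic_compare_exchange_strong_explicit(&owner, &expect, 0,
                            memory_order_release, memory_order_relaxed);
    }

The packed flag bits are why the handoff path above recomputes curr | flags before its cmpxchg: clearing the task pointer must not clobber waiter state kept in the same word.
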
curr               47 kernel/locking/osq_lock.c 	int curr = encode_cpu(smp_processor_id());
curr               58 kernel/locking/osq_lock.c 		if (atomic_read(&lock->tail) == curr &&
curr               59 kernel/locking/osq_lock.c 		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
curr               94 kernel/locking/osq_lock.c 	int curr = encode_cpu(smp_processor_id());
curr               99 kernel/locking/osq_lock.c 	node->cpu = curr;
curr              107 kernel/locking/osq_lock.c 	old = atomic_xchg(&lock->tail, curr);
curr              209 kernel/locking/osq_lock.c 	int curr = encode_cpu(smp_processor_id());
curr              214 kernel/locking/osq_lock.c 	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
curr              215 kernel/locking/osq_lock.c 					  OSQ_UNLOCKED_VAL) == curr))
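
osq_lock encodes each CPU as a small non-zero integer and keeps only a tail: locking is an unconditional atomic_xchg() on the tail (the old value names your predecessor), and the uncontended unlock is a single cmpxchg that succeeds only if the tail still points at this CPU. The tail protocol alone, sketched (the per-CPU node linking and the spin loop are elided; my_osq_* names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define OSQ_UNLOCKED 0

    static _Atomic int tail = OSQ_UNLOCKED;

    static int my_osq_enqueue(int curr)     /* curr = cpu number + 1, never 0 */
    {
            /* old tail is our predecessor; OSQ_UNLOCKED means we own it now */
            return atomic_exchange(&tail, curr);
    }

    static bool my_osq_unlock_fast(int curr)
    {
            int expect = curr;
            /* only succeeds if nobody queued behind us in the meantime */
            return atomic_compare_exchange_strong(&tail, &expect, OSQ_UNLOCKED);
    }
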
curr              192 kernel/power/process.c 	struct task_struct *curr = current;
curr              212 kernel/power/process.c 		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
curr              217 kernel/power/process.c 	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
curr              218 kernel/power/process.c 	curr->flags &= ~PF_SUSPEND_TASK;
curr              710 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *curr, *zone;
curr              722 kernel/power/snapshot.c 	list_for_each_entry(curr, &bm->zones, list) {
curr              723 kernel/power/snapshot.c 		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
curr              724 kernel/power/snapshot.c 			zone = curr;
curr              246 kernel/sched/core.c 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
curr              509 kernel/sched/core.c 	struct task_struct *curr = rq->curr;
curr              514 kernel/sched/core.c 	if (test_tsk_need_resched(curr))
curr              520 kernel/sched/core.c 		set_tsk_need_resched(curr);
curr              525 kernel/sched/core.c 	if (set_nr_and_not_polling(curr))
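
resched_curr() above sets TIF_NEED_RESCHED on the running task and then kicks the CPU with an IPI only when set_nr_and_not_polling() reports that the target was not already polling its flags word; a polling idle loop notices the new bit by itself. The essence of that test in C11 atomics (the TIF bit values here are illustrative placeholders):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TIF_NEED_RESCHED (1u << 0)
    #define TIF_POLLING      (1u << 1)

    /* set NEED_RESCHED; return true if the CPU must be kicked with an IPI */
    static bool set_nr_and_not_polling_sketch(_Atomic unsigned int *ti_flags)
    {
            unsigned int old = atomic_fetch_or(ti_flags, TIF_NEED_RESCHED);

            return !(old & TIF_POLLING);
    }
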
curr             1416 kernel/sched/core.c 	if (p->sched_class == rq->curr->sched_class) {
curr             1417 kernel/sched/core.c 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
curr             1420 kernel/sched/core.c 			if (class == rq->curr->sched_class)
curr             1433 kernel/sched/core.c 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
curr             2367 kernel/sched/core.c 	if (!is_idle_task(rcu_dereference(rq->curr)))
curr             2374 kernel/sched/core.c 		if (is_idle_task(rq->curr))
curr             3020 kernel/sched/core.c static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
curr             3024 kernel/sched/core.c 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
curr             3028 kernel/sched/core.c static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
curr             3031 kernel/sched/core.c 		__fire_sched_in_preempt_notifiers(curr);
curr             3035 kernel/sched/core.c __fire_sched_out_preempt_notifiers(struct task_struct *curr,
curr             3040 kernel/sched/core.c 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
curr             3045 kernel/sched/core.c fire_sched_out_preempt_notifiers(struct task_struct *curr,
curr             3049 kernel/sched/core.c 		__fire_sched_out_preempt_notifiers(curr, next);
curr             3054 kernel/sched/core.c static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
curr             3059 kernel/sched/core.c fire_sched_out_preempt_notifiers(struct task_struct *curr,
curr             3529 kernel/sched/core.c 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
curr             3531 kernel/sched/core.c 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
curr             3533 kernel/sched/core.c 	prefetch(curr);
curr             3534 kernel/sched/core.c 	prefetch(&curr->exec_start);
curr             3589 kernel/sched/core.c 	struct task_struct *curr = rq->curr;
curr             3597 kernel/sched/core.c 	curr->sched_class->task_tick(rq, curr, 0);
curr             3654 kernel/sched/core.c 	struct task_struct *curr;
curr             3670 kernel/sched/core.c 	curr = rq->curr;
curr             3676 kernel/sched/core.c 	if (!is_idle_task(curr)) {
curr             3681 kernel/sched/core.c 		delta = rq_clock_task(rq) - curr->se.exec_start;
curr             3684 kernel/sched/core.c 	curr->sched_class->task_tick(rq, curr, 0);
curr             4007 kernel/sched/core.c 	prev = rq->curr;
curr             4057 kernel/sched/core.c 		RCU_INIT_POINTER(rq->curr, next);
curr             4335 kernel/sched/core.c int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
curr             4338 kernel/sched/core.c 	return try_to_wake_up(curr->private, mode, wake_flags);
curr             4420 kernel/sched/core.c 		WARN_ON(p != rq->curr);
curr             4620 kernel/sched/core.c 	if (rq->curr != rq->idle)
curr             5694 kernel/sched/core.c 	struct task_struct *curr = current;
curr             5719 kernel/sched/core.c 	if (!curr->sched_class->yield_to_task)
curr             5722 kernel/sched/core.c 	if (curr->sched_class != p->sched_class)
curr             5728 kernel/sched/core.c 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
curr             6056 kernel/sched/core.c 	rcu_assign_pointer(rq->curr, idle);
curr               50 kernel/sched/cputime.c void irqtime_account_irq(struct task_struct *curr)
curr               71 kernel/sched/cputime.c 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
curr              444 kernel/sched/cputime.c void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
curr              447 kernel/sched/cputime.c 	*ut = curr->utime;
curr              448 kernel/sched/cputime.c 	*st = curr->stime;
curr              591 kernel/sched/cputime.c void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
curr              599 kernel/sched/cputime.c 	rtime = curr->sum_exec_runtime;
curr              612 kernel/sched/cputime.c 	stime = curr->stime;
curr              613 kernel/sched/cputime.c 	utime = curr->utime;
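
Both cputime_adjust() variants reconcile the sampled utime/stime split with the precisely accounted rtime = sum_exec_runtime: the sampled ratio is kept, but the two components are rescaled so they sum to rtime. The proportional rescale at its core, sketched (the real function also enforces monotonicity against *prev, omitted here):

    #include <stdint.h>

    static void scale_cputime(uint64_t rtime, uint64_t *ut, uint64_t *st)
    {
            uint64_t total = *ut + *st;

            if (total) {
                    /* keep the sampled ratio, but make ut + st == rtime */
                    *st = (uint64_t)(((__uint128_t)*st * rtime) / total);
                    *ut = rtime - *st;
            } else {
                    *ut = rtime;    /* no samples: attribute it all to user */
                    *st = 0;
            }
    }
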
curr              362 kernel/sched/deadline.c 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
curr             1052 kernel/sched/deadline.c 	if (dl_task(rq->curr))
curr             1181 kernel/sched/deadline.c 	struct task_struct *curr = rq->curr;
curr             1182 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &curr->dl;
curr             1187 kernel/sched/deadline.c 	if (!dl_task(curr) || !on_dl_rq(dl_se))
curr             1199 kernel/sched/deadline.c 	delta_exec = now - curr->se.exec_start;
curr             1206 kernel/sched/deadline.c 	schedstat_set(curr->se.statistics.exec_max,
curr             1207 kernel/sched/deadline.c 		      max(curr->se.statistics.exec_max, delta_exec));
curr             1209 kernel/sched/deadline.c 	curr->se.sum_exec_runtime += delta_exec;
curr             1210 kernel/sched/deadline.c 	account_group_exec_runtime(curr, delta_exec);
curr             1212 kernel/sched/deadline.c 	curr->se.exec_start = now;
curr             1213 kernel/sched/deadline.c 	cgroup_account_cputime(curr, delta_exec);
curr             1228 kernel/sched/deadline.c 						 &curr->dl);
curr             1248 kernel/sched/deadline.c 		__dequeue_task_dl(rq, curr, 0);
curr             1249 kernel/sched/deadline.c 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
curr             1250 kernel/sched/deadline.c 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
curr             1252 kernel/sched/deadline.c 		if (!is_leftmost(curr, &rq->dl))
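
update_curr_dl() above, like its rt and stop-class twins later in this listing, is built on one accounting skeleton: compute delta_exec = now - exec_start, guard against a non-positive delta, fold it into sum_exec_runtime, and restamp exec_start. Reduced:

    #include <stdint.h>

    struct se_sketch {
            uint64_t exec_start;
            uint64_t sum_exec_runtime;
    };

    static int64_t account_exec(struct se_sketch *se, uint64_t now)
    {
            int64_t delta = (int64_t)(now - se->exec_start);

            if (delta <= 0)
                    return 0;       /* clock quirk or no progress: ignore */
            se->sum_exec_runtime += delta;
            se->exec_start = now;
            return delta;
    }

What each class does with the delta differs: deadline consumes it from the entity's runtime budget, CFS converts it to vruntime, rt charges it against the throttling window.
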
curr             1338 kernel/sched/deadline.c 	if (dl_rq->earliest_dl.curr == 0 ||
curr             1339 kernel/sched/deadline.c 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
curr             1340 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = deadline;
curr             1354 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = 0;
curr             1362 kernel/sched/deadline.c 		dl_rq->earliest_dl.curr = entry->deadline;
curr             1585 kernel/sched/deadline.c 	rq->curr->dl.dl_yielded = 1;
curr             1604 kernel/sched/deadline.c 	struct task_struct *curr;
curr             1613 kernel/sched/deadline.c 	curr = READ_ONCE(rq->curr); /* unlocked access */
curr             1624 kernel/sched/deadline.c 	if (unlikely(dl_task(curr)) &&
curr             1625 kernel/sched/deadline.c 	    (curr->nr_cpus_allowed < 2 ||
curr             1626 kernel/sched/deadline.c 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
curr             1632 kernel/sched/deadline.c 					cpu_rq(target)->dl.earliest_dl.curr) ||
curr             1679 kernel/sched/deadline.c 	if (rq->curr->nr_cpus_allowed == 1 ||
curr             1680 kernel/sched/deadline.c 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
curr             1719 kernel/sched/deadline.c 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
curr             1729 kernel/sched/deadline.c 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
curr             1730 kernel/sched/deadline.c 	    !test_tsk_need_resched(rq->curr))
curr             1759 kernel/sched/deadline.c 	if (rq->curr->sched_class != &dl_sched_class)
curr             1980 kernel/sched/deadline.c 					later_rq->dl.earliest_dl.curr)) {
curr             2010 kernel/sched/deadline.c 				   later_rq->dl.earliest_dl.curr))
curr             2060 kernel/sched/deadline.c 	if (WARN_ON(next_task == rq->curr))
curr             2068 kernel/sched/deadline.c 	if (dl_task(rq->curr) &&
curr             2069 kernel/sched/deadline.c 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
curr             2070 kernel/sched/deadline.c 	    rq->curr->nr_cpus_allowed > 1) {
curr             2162 kernel/sched/deadline.c 		    dl_time_before(this_rq->dl.earliest_dl.curr,
curr             2186 kernel/sched/deadline.c 				    this_rq->dl.earliest_dl.curr))) {
curr             2187 kernel/sched/deadline.c 			WARN_ON(p == src_rq->curr);
curr             2195 kernel/sched/deadline.c 					   src_rq->curr->dl.deadline))
curr             2222 kernel/sched/deadline.c 	    !test_tsk_need_resched(rq->curr) &&
curr             2224 kernel/sched/deadline.c 	    dl_task(rq->curr) &&
curr             2225 kernel/sched/deadline.c 	    (rq->curr->nr_cpus_allowed < 2 ||
curr             2226 kernel/sched/deadline.c 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
curr             2272 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
curr             2386 kernel/sched/deadline.c 	if (rq->curr != p) {
curr             2391 kernel/sched/deadline.c 		if (dl_task(rq->curr))
curr             2405 kernel/sched/deadline.c 	if (task_on_rq_queued(p) || rq->curr == p) {
curr             2421 kernel/sched/deadline.c 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
curr              436 kernel/sched/debug.c 	if (rq->curr == p)
curr              646 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
curr              532 kernel/sched/fair.c 	struct sched_entity *curr = cfs_rq->curr;
curr              537 kernel/sched/fair.c 	if (curr) {
curr              538 kernel/sched/fair.c 		if (curr->on_rq)
curr              539 kernel/sched/fair.c 			vruntime = curr->vruntime;
curr              541 kernel/sched/fair.c 			curr = NULL;
curr              548 kernel/sched/fair.c 		if (!curr)
curr              835 kernel/sched/fair.c 	struct sched_entity *curr = cfs_rq->curr;
curr              839 kernel/sched/fair.c 	if (unlikely(!curr))
curr              842 kernel/sched/fair.c 	delta_exec = now - curr->exec_start;
curr              846 kernel/sched/fair.c 	curr->exec_start = now;
curr              848 kernel/sched/fair.c 	schedstat_set(curr->statistics.exec_max,
curr              849 kernel/sched/fair.c 		      max(delta_exec, curr->statistics.exec_max));
curr              851 kernel/sched/fair.c 	curr->sum_exec_runtime += delta_exec;
curr              854 kernel/sched/fair.c 	curr->vruntime += calc_delta_fair(delta_exec, curr);
curr              857 kernel/sched/fair.c 	if (entity_is_task(curr)) {
curr              858 kernel/sched/fair.c 		struct task_struct *curtask = task_of(curr);
curr              860 kernel/sched/fair.c 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
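
update_curr() above adds the wall-clock delta to sum_exec_runtime but advances vruntime by the delta scaled inversely with the entity's load weight, so heavier tasks accrue virtual time more slowly and run longer before the red-black tree passes them over. The scaling, sketched with the kernel's 1024-based nice-0 weight:

    #include <stdint.h>

    #define NICE_0_LOAD 1024u

    /* delta_fair = delta_exec * NICE_0_LOAD / weight */
    static uint64_t calc_delta_fair_sketch(uint64_t delta_exec, uint64_t weight)
    {
            if (weight == NICE_0_LOAD)
                    return delta_exec;      /* common case: no divide */
            return (uint64_t)(((__uint128_t)delta_exec * NICE_0_LOAD) / weight);
    }

The kernel itself avoids the division by keeping a precomputed inverse weight per entity and doing a multiply-and-shift instead.
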
curr              870 kernel/sched/fair.c 	update_curr(cfs_rq_of(&rq->curr->se));
curr             1004 kernel/sched/fair.c 	if (se != cfs_rq->curr)
curr             1022 kernel/sched/fair.c 	if (se != cfs_rq->curr)
curr             1605 kernel/sched/fair.c 	cur = rcu_dereference(dst_rq->curr);
curr             2280 kernel/sched/fair.c 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
curr             2673 kernel/sched/fair.c static void task_tick_numa(struct rq *rq, struct task_struct *curr)
curr             2675 kernel/sched/fair.c 	struct callback_head *work = &curr->numa_work;
curr             2681 kernel/sched/fair.c 	if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
curr             2690 kernel/sched/fair.c 	now = curr->se.sum_exec_runtime;
curr             2691 kernel/sched/fair.c 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
curr             2693 kernel/sched/fair.c 	if (now > curr->node_stamp + period) {
curr             2694 kernel/sched/fair.c 		if (!curr->node_stamp)
curr             2695 kernel/sched/fair.c 			curr->numa_scan_period = task_scan_start(curr);
curr             2696 kernel/sched/fair.c 		curr->node_stamp += period;
curr             2698 kernel/sched/fair.c 		if (!time_before(jiffies, curr->mm->numa_next_scan))
curr             2699 kernel/sched/fair.c 			task_work_add(curr, work, true);
curr             2738 kernel/sched/fair.c static void task_tick_numa(struct rq *rq, struct task_struct *curr)
curr             2881 kernel/sched/fair.c 		if (cfs_rq->curr == se)
curr             3966 kernel/sched/fair.c 	bool curr = cfs_rq->curr == se;
curr             3972 kernel/sched/fair.c 	if (renorm && curr)
curr             3983 kernel/sched/fair.c 	if (renorm && !curr)
curr             4005 kernel/sched/fair.c 	if (!curr)
curr             4091 kernel/sched/fair.c 	if (se != cfs_rq->curr)
curr             4124 kernel/sched/fair.c check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
curr             4130 kernel/sched/fair.c 	ideal_runtime = sched_slice(cfs_rq, curr);
curr             4131 kernel/sched/fair.c 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
curr             4138 kernel/sched/fair.c 		clear_buddies(cfs_rq, curr);
curr             4151 kernel/sched/fair.c 	delta = curr->vruntime - se->vruntime;
curr             4176 kernel/sched/fair.c 	cfs_rq->curr = se;
curr             4194 kernel/sched/fair.c wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
curr             4204 kernel/sched/fair.c pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
curr             4213 kernel/sched/fair.c 	if (!left || (curr && entity_before(curr, left)))
curr             4214 kernel/sched/fair.c 		left = curr;
curr             4225 kernel/sched/fair.c 		if (se == curr) {
curr             4229 kernel/sched/fair.c 			if (!second || (curr && entity_before(curr, second)))
curr             4230 kernel/sched/fair.c 				second = curr;
curr             4277 kernel/sched/fair.c 	cfs_rq->curr = NULL;
curr             4281 kernel/sched/fair.c entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
curr             4291 kernel/sched/fair.c 	update_load_avg(cfs_rq, curr, UPDATE_TG);
curr             4292 kernel/sched/fair.c 	update_cfs_group(curr);
curr             4312 kernel/sched/fair.c 		check_preempt_tick(cfs_rq, curr);
curr             4424 kernel/sched/fair.c 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
curr             4616 kernel/sched/fair.c 	if (rq->curr == rq->idle && rq->cfs.nr_running)
curr             4868 kernel/sched/fair.c 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
curr             5148 kernel/sched/fair.c 			if (rq->curr == p)
curr             5163 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
curr             5165 kernel/sched/fair.c 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
curr             5168 kernel/sched/fair.c 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
curr             5169 kernel/sched/fair.c 		hrtick_start_fair(rq, curr);
curr             6645 kernel/sched/fair.c wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
curr             6647 kernel/sched/fair.c 	s64 gran, vdiff = curr->vruntime - se->vruntime;
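
wakeup_preempt_entity() turns the vruntime gap into a three-way verdict: the waking entity must not merely be behind curr, it must be behind by more than a wakeup granularity (scaled by the waking task's weight) before preemption is allowed, which damps ping-pong preemption between near-equal tasks. Sketched:

    #include <stdint.h>

    /* -1: curr keeps running; 0: se is behind but within gran; 1: preempt */
    static int wakeup_preempt_sketch(int64_t curr_vruntime,
                                     int64_t se_vruntime, int64_t gran)
    {
            int64_t vdiff = curr_vruntime - se_vruntime;

            if (vdiff <= 0)
                    return -1;
            if (vdiff > gran)
                    return 1;
            return 0;
    }
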
curr             6694 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
curr             6695 kernel/sched/fair.c 	struct sched_entity *se = &curr->se, *pse = &p->se;
curr             6696 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
curr             6727 kernel/sched/fair.c 	if (test_tsk_need_resched(curr))
curr             6731 kernel/sched/fair.c 	if (unlikely(task_has_idle_policy(curr)) &&
curr             6768 kernel/sched/fair.c 	if (unlikely(!se->on_rq || curr == rq->idle))
curr             6800 kernel/sched/fair.c 		struct sched_entity *curr = cfs_rq->curr;
curr             6808 kernel/sched/fair.c 		if (curr) {
curr             6809 kernel/sched/fair.c 			if (curr->on_rq)
curr             6812 kernel/sched/fair.c 				curr = NULL;
curr             6830 kernel/sched/fair.c 		se = pick_next_entity(cfs_rq, curr);
curr             6940 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
curr             6941 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
curr             6942 kernel/sched/fair.c 	struct sched_entity *se = &curr->se;
curr             6952 kernel/sched/fair.c 	if (curr->policy != SCHED_BATCH) {
curr             7556 kernel/sched/fair.c 	curr_class = rq->curr->sched_class;
curr             9012 kernel/sched/fair.c 			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
curr             9979 kernel/sched/fair.c static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
curr             9982 kernel/sched/fair.c 	struct sched_entity *se = &curr->se;
curr             9990 kernel/sched/fair.c 		task_tick_numa(rq, curr);
curr             9992 kernel/sched/fair.c 	update_misfit_status(curr, rq);
curr             9993 kernel/sched/fair.c 	update_overutilized_status(task_rq(curr));
curr             10004 kernel/sched/fair.c 	struct sched_entity *se = &p->se, *curr;
curr             10012 kernel/sched/fair.c 	curr = cfs_rq->curr;
curr             10013 kernel/sched/fair.c 	if (curr) {
curr             10015 kernel/sched/fair.c 		se->vruntime = curr->vruntime;
curr             10019 kernel/sched/fair.c 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
curr             10024 kernel/sched/fair.c 		swap(curr->vruntime, se->vruntime);
curr             10047 kernel/sched/fair.c 	if (rq->curr == p) {
curr             10180 kernel/sched/fair.c 		if (rq->curr == p)
curr              428 kernel/sched/idle.c static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
curr              108 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
curr              177 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
curr              243 kernel/sched/membarrier.c 		p = rcu_dereference(rq->curr);
curr              280 kernel/sched/pelt.c 				cfs_rq->curr == se)) {
curr              296 kernel/sched/pelt.c 				cfs_rq->curr != NULL)) {
curr               60 kernel/sched/pelt.h 	if (unlikely(is_idle_task(rq->curr))) {
curr               90 kernel/sched/rt.c 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
curr              162 kernel/sched/rt.c 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
curr              266 kernel/sched/rt.c 	return rq->rt.highest_prio.curr > prev->prio;
curr              488 kernel/sched/rt.c 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
curr              502 kernel/sched/rt.c 		if (rt_rq->highest_prio.curr < curr->prio)
curr              871 kernel/sched/rt.c 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
curr              902 kernel/sched/rt.c 		return rt_rq->highest_prio.curr;
curr              957 kernel/sched/rt.c 	struct task_struct *curr = rq->curr;
curr              958 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &curr->rt;
curr              962 kernel/sched/rt.c 	if (curr->sched_class != &rt_sched_class)
curr              966 kernel/sched/rt.c 	delta_exec = now - curr->se.exec_start;
curr              970 kernel/sched/rt.c 	schedstat_set(curr->se.statistics.exec_max,
curr              971 kernel/sched/rt.c 		      max(curr->se.statistics.exec_max, delta_exec));
curr              973 kernel/sched/rt.c 	curr->se.sum_exec_runtime += delta_exec;
curr              974 kernel/sched/rt.c 	account_group_exec_runtime(curr, delta_exec);
curr              976 kernel/sched/rt.c 	curr->se.exec_start = now;
curr              977 kernel/sched/rt.c 	cgroup_account_cputime(curr, delta_exec);
curr             1064 kernel/sched/rt.c 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
curr             1065 kernel/sched/rt.c 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
curr             1081 kernel/sched/rt.c 	int prev_prio = rt_rq->highest_prio.curr;
curr             1084 kernel/sched/rt.c 		rt_rq->highest_prio.curr = prio;
curr             1092 kernel/sched/rt.c 	int prev_prio = rt_rq->highest_prio.curr;
curr             1105 kernel/sched/rt.c 			rt_rq->highest_prio.curr =
curr             1110 kernel/sched/rt.c 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
curr             1383 kernel/sched/rt.c 	requeue_task_rt(rq, rq->curr, 0);
curr             1392 kernel/sched/rt.c 	struct task_struct *curr;
curr             1402 kernel/sched/rt.c 	curr = READ_ONCE(rq->curr); /* unlocked access */
curr             1426 kernel/sched/rt.c 	if (curr && unlikely(rt_task(curr)) &&
curr             1427 kernel/sched/rt.c 	    (curr->nr_cpus_allowed < 2 ||
curr             1428 kernel/sched/rt.c 	     curr->prio <= p->prio)) {
curr             1436 kernel/sched/rt.c 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
curr             1451 kernel/sched/rt.c 	if (rq->curr->nr_cpus_allowed == 1 ||
curr             1452 kernel/sched/rt.c 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
curr             1495 kernel/sched/rt.c 	if (p->prio < rq->curr->prio) {
curr             1513 kernel/sched/rt.c 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
curr             1533 kernel/sched/rt.c 	if (rq->curr->sched_class != &rt_sched_class)
curr             1725 kernel/sched/rt.c 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
curr             1756 kernel/sched/rt.c 		if (lowest_rq->rt.highest_prio.curr > task->prio)
curr             1806 kernel/sched/rt.c 	if (WARN_ON(next_task == rq->curr))
curr             1814 kernel/sched/rt.c 	if (unlikely(next_task->prio < rq->curr->prio)) {
curr             2092 kernel/sched/rt.c 		    this_rq->rt.highest_prio.curr)
curr             2112 kernel/sched/rt.c 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
curr             2113 kernel/sched/rt.c 			WARN_ON(p == src_rq->curr);
curr             2124 kernel/sched/rt.c 			if (p->prio < src_rq->curr->prio)
curr             2154 kernel/sched/rt.c 	    !test_tsk_need_resched(rq->curr) &&
curr             2156 kernel/sched/rt.c 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
curr             2157 kernel/sched/rt.c 	    (rq->curr->nr_cpus_allowed < 2 ||
curr             2158 kernel/sched/rt.c 	     rq->curr->prio <= p->prio))
curr             2170 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
curr             2228 kernel/sched/rt.c 	if (task_on_rq_queued(p) && rq->curr != p) {
curr             2233 kernel/sched/rt.c 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
curr             2248 kernel/sched/rt.c 	if (rq->curr == p) {
curr             2261 kernel/sched/rt.c 		if (p->prio > rq->rt.highest_prio.curr)
curr             2274 kernel/sched/rt.c 		if (p->prio < rq->curr->prio)
curr              515 kernel/sched/sched.h 	struct sched_entity	*curr;
curr              603 kernel/sched/sched.h 		int		curr; /* highest queued rt task prio */
curr              652 kernel/sched/sched.h 		u64		curr;
curr              905 kernel/sched/sched.h 	struct task_struct	*curr;
curr             1054 kernel/sched/sched.h #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
curr             1625 kernel/sched/sched.h 	return rq->curr == p;
curr             1782 kernel/sched/sched.h 	WARN_ON_ONCE(rq->curr != prev);
curr             1788 kernel/sched/sched.h 	WARN_ON_ONCE(rq->curr != next);
curr              134 kernel/sched/stats.h 	if (unlikely(rq->curr->flags & PF_MEMSTALL))
curr              135 kernel/sched/stats.h 		psi_memstall_tick(rq->curr, cpu_of(rq));
curr               68 kernel/sched/stop_task.c 	struct task_struct *curr = rq->curr;
curr               71 kernel/sched/stop_task.c 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
curr               75 kernel/sched/stop_task.c 	schedstat_set(curr->se.statistics.exec_max,
curr               76 kernel/sched/stop_task.c 			max(curr->se.statistics.exec_max, delta_exec));
curr               78 kernel/sched/stop_task.c 	curr->se.sum_exec_runtime += delta_exec;
curr               79 kernel/sched/stop_task.c 	account_group_exec_runtime(curr, delta_exec);
curr               81 kernel/sched/stop_task.c 	curr->se.exec_start = rq_clock_task(rq);
curr               82 kernel/sched/stop_task.c 	cgroup_account_cputime(curr, delta_exec);
curr               93 kernel/sched/stop_task.c static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
curr               24 kernel/sched/swait.c 	struct swait_queue *curr;
curr               29 kernel/sched/swait.c 	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
curr               30 kernel/sched/swait.c 	wake_up_process(curr->task);
curr               31 kernel/sched/swait.c 	list_del_init(&curr->task_list);
curr               51 kernel/sched/swait.c 	struct swait_queue *curr;
curr               57 kernel/sched/swait.c 		curr = list_first_entry(&tmp, typeof(*curr), task_list);
curr               59 kernel/sched/swait.c 		wake_up_state(curr->task, TASK_NORMAL);
curr               60 kernel/sched/swait.c 		list_del_init(&curr->task_list);
curr               70 kernel/sched/wait.c 	wait_queue_entry_t *curr, *next;
curr               76 kernel/sched/wait.c 		curr = list_next_entry(bookmark, entry);
curr               81 kernel/sched/wait.c 		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
curr               83 kernel/sched/wait.c 	if (&curr->entry == &wq_head->head)
curr               86 kernel/sched/wait.c 	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
curr               87 kernel/sched/wait.c 		unsigned flags = curr->flags;
curr               93 kernel/sched/wait.c 		ret = curr->func(curr, mode, wake_flags, key);
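
swake_up_locked() above wakes exactly the first queued waiter and unlinks it with list_del_init(); the classic wait.c walk instead hands each entry to its func callback (default_wake_function, seen earlier in this listing) and lets exclusive waiters consume wake slots. That dispatch loop, reduced to a bare singly-linked list (wake_up_sketch and struct waiter are illustrative):

    #include <stddef.h>

    #define WQ_EXCLUSIVE 0x01

    struct waiter {
            struct waiter *next;
            unsigned int flags;
            int (*func)(struct waiter *w, void *key);
    };

    static void wake_up_sketch(struct waiter *head, int nr_exclusive, void *key)
    {
            for (struct waiter *w = head; w; w = w->next) {
                    int ret = w->func(w, key);

                    if (ret < 0)
                            break;          /* callback aborted the walk */
                    /* a woken exclusive waiter uses up one wake slot */
                    if (ret && (w->flags & WQ_EXCLUSIVE) && !--nr_exclusive)
                            break;
            }
    }
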
curr             1149 kernel/sys.c   	struct task_struct *curr = current->group_leader;
curr             1151 kernel/sys.c   	if (task_session(curr) != pid)
curr             1152 kernel/sys.c   		change_pid(curr, PIDTYPE_SID, pid);
curr             1154 kernel/sys.c   	if (task_pgrp(curr) != pid)
curr             1155 kernel/sys.c   		change_pid(curr, PIDTYPE_PGID, pid);
curr               78 kernel/time/timer_list.c 	struct timerqueue_node *curr;
curr               88 kernel/time/timer_list.c 	curr = timerqueue_getnext(&base->active);
curr               93 kernel/time/timer_list.c 	while (curr && i < next) {
curr               94 kernel/time/timer_list.c 		curr = timerqueue_iterate_next(curr);
curr               98 kernel/time/timer_list.c 	if (curr) {
curr              100 kernel/time/timer_list.c 		timer = container_of(curr, struct hrtimer, node);
curr              421 kernel/trace/trace_functions_graph.c 		struct ftrace_graph_ent_entry *curr)
curr              433 kernel/trace/trace_functions_graph.c 		curr = &data->ent;
curr              463 kernel/trace/trace_functions_graph.c 			data->ent = *curr;
curr              479 kernel/trace/trace_functions_graph.c 	if (curr->ent.pid != next->ent.pid ||
curr              480 kernel/trace/trace_functions_graph.c 			curr->graph_ent.func != next->ret.func)
curr              405 kernel/trace/trace_sched_wakeup.c 			   struct task_struct *curr,
curr              418 kernel/trace/trace_sched_wakeup.c 	entry->prev_pid			= curr->pid;
curr              419 kernel/trace/trace_sched_wakeup.c 	entry->prev_prio		= curr->prio;
curr              420 kernel/trace/trace_sched_wakeup.c 	entry->prev_state		= task_state_index(curr);
curr              137 lib/dim/net_dim.c static int net_dim_stats_compare(struct dim_stats *curr,
curr              141 lib/dim/net_dim.c 		return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
curr              143 lib/dim/net_dim.c 	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
curr              144 lib/dim/net_dim.c 		return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
curr              148 lib/dim/net_dim.c 		return curr->ppms ? DIM_STATS_BETTER :
curr              151 lib/dim/net_dim.c 	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
curr              152 lib/dim/net_dim.c 		return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
curr              158 lib/dim/net_dim.c 	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
curr              159 lib/dim/net_dim.c 		return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
curr               26 lib/dim/rdma_dim.c static int rdma_dim_stats_compare(struct dim_stats *curr,
curr               33 lib/dim/rdma_dim.c 	if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
curr               34 lib/dim/rdma_dim.c 		return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
curr               37 lib/dim/rdma_dim.c 	if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
curr               38 lib/dim/rdma_dim.c 		return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
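
Both comparators declare a metric changed only when it moves by a significant percentage against the previous sample, so the interrupt-moderation tuner ignores measurement noise when deciding BETTER, WORSE, or SAME. A plausible sketch of that test (the in-tree macro uses a fixed percentage threshold; the 10% here is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* true when |val - ref| exceeds 10% of ref (and ref is nonzero) */
    static bool significant_diff(uint64_t val, uint64_t ref)
    {
            uint64_t d = val > ref ? val - ref : ref - val;

            return ref && (100 * d / ref) > 10;
    }
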
curr               27 lib/sg_split.c 	struct sg_splitter *curr = splitters;
curr               43 lib/sg_split.c 		if (!curr->in_sg0) {
curr               44 lib/sg_split.c 			curr->in_sg0 = sg;
curr               45 lib/sg_split.c 			curr->skip_sg0 = skip;
curr               48 lib/sg_split.c 		curr->nents++;
curr               49 lib/sg_split.c 		curr->length_last_sg = len;
curr               52 lib/sg_split.c 			curr++;
curr               57 lib/sg_split.c 			curr->in_sg0 = sg;
curr               58 lib/sg_split.c 			curr->skip_sg0 = skip;
curr               59 lib/sg_split.c 			curr->nents = 1;
curr               60 lib/sg_split.c 			curr->length_last_sg = len;
curr               66 lib/sg_split.c 			curr++;
curr               75 lib/test_xarray.c 	void *curr;
curr               79 lib/test_xarray.c 		curr = xas_store(&xas, entry);
curr               83 lib/test_xarray.c 	return curr;
curr             1240 lib/xarray.c   	void *curr;
curr             1249 lib/xarray.c   		curr = xas_start(xas);
curr             1250 lib/xarray.c   		if (!curr)
curr             1252 lib/xarray.c   		while (xa_is_node(curr)) {
curr             1253 lib/xarray.c   			struct xa_node *node = xa_to_node(curr);
curr             1254 lib/xarray.c   			curr = xas_descend(xas, node);
curr             1256 lib/xarray.c   		if (curr)
curr             1257 lib/xarray.c   			return curr;
curr             1274 lib/xarray.c   		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
curr             1275 lib/xarray.c   		if (xa_is_sibling(curr))
curr             1277 lib/xarray.c   		while (xa_is_node(curr)) {
curr             1278 lib/xarray.c   			xas->xa_node = xa_to_node(curr);
curr             1280 lib/xarray.c   			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
curr             1282 lib/xarray.c   		if (curr)
curr             1283 lib/xarray.c   			return curr;
curr             1315 lib/xarray.c   static void *xas_result(struct xa_state *xas, void *curr)
curr             1317 lib/xarray.c   	if (xa_is_zero(curr))
curr             1320 lib/xarray.c   		curr = xas->xa_node;
curr             1321 lib/xarray.c   	return curr;
curr             1385 lib/xarray.c   	void *curr;
curr             1393 lib/xarray.c   		curr = xas_store(&xas, entry);
curr             1398 lib/xarray.c   	return xas_result(&xas, curr);
curr             1421 lib/xarray.c   	void *curr;
curr             1424 lib/xarray.c   	curr = __xa_store(xa, index, entry, gfp);
curr             1427 lib/xarray.c   	return curr;
curr             1451 lib/xarray.c   	void *curr;
curr             1457 lib/xarray.c   		curr = xas_load(&xas);
curr             1458 lib/xarray.c   		if (curr == old) {
curr             1460 lib/xarray.c   			if (xa_track_free(xa) && entry && !curr)
curr             1465 lib/xarray.c   	return xas_result(&xas, curr);
curr             1488 lib/xarray.c   	void *curr;
curr             1496 lib/xarray.c   		curr = xas_load(&xas);
curr             1497 lib/xarray.c   		if (!curr) {
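
The stores and compare-exchanges above all follow the XArray's documented retry idiom: do the operation under the lock, then loop through xas_nomem(), which returns true only when the failure was a missing allocation that it has now performed, so the whole operation can simply be replayed. In kernel context (this compiles only against <linux/xarray.h>):

    #include <linux/xarray.h>

    static void *store_retry(struct xarray *xa, unsigned long index,
                             void *entry, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);
            void *curr;

            do {
                    xas_lock(&xas);
                    curr = xas_store(&xas, entry);  /* returns the old entry */
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp));         /* alloc outside the lock */

            return curr;
    }
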
curr               30 lib/zlib_inflate/inftrees.c     unsigned curr;              /* number of index bits for current table */
curr              191 lib/zlib_inflate/inftrees.c     curr = root;                /* current table index bits */
curr              220 lib/zlib_inflate/inftrees.c         fill = 1U << curr;
curr              255 lib/zlib_inflate/inftrees.c             curr = len - drop;
curr              256 lib/zlib_inflate/inftrees.c             left = (int)(1 << curr);
curr              257 lib/zlib_inflate/inftrees.c             while (curr + drop < max) {
curr              258 lib/zlib_inflate/inftrees.c                 left -= count[curr + drop];
curr              260 lib/zlib_inflate/inftrees.c                 curr++;
curr              265 lib/zlib_inflate/inftrees.c             used += 1U << curr;
curr              271 lib/zlib_inflate/inftrees.c             (*table)[low].op = (unsigned char)curr;
curr             1038 lib/zstd/compress.c 		U32 const curr = (U32)(ip - base);
curr             1041 lib/zstd/compress.c 		hashTable[h] = curr; /* update hash table */
curr             1072 lib/zstd/compress.c 			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
curr             1141 lib/zstd/compress.c 		const U32 curr = (U32)(ip - base);
curr             1142 lib/zstd/compress.c 		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
curr             1146 lib/zstd/compress.c 		hashTable[h] = curr; /* update hash table */
curr             1169 lib/zstd/compress.c 				offset = curr - matchIndex;
curr             1182 lib/zstd/compress.c 			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
curr             1287 lib/zstd/compress.c 		U32 const curr = (U32)(ip - base);
curr             1292 lib/zstd/compress.c 		hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
curr             1312 lib/zstd/compress.c 				hashLong[h3] = curr + 1;
curr             1348 lib/zstd/compress.c 			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
curr             1349 lib/zstd/compress.c 			    curr + 2; /* here because curr+2 could be > iend-8 */
curr             1428 lib/zstd/compress.c 		const U32 curr = (U32)(ip - base);
curr             1429 lib/zstd/compress.c 		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
curr             1433 lib/zstd/compress.c 		hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
curr             1447 lib/zstd/compress.c 				offset = curr - matchLongIndex;
curr             1463 lib/zstd/compress.c 				hashLong[h3] = curr + 1;
curr             1469 lib/zstd/compress.c 					offset = curr + 1 - matchIndex3;
curr             1479 lib/zstd/compress.c 					offset = curr - matchIndex;
curr             1502 lib/zstd/compress.c 			hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
curr             1503 lib/zstd/compress.c 			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
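
Both the single- and double-hash fast compressors above store positions, never pointers: curr = ip - base goes into the hash table(s) unconditionally, and whatever previously occupied the bucket is the match candidate, trusted only after re-reading the input bytes it indexes. The single-entry scheme, reduced (table size and hash constant are illustrative; position 0 doubles as "empty" here, which the real code avoids via its window lowLimit):

    #include <stdint.h>
    #include <string.h>

    #define HASH_LOG 15

    static uint32_t hash_table[1u << HASH_LOG];

    static uint32_t hash4(const uint8_t *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));
            return (v * 2654435761u) >> (32 - HASH_LOG);
    }

    /* return the backward distance of a verified 4-byte match, or 0 */
    static uint32_t find_match(const uint8_t *base, uint32_t curr)
    {
            uint32_t h = hash4(base + curr);
            uint32_t cand = hash_table[h];

            hash_table[h] = curr;           /* always refresh the bucket */
            if (cand && !memcmp(base + cand, base + curr, 4))
                    return curr - cand;
            return 0;
    }
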
curr             1577 lib/zstd/compress.c 	const U32 curr = (U32)(ip - base);
curr             1578 lib/zstd/compress.c 	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
curr             1579 lib/zstd/compress.c 	U32 *smallerPtr = bt + 2 * (curr & btMask);
curr             1583 lib/zstd/compress.c 	U32 matchEndIdx = curr + 8;
curr             1586 lib/zstd/compress.c 	hashTable[h] = curr; /* Update Hash Table */
curr             1638 lib/zstd/compress.c 	if (matchEndIdx > curr + 8)
curr             1639 lib/zstd/compress.c 		return matchEndIdx - curr - 8;
curr             1659 lib/zstd/compress.c 	const U32 curr = (U32)(ip - base);
curr             1660 lib/zstd/compress.c 	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
curr             1662 lib/zstd/compress.c 	U32 *smallerPtr = bt + 2 * (curr & btMask);
curr             1663 lib/zstd/compress.c 	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
curr             1664 lib/zstd/compress.c 	U32 matchEndIdx = curr + 8;
curr             1668 lib/zstd/compress.c 	hashTable[h] = curr; /* Update Hash Table */
curr             1689 lib/zstd/compress.c 			if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
curr             1690 lib/zstd/compress.c 				bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
curr             1720 lib/zstd/compress.c 	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
curr             1832 lib/zstd/compress.c 	const U32 curr = (U32)(ip - base);
curr             1833 lib/zstd/compress.c 	const U32 minChain = curr > chainSize ? curr - chainSize : 0;
curr             1856 lib/zstd/compress.c 			*offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
curr             2091 lib/zstd/compress.c 		U32 curr = (U32)(ip - base);
curr             2095 lib/zstd/compress.c 			const U32 repIndex = (U32)(curr + 1 - offset_1);
curr             2126 lib/zstd/compress.c 				curr++;
curr             2129 lib/zstd/compress.c 					const U32 repIndex = (U32)(curr - offset_1);
curr             2161 lib/zstd/compress.c 					curr++;
curr             2164 lib/zstd/compress.c 						const U32 repIndex = (U32)(curr - offset_1);
curr             2338 lib/zstd/compress.c 	const U32 curr = (U32)(istart - base);
curr             2342 lib/zstd/compress.c 	if (curr > zc->nextToUpdate + 384)
curr             2343 lib/zstd/compress.c 		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
curr             2379 lib/zstd/compress.c 			U32 const curr = (U32)(ip - cctx->base);
curr             2380 lib/zstd/compress.c 			U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
curr             2381 lib/zstd/compress.c 			U32 const correction = curr - newCurr;
curr              237 lib/zstd/huf_compress.c 			U32 curr = nextRankStart;
curr              239 lib/zstd/huf_compress.c 			rankVal[n] = curr;
curr              386 lib/zstd/huf_compress.c 	U32 curr;
curr              402 lib/zstd/huf_compress.c 		rank[n].curr = rank[n].base;
curr              406 lib/zstd/huf_compress.c 		U32 pos = rank[r].curr++;
curr              133 lib/zstd/huf_decompress.c 			U32 const curr = nextRankStart;
curr              135 lib/zstd/huf_decompress.c 			rankVal[n] = curr;
curr              541 lib/zstd/huf_decompress.c 			U32 curr = nextRankStart;
curr              543 lib/zstd/huf_decompress.c 			rankStart[w] = curr;
curr              569 lib/zstd/huf_decompress.c 				U32 curr = nextRankVal;
curr              571 lib/zstd/huf_decompress.c 				rankVal0[w] = curr;
curr              239 lib/zstd/zstd_opt.h 	const U32 curr = (U32)(ip - base);
curr              252 lib/zstd/zstd_opt.h 	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
curr              254 lib/zstd/zstd_opt.h 	U32 *smallerPtr = bt + 2 * (curr & btMask);
curr              255 lib/zstd/zstd_opt.h 	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
curr              256 lib/zstd/zstd_opt.h 	U32 matchEndIdx = curr + 8;
curr              265 lib/zstd/zstd_opt.h 		if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
curr              282 lib/zstd/zstd_opt.h 				matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
curr              293 lib/zstd/zstd_opt.h 	hashTable[h] = curr; /* Update Hash Table */
curr              316 lib/zstd/zstd_opt.h 			matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
curr              351 lib/zstd/zstd_opt.h 	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
curr              740 lib/zstd/zstd_opt.h 		U32 curr = (U32)(ip - base);
curr              750 lib/zstd/zstd_opt.h 				const U32 repIndex = (U32)(curr - repCur);
curr              753 lib/zstd/zstd_opt.h 				if ((repCur > 0 && repCur <= (S32)curr) &&
curr              863 lib/zstd/zstd_opt.h 					const U32 repIndex = (U32)(curr + cur - repCur);
curr              866 lib/zstd/zstd_opt.h 					if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
curr              268 mm/kasan/quarantine.c 	struct qlist_node *curr;
curr              273 mm/kasan/quarantine.c 	curr = from->head;
curr              275 mm/kasan/quarantine.c 	while (curr) {
curr              276 mm/kasan/quarantine.c 		struct qlist_node *next = curr->next;
curr              277 mm/kasan/quarantine.c 		struct kmem_cache *obj_cache = qlink_to_cache(curr);
curr              280 mm/kasan/quarantine.c 			qlist_put(to, curr, obj_cache->size);
curr              282 mm/kasan/quarantine.c 			qlist_put(from, curr, obj_cache->size);
curr              284 mm/kasan/quarantine.c 		curr = next;
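
qlist_move_cache() above is a stable partition of a singly-linked list: walk it once and re-append every node to one of two lists depending on whether the object belongs to the cache being drained. The generic shape (the qlist_* names mirror the quarantine code, but this sketch is free-standing):

    #include <stdbool.h>
    #include <stddef.h>

    struct qnode { struct qnode *next; };
    struct qlist { struct qnode *head, *tail; };

    static void qlist_append(struct qlist *q, struct qnode *n)
    {
            n->next = NULL;
            if (q->tail)
                    q->tail->next = n;
            else
                    q->head = n;
            q->tail = n;
    }

    static void qlist_partition(struct qlist *from, struct qlist *to,
                                bool (*match)(struct qnode *))
    {
            struct qnode *curr = from->head;

            from->head = from->tail = NULL;         /* rebuild 'from' as we go */
            while (curr) {
                    struct qnode *next = curr->next;

                    qlist_append(match(curr) ? to : from, curr);
                    curr = next;
            }
    }
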
curr              655 mm/nommu.c     	struct task_struct *curr = current;
curr              660 mm/nommu.c     		if (curr->vmacache.vmas[i] == vma) {
curr               44 mm/vmacache.c  	struct task_struct *curr;
curr               49 mm/vmacache.c  	curr = current;
curr               50 mm/vmacache.c  	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
curr               55 mm/vmacache.c  		curr->vmacache.seqnum = mm->vmacache_seqnum;
curr               56 mm/vmacache.c  		vmacache_flush(curr);
curr             1385 mm/vmstat.c    			struct list_head *curr;
curr             1390 mm/vmstat.c    			list_for_each(curr, &area->free_list[mtype]) {
curr              790 net/atm/clip.c 					  struct clip_vcc *curr)
curr              792 net/atm/clip.c 	if (!curr) {
curr              793 net/atm/clip.c 		curr = e->vccs;
curr              794 net/atm/clip.c 		if (!curr)
curr              796 net/atm/clip.c 		return curr;
curr              798 net/atm/clip.c 	if (curr == SEQ_NO_VCC_TOKEN)
curr              801 net/atm/clip.c 	curr = curr->next;
curr              803 net/atm/clip.c 	return curr;
curr              221 net/atm/mpc.c  	struct atm_mpoa_qos *curr;
curr              231 net/atm/mpc.c  	curr = qos_head;
curr              232 net/atm/mpc.c  	while (curr != NULL) {
curr              233 net/atm/mpc.c  		if (curr->next == entry) {
curr              234 net/atm/mpc.c  			curr->next = entry->next;
curr              238 net/atm/mpc.c  		curr = curr->next;
curr             1597 net/batman-adv/bridge_loop_avoidance.c 	int i, curr;
curr             1611 net/batman-adv/bridge_loop_avoidance.c 		curr = (bat_priv->bla.bcast_duplist_curr + i);
curr             1612 net/batman-adv/bridge_loop_avoidance.c 		curr %= BATADV_DUPLIST_SIZE;
curr             1613 net/batman-adv/bridge_loop_avoidance.c 		entry = &bat_priv->bla.bcast_duplist[curr];
curr             1637 net/batman-adv/bridge_loop_avoidance.c 	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr             1638 net/batman-adv/bridge_loop_avoidance.c 	curr %= BATADV_DUPLIST_SIZE;
curr             1639 net/batman-adv/bridge_loop_avoidance.c 	entry = &bat_priv->bla.bcast_duplist[curr];
curr             1643 net/batman-adv/bridge_loop_avoidance.c 	bat_priv->bla.bcast_duplist_curr = curr;
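
The broadcast duplicate list is a small ring: lookups scan all BATADV_DUPLIST_SIZE slots forward from the current index, and inserts step the index backward so the newest entry recycles the oldest slot. The index arithmetic, isolated (DUPLIST_SIZE here is illustrative):

    #define DUPLIST_SIZE 16

    struct dup_entry { unsigned short crc; /* ... */ };

    static struct dup_entry duplist[DUPLIST_SIZE];
    static int duplist_curr;

    static struct dup_entry *duplist_take_oldest(void)
    {
            /* step back one slot, wrapping without ever going negative */
            int curr = (duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;

            duplist_curr = curr;
            return &duplist[curr];
    }

Adding DUPLIST_SIZE - 1 instead of subtracting 1 keeps the modulo well-defined for non-negative operands, the same trick the bridge_loop_avoidance.c lines above use.
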
curr              582 net/batman-adv/main.c 	int (*curr)(struct sk_buff *skb,
curr              584 net/batman-adv/main.c 	curr = batadv_rx_handler[packet_type];
curr              586 net/batman-adv/main.c 	if (curr != batadv_recv_unhandled_packet &&
curr              587 net/batman-adv/main.c 	    curr != batadv_recv_unhandled_unicast_packet)
curr             1683 net/core/ethtool.c 	struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
curr             1694 net/core/ethtool.c 	dev->ethtool_ops->get_channels(dev, &curr);
curr             1697 net/core/ethtool.c 	if (channels.rx_count > curr.max_rx ||
curr             1698 net/core/ethtool.c 	    channels.tx_count > curr.max_tx ||
curr             1699 net/core/ethtool.c 	    channels.combined_count > curr.max_combined ||
curr             1700 net/core/ethtool.c 	    channels.other_count > curr.max_other)
curr             1713 net/core/ethtool.c 	to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
curr              274 net/core/skmsg.c 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
curr              287 net/core/skmsg.c 		msg->sg.curr = msg->sg.start;
curr              289 net/core/skmsg.c 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
curr              292 net/core/skmsg.c 		msg->sg.curr = i;
curr              344 net/core/skmsg.c 		msg->sg.curr = msg->sg.end;
curr              359 net/core/skmsg.c 	int ret = -ENOSPC, i = msg->sg.curr;
curr              394 net/core/skmsg.c 	msg->sg.curr = i;
curr              377 net/ipv4/inet_fragment.c 			struct sk_buff *curr;
curr              381 net/ipv4/inet_fragment.c 			curr = rb_to_skb(parent);
curr              382 net/ipv4/inet_fragment.c 			curr_run_end = curr->ip_defrag_offset +
curr              383 net/ipv4/inet_fragment.c 					FRAG_CB(curr)->frag_run_len;
curr              384 net/ipv4/inet_fragment.c 			if (end <= curr->ip_defrag_offset)
curr              388 net/ipv4/inet_fragment.c 			else if (offset >= curr->ip_defrag_offset &&
curr              120 net/ipv6/exthdrs.c 	const struct tlvtype_proc *curr;
curr              175 net/ipv6/exthdrs.c 			for (curr = procs; curr->type >= 0; curr++) {
curr              176 net/ipv6/exthdrs.c 				if (curr->type == nh[off]) {
curr              180 net/ipv6/exthdrs.c 					if (curr->func(skb, off) == false)
curr              185 net/ipv6/exthdrs.c 			if (curr->type < 0 &&
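
ip6_parse_tlv() walks a handler table terminated by a negative type; falling off the end (curr->type < 0) means the option was unknown and the generic unknown-option handling runs. The sentinel-table dispatch idiom, reduced:

    #include <stdbool.h>

    struct tlv_proc {
            int type;                       /* < 0 terminates the table */
            bool (*func)(const unsigned char *opt);
    };

    static bool dispatch_tlv(const struct tlv_proc *procs, int type,
                             const unsigned char *opt)
    {
            const struct tlv_proc *curr;

            for (curr = procs; curr->type >= 0; curr++)
                    if (curr->type == type)
                            return curr->func(opt);
            return false;                   /* unknown: caller handles it */
    }
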
curr               51 net/netfilter/ipset/ip_set_hash_gen.h tune_ahash_max(u8 curr, u32 multi)
curr               55 net/netfilter/ipset/ip_set_hash_gen.h 	if (multi < curr)
curr               56 net/netfilter/ipset/ip_set_hash_gen.h 		return curr;
curr               58 net/netfilter/ipset/ip_set_hash_gen.h 	n = curr + AHASH_INIT_SIZE;
curr               62 net/netfilter/ipset/ip_set_hash_gen.h 	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
curr             1533 net/netfilter/x_tables.c 	struct list_head *head, *curr;
curr             1561 net/netfilter/x_tables.c 		trav->head = trav->curr = is_target ?
curr             1565 net/netfilter/x_tables.c 		trav->curr = trav->curr->next;
curr             1566 net/netfilter/x_tables.c 		if (trav->curr != trav->head)
curr             1570 net/netfilter/x_tables.c 		trav->head = trav->curr = is_target ?
curr             1575 net/netfilter/x_tables.c 		trav->curr = trav->curr->next;
curr             1576 net/netfilter/x_tables.c 		if (trav->curr != trav->head)
curr             1631 net/netfilter/x_tables.c 		if (trav->curr == trav->head)
curr             1633 net/netfilter/x_tables.c 		match = list_entry(trav->curr, struct xt_match, list);
curr             1665 net/netfilter/x_tables.c 		if (trav->curr == trav->head)
curr             1667 net/netfilter/x_tables.c 		target = list_entry(trav->curr, struct xt_target, list);
curr              987 net/packet/af_packet.c static void prb_fill_curr_block(char *curr,
curr              994 net/packet/af_packet.c 	ppd  = (struct tpacket3_hdr *)curr;
curr              996 net/packet/af_packet.c 	pkc->prev = curr;
curr             1012 net/packet/af_packet.c 	char *curr, *end;
curr             1038 net/packet/af_packet.c 	curr = pkc->nxt_offset;
curr             1043 net/packet/af_packet.c 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
curr             1044 net/packet/af_packet.c 		prb_fill_curr_block(curr, pkc, pbd, len);
curr             1045 net/packet/af_packet.c 		return (void *)curr;
curr             1052 net/packet/af_packet.c 	curr = (char *)prb_dispatch_next_block(pkc, po);
curr             1053 net/packet/af_packet.c 	if (curr) {
curr             1055 net/packet/af_packet.c 		prb_fill_curr_block(curr, pkc, pbd, len);
curr             1056 net/packet/af_packet.c 		return (void *)curr;
curr             1070 net/packet/af_packet.c 	char *curr = NULL;
curr             1074 net/packet/af_packet.c 		curr = packet_lookup_frame(po, &po->rx_ring,
curr             1076 net/packet/af_packet.c 		return curr;
curr              999 net/rds/rds.h  void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
curr               74 net/rds/threads.c void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
curr               76 net/rds/threads.c 	if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
curr              311 net/rfkill/core.c 	bool prev, curr;
curr              357 net/rfkill/core.c 	curr = rfkill->state & RFKILL_BLOCK_SW;
curr              363 net/rfkill/core.c 	if (prev != curr)
curr             1250 net/sctp/associola.c static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
curr             1255 net/sctp/associola.c 	if (best == NULL || curr == best)
curr             1256 net/sctp/associola.c 		return curr;
curr             1258 net/sctp/associola.c 	score_curr = sctp_trans_score(curr);
curr             1266 net/sctp/associola.c 		return curr;
curr             1268 net/sctp/associola.c 		return sctp_trans_elect_tie(best, curr);
curr              403 net/tls/tls_sw.c 					 &msg_en->sg.data[msg_en->sg.curr],
curr              439 net/tls/tls_sw.c 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
curr              510 net/tls/tls_sw.c 	msg_en->sg.curr = start;
curr              593 net/tls/tls_sw.c 	msg_opl->sg.curr = i;
curr              623 net/tls/tls_sw.c 	msg_npl->sg.curr = j;
curr              652 net/tls/tls_sw.c 	msg_opl->sg.curr = orig_end;
curr              386 scripts/kconfig/conf.c 		if (sym->curr.tri != mod)
curr              541 scripts/kconfig/confdata.c 				if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val))
curr             1182 scripts/kconfig/confdata.c 	if (csym->curr.tri != yes)
curr             1045 scripts/kconfig/expr.c 		return e->left.sym->curr.tri;
curr               98 scripts/kconfig/expr.h 	struct symbol_value curr;
curr              120 scripts/kconfig/lkc.h 	return sym->curr.tri;
curr              126 scripts/kconfig/lkc.h 	return (struct symbol *)sym->curr.val;
curr               16 scripts/kconfig/symbol.c 	.curr = { "y", yes },
curr               20 scripts/kconfig/symbol.c 	.curr = { "m", mod },
curr               24 scripts/kconfig/symbol.c 	.curr = { "n", no },
curr               28 scripts/kconfig/symbol.c 	.curr = { "", no },
curr              114 scripts/kconfig/symbol.c 	return strtoll(sym->curr.val, NULL, base);
curr              137 scripts/kconfig/symbol.c 	val = strtoll(sym->curr.val, NULL, base);
curr              148 scripts/kconfig/symbol.c 	sym->curr.val = xstrdup(str);
curr              191 scripts/kconfig/symbol.c 		    prop->visible.tri == mod && choice_sym->curr.tri == yes)
curr              293 scripts/kconfig/symbol.c 		sym->curr.tri = no;
curr              340 scripts/kconfig/symbol.c 	oldval = sym->curr;
curr              346 scripts/kconfig/symbol.c 		newval = symbol_empty.curr;
curr              350 scripts/kconfig/symbol.c 		newval = symbol_no.curr;
curr              353 scripts/kconfig/symbol.c 		sym->curr.val = sym->name;
curr              354 scripts/kconfig/symbol.c 		sym->curr.tri = no;
curr              365 scripts/kconfig/symbol.c 	sym->curr = newval;
curr              372 scripts/kconfig/symbol.c 			newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? yes : no;
curr              421 scripts/kconfig/symbol.c 				newval.val = ds->curr.val;
curr              429 scripts/kconfig/symbol.c 	sym->curr = newval;
curr              431 scripts/kconfig/symbol.c 		sym->curr.val = sym_calc_choice(sym);
curr              434 scripts/kconfig/symbol.c 	if (memcmp(&oldval, &sym->curr, sizeof(oldval))) {
curr              438 scripts/kconfig/symbol.c 			modules_val = modules_sym->curr.tri;
curr              703 scripts/kconfig/symbol.c 	val = symbol_no.curr.tri;
curr              704 scripts/kconfig/symbol.c 	str = symbol_empty.curr.val;
curr              724 scripts/kconfig/symbol.c 				str = (const char *)ds->curr.val;
curr              734 scripts/kconfig/symbol.c 		if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no)
curr              777 scripts/kconfig/symbol.c 			return (modules_sym->curr.tri == no) ? "n" : "m";
curr              785 scripts/kconfig/symbol.c 	return (const char *)sym->curr.val;
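
The scripts/kconfig block is the densest user of the name: expr.h declares struct symbol_value curr on every symbol, holding both the string form and the tristate, and sym_calc_value() snapshots oldval = sym->curr, computes newval, and memcmp()s the two to decide whether dependants need recalculating. A simplified sketch of that pair; the recalc() helper and the FOO symbol are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum tristate { no, mod, yes };

    struct symbol_value {
            const char *val;        /* textual value, e.g. "y" */
            enum tristate tri;      /* tristate interpretation */
    };

    struct symbol {
            const char *name;
            struct symbol_value curr;
    };

    /* Recompute a symbol and report whether its value changed,
     * mirroring the oldval/newval memcmp() in sym_calc_value(). */
    static bool recalc(struct symbol *sym, struct symbol_value newval)
    {
            struct symbol_value oldval = sym->curr;

            sym->curr = newval;
            return memcmp(&oldval, &sym->curr, sizeof(oldval)) != 0;
    }

    int main(void)
    {
            struct symbol s = { "FOO", { "n", no } };

            if (recalc(&s, (struct symbol_value){ "y", yes }))
                    printf("%s changed to %s\n", s.name, s.curr.val);
            return 0;
    }
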
curr              666 security/apparmor/apparmorfs.c 	struct aa_label *label, *curr;
curr              684 security/apparmor/apparmorfs.c 	curr = begin_current_label_crit_section();
curr              685 security/apparmor/apparmorfs.c 	label = aa_label_parse(curr, query, GFP_KERNEL, false, false);
curr              686 security/apparmor/apparmorfs.c 	end_current_label_crit_section(curr);
curr              757 security/apparmor/apparmorfs.c 	struct aa_label *label, *curr;
curr              780 security/apparmor/apparmorfs.c 	curr = begin_current_label_crit_section();
curr              781 security/apparmor/apparmorfs.c 	label = aa_label_parse(curr, label_name, GFP_KERNEL, false, false);
curr              782 security/apparmor/apparmorfs.c 	end_current_label_crit_section(curr);
curr               83 security/apparmor/include/policy_ns.h bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns);
curr               37 security/apparmor/policy_ns.c bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns)
curr               39 security/apparmor/policy_ns.c 	if (curr == view)
curr               46 security/apparmor/policy_ns.c 		if (view->parent == curr)
curr               61 security/apparmor/policy_ns.c const char *aa_ns_name(struct aa_ns *curr, struct aa_ns *view, bool subns)
curr               64 security/apparmor/policy_ns.c 	if (curr == view)
curr               67 security/apparmor/policy_ns.c 	if (aa_ns_visible(curr, view, subns)) {
curr               74 security/apparmor/policy_ns.c 		return view->base.hname + strlen(curr->base.hname) + 2;
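
In aa_ns_name(), the view's name is returned relative to the current namespace by skipping strlen(curr->base.hname) plus two characters; assuming the +2 skips an AppArmor-style "//" join between hierarchical name components, the suffix computation looks like the sketch below (names hardcoded for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Return view_hname relative to curr_hname, assuming hierarchical
     * names joined with a two-character "//" separator. */
    static const char *relative_name(const char *curr_hname,
                                     const char *view_hname)
    {
            if (strcmp(curr_hname, view_hname) == 0)
                    return "";      /* same namespace: empty suffix */
            return view_hname + strlen(curr_hname) + 2;
    }

    int main(void)
    {
            const char *curr = "root";
            const char *view = "root//child";

            printf("%s\n", relative_name(curr, view)); /* prints "child" */
            return 0;
    }
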
curr              616 sound/core/info.c 		c = buffer->buffer[buffer->curr++];
curr              617 sound/core/info.c 		if (buffer->curr >= buffer->size)
curr               30 sound/core/pcm_lib.c #define trace_applptr(substream, prev, curr)
curr               34 sound/core/pcm_native.c #define trace_hw_mask_param(substream, type, index, prev, curr)
curr               35 sound/core/pcm_native.c #define trace_hw_interval_param(substream, type, index, prev, curr)
curr               29 sound/core/pcm_param_trace.h 	TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_hw_param_t type, int index, const struct snd_mask *prev, const struct snd_mask *curr),
curr               30 sound/core/pcm_param_trace.h 	TP_ARGS(substream, type, index, prev, curr),
curr               51 sound/core/pcm_param_trace.h 		memcpy(__entry->curr_bits, curr->bits, sizeof(__u32) * 8);
curr               69 sound/core/pcm_param_trace.h 	TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_hw_param_t type, int index, const struct snd_interval *prev, const struct snd_interval *curr),
curr               70 sound/core/pcm_param_trace.h 	TP_ARGS(substream, type, index, prev, curr),
curr              106 sound/core/pcm_param_trace.h 		__entry->curr_min = curr->min;
curr              107 sound/core/pcm_param_trace.h 		__entry->curr_max = curr->max;
curr              108 sound/core/pcm_param_trace.h 		__entry->curr_openmin = curr->openmin;
curr              109 sound/core/pcm_param_trace.h 		__entry->curr_openmax = curr->openmax;
curr              110 sound/core/pcm_param_trace.h 		__entry->curr_integer = curr->integer;
curr              111 sound/core/pcm_param_trace.h 		__entry->curr_empty = curr->empty;
curr              107 sound/core/pcm_trace.h 	TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_uframes_t prev, snd_pcm_uframes_t curr),
curr              108 sound/core/pcm_trace.h 	TP_ARGS(substream, prev, curr),
curr              115 sound/core/pcm_trace.h 		__field( snd_pcm_uframes_t, curr )
curr              126 sound/core/pcm_trace.h 		__entry->curr = (curr);
curr              137 sound/core/pcm_trace.h 		__entry->curr,
curr              148 sound/firewire/digi00x/digi00x-stream.c 	u32 curr;
curr              165 sound/firewire/digi00x/digi00x-stream.c 	curr = be32_to_cpu(data);
curr              167 sound/firewire/digi00x/digi00x-stream.c 	if (curr == 0)
curr              168 sound/firewire/digi00x/digi00x-stream.c 		curr = 2;
curr              170 sound/firewire/digi00x/digi00x-stream.c 	curr--;
curr              171 sound/firewire/digi00x/digi00x-stream.c 	while (curr > 0) {
curr              172 sound/firewire/digi00x/digi00x-stream.c 		data = cpu_to_be32(curr);
curr              182 sound/firewire/digi00x/digi00x-stream.c 		curr--;
curr               14 sound/firewire/oxfw/oxfw-proc.c 	struct snd_oxfw_stream_formation formation, curr;
curr               22 sound/firewire/oxfw/oxfw-proc.c 						    &curr);
curr               37 sound/firewire/oxfw/oxfw-proc.c 		if (memcmp(&formation, &curr, sizeof(curr)) == 0)
curr               52 sound/firewire/oxfw/oxfw-proc.c 						    &curr);
curr               67 sound/firewire/oxfw/oxfw-proc.c 		if (memcmp(&formation, &curr, sizeof(curr)) == 0)
curr               99 sound/isa/gus/gus_pcm.c 	unsigned int curr, begin, end;
curr              123 sound/isa/gus/gus_pcm.c 		curr = begin + (pcmp->bpos * pcmp->block_size) / runtime->channels;
curr              124 sound/isa/gus/gus_pcm.c 		end = curr + (pcmp->block_size / runtime->channels);
curr              139 sound/isa/gus/gus_pcm.c 		snd_gf1_write_addr(gus, SNDRV_GF1_VA_CURRENT, curr << 4, voice_ctrl & 4);
curr              603 sound/pci/hda/hda_proc.c 	int c, curr = -1;
curr              611 sound/pci/hda/hda_proc.c 		curr = snd_hda_codec_read(codec, nid, 0,
curr              618 sound/pci/hda/hda_proc.c 			if (c == curr)
curr              685 sound/pci/hda/hda_proc.c 	int i, curr = -1;
curr              695 sound/pci/hda/hda_proc.c 	curr = snd_hda_codec_read(codec, nid, 0,
curr              699 sound/pci/hda/hda_proc.c 		if (i == curr)
curr             1013 sound/pci/hda/patch_hdmi.c 	int mux_idx, curr;
curr             1016 sound/pci/hda/patch_hdmi.c 	curr = snd_hda_codec_read(codec, pin_nid, 0,
curr             1018 sound/pci/hda/patch_hdmi.c 	if (curr != mux_idx)
curr             1053 sound/pci/hda/patch_hdmi.c 	int cvt_idx, curr;
curr             1095 sound/pci/hda/patch_hdmi.c 		curr = snd_hda_codec_read(codec, nid, 0,
curr             1097 sound/pci/hda/patch_hdmi.c 		if (curr != mux_idx) {
curr               18 tools/lib/lockdep/lockdep.c void print_irqtrace_events(struct task_struct *curr)
curr               50 tools/lib/traceevent/kbuffer-parse.c 	unsigned int		curr;
curr              283 tools/lib/traceevent/kbuffer-parse.c 	void *ptr = kbuf->data + kbuf->curr;
curr              308 tools/lib/traceevent/kbuffer-parse.c 		kbuf->curr = kbuf->size;
curr              335 tools/lib/traceevent/kbuffer-parse.c 		kbuf->curr = kbuf->next;
curr              396 tools/lib/traceevent/kbuffer-parse.c 	void *ptr = kbuf->data + kbuf->curr;
curr              453 tools/lib/traceevent/kbuffer-parse.c 		kbuf->curr = kbuf->next;
curr              520 tools/lib/traceevent/kbuffer-parse.c 	kbuf->curr = 0;
curr              562 tools/lib/traceevent/kbuffer-parse.c 	if (kbuf->curr >= kbuf->size)
curr              613 tools/lib/traceevent/kbuffer-parse.c 	while (kbuf->curr < offset) {
curr              647 tools/lib/traceevent/kbuffer-parse.c 	return kbuf->curr;
curr              659 tools/lib/traceevent/kbuffer-parse.c 	return kbuf->curr + kbuf->start;
curr              683 tools/lib/traceevent/kbuffer-parse.c 	return kbuf->next - kbuf->curr;
curr              698 tools/lib/traceevent/kbuffer-parse.c 	if (kbuf->curr)
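
kbuffer-parse keeps two offsets into the raw trace page: curr marks the record being read and next the one after it, so advancing is curr = next plus a recompute, and curr >= size means the page is exhausted. A toy cursor over length-prefixed records built on those assumptions (the record format here is invented for the demo):

    #include <stdio.h>

    struct kbuf_cursor {
            const unsigned char *data;
            unsigned int size;      /* bytes of valid data */
            unsigned int curr;      /* offset of the current record */
            unsigned int next;      /* offset of the following record */
    };

    /* Each toy record is a 1-byte length followed by that many bytes. */
    static const unsigned char *cursor_read(struct kbuf_cursor *c,
                                            unsigned int *len)
    {
            if (c->curr >= c->size)
                    return NULL;            /* page exhausted */
            *len = c->data[c->curr];
            c->next = c->curr + 1 + *len;   /* precompute next offset */
            return c->data + c->curr + 1;
    }

    static void cursor_advance(struct kbuf_cursor *c)
    {
            c->curr = c->next;      /* the same move kbuffer-parse makes */
    }

    int main(void)
    {
            unsigned char page[] = { 3, 'f', 'o', 'o', 2, 'h', 'i' };
            struct kbuf_cursor c = { page, sizeof(page), 0, 0 };
            const unsigned char *p;
            unsigned int len;

            while ((p = cursor_read(&c, &len))) {
                    printf("%.*s\n", (int)len, (const char *)p);
                    cursor_advance(&c);
            }
            return 0;
    }
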
curr              313 tools/perf/tests/builtin-test.c static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[])
curr              325 tools/perf/tests/builtin-test.c 			if (nr == curr + 1)
curr              541 tools/perf/tests/builtin-test.c 		int curr = i++;
curr              549 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(&test, curr, argc, argv))
curr              576 tools/perf/tests/builtin-test.c 		int curr = i++, err;
curr              578 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(t, curr, argc, argv))
curr              651 tools/perf/tests/builtin-test.c 		int curr = i++;
curr              657 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(&t, curr, argc, argv))
curr              674 tools/perf/tests/builtin-test.c 		int curr = i++;
curr              676 tools/perf/tests/builtin-test.c 		if (!perf_test__matches(t, curr, argc, argv) ||
curr               12 tools/perf/ui/gtk/progress.c 	double fraction = p->total ? 1.0 * p->curr / p->total : 0.0;
curr               35 tools/perf/ui/gtk/progress.c 	snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total);
curr               18 tools/perf/ui/progress.c 	u64 last = p->curr;
curr               20 tools/perf/ui/progress.c 	p->curr += adv;
curr               22 tools/perf/ui/progress.c 	if (p->curr >= p->next) {
curr               23 tools/perf/ui/progress.c 		u64 nr = DIV_ROUND_UP(p->curr - last, p->step);
curr               33 tools/perf/ui/progress.c 	p->curr = 0;
curr               11 tools/perf/ui/progress.h 	u64 curr, next, step, total;
curr               21 tools/perf/ui/tui/progress.c 	ret  = unit_number__scnprintf(buf_cur, sizeof(buf_cur), p->curr);
curr               56 tools/perf/ui/tui/progress.c 	bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
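
The perf progress entries show a redraw throttle: curr accumulates work done, next is the threshold for the next repaint, and step is the redraw granularity, so the bar only updates when curr crosses next. A compact sketch with simplified catch-up arithmetic; the total/16 granularity is an assumption, perf derives its step differently:

    #include <stdint.h>
    #include <stdio.h>

    struct progress {
            uint64_t curr, next, step, total;
    };

    static void progress_init(struct progress *p, uint64_t total)
    {
            p->curr = 0;
            p->total = total;
            p->step = total / 16 ? total / 16 : 1;  /* redraw granularity */
            p->next = p->step;
    }

    static void progress_update(struct progress *p, uint64_t adv)
    {
            p->curr += adv;
            if (p->curr < p->next)
                    return;                 /* not worth a repaint yet */
            while (p->next <= p->curr)      /* catch up past big jumps */
                    p->next += p->step;
            printf("\r%3u%%", (unsigned)(100 * p->curr / p->total));
            fflush(stdout);
    }

    int main(void)
    {
            struct progress p;
            uint64_t i;

            progress_init(&p, 1000);
            for (i = 0; i < 1000; i++)
                    progress_update(&p, 1);
            printf("\n");
            return 0;
    }
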
curr              160 tools/perf/util/callchain.h 	struct callchain_cursor_node	*curr;
curr              206 tools/perf/util/callchain.h 	cursor->curr = cursor->first;
curr              217 tools/perf/util/callchain.h 	return cursor->curr;
curr              222 tools/perf/util/callchain.h 	cursor->curr = cursor->curr->next;
curr              261 tools/perf/util/callchain.h 	dest->first = src->curr;
curr              863 tools/perf/util/hist.c 	iter->curr = 0;
curr              881 tools/perf/util/hist.c 	int i = iter->curr;
curr              886 tools/perf/util/hist.c 	if (iter->curr >= iter->total)
curr              903 tools/perf/util/hist.c 	int i = iter->curr;
curr              927 tools/perf/util/hist.c 	iter->curr++;
curr              938 tools/perf/util/hist.c 	return iter->curr >= iter->total ? 0 : -1;
curr             1000 tools/perf/util/hist.c 	iter->curr = 0;
curr             1022 tools/perf/util/hist.c 	he_cache[iter->curr++] = he;
curr             1084 tools/perf/util/hist.c 	for (i = 0; i < iter->curr; i++) {
curr             1098 tools/perf/util/hist.c 	he_cache[iter->curr++] = he;
curr              116 tools/perf/util/hist.h 	int curr;
curr              188 tools/perf/util/symbol.c 	struct symbol *curr, *next;
curr              196 tools/perf/util/symbol.c 		curr = rb_entry(nd, struct symbol, rb_node);
curr              198 tools/perf/util/symbol.c 		nd = rb_next(&curr->rb_node);
curr              204 tools/perf/util/symbol.c 		if (curr->start != next->start)
curr              207 tools/perf/util/symbol.c 		if (choose_best_symbol(curr, next) == SYMBOL_A) {
curr              212 tools/perf/util/symbol.c 			nd = rb_next(&curr->rb_node);
curr              213 tools/perf/util/symbol.c 			rb_erase_cached(&curr->rb_node, symbols);
curr              214 tools/perf/util/symbol.c 			symbol__delete(curr);
curr              222 tools/perf/util/symbol.c 	struct symbol *curr, *prev;
curr              227 tools/perf/util/symbol.c 	curr = rb_entry(prevnd, struct symbol, rb_node);
curr              230 tools/perf/util/symbol.c 		prev = curr;
curr              231 tools/perf/util/symbol.c 		curr = rb_entry(nd, struct symbol, rb_node);
curr              233 tools/perf/util/symbol.c 		if (prev->end == prev->start && prev->end != curr->start)
curr              234 tools/perf/util/symbol.c 			arch__symbols__fixup_end(prev, curr);
curr              238 tools/perf/util/symbol.c 	if (curr->end == curr->start)
curr              239 tools/perf/util/symbol.c 		curr->end = roundup(curr->start, 4096) + 4096;
curr              245 tools/perf/util/symbol.c 	struct map *next, *curr;
curr              249 tools/perf/util/symbol.c 	curr = maps__first(maps);
curr              250 tools/perf/util/symbol.c 	if (curr == NULL)
curr              253 tools/perf/util/symbol.c 	for (next = map__next(curr); next; next = map__next(curr)) {
curr              254 tools/perf/util/symbol.c 		if (!curr->end)
curr              255 tools/perf/util/symbol.c 			curr->end = next->start;
curr              256 tools/perf/util/symbol.c 		curr = next;
curr              263 tools/perf/util/symbol.c 	if (!curr->end)
curr              264 tools/perf/util/symbol.c 		curr->end = ~0ULL;
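
Both tools/perf/util/symbol.c fixup loops above use the same trick: symbols or maps arrive with unknown end addresses, so each entry's end is clamped to the start of the next one, and the final entry gets a synthetic end (a page past its start for symbols, ~0ULL for maps). A sketch over a plain sorted array rather than perf's rbtree:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sym {
            uint64_t start, end;    /* end == start means "unknown" */
    };

    static void fixup_ends(struct sym *syms, int n)
    {
            int i;

            for (i = 0; i + 1 < n; i++) {
                    /* zero-sized symbol: extend it to the next start */
                    if (syms[i].end == syms[i].start)
                            syms[i].end = syms[i + 1].start;
            }
            /* last symbol: round up to a 4 KiB page and add one more,
             * mirroring roundup(curr->start, 4096) + 4096 above */
            if (n && syms[n - 1].end == syms[n - 1].start)
                    syms[n - 1].end =
                            ((syms[n - 1].start + 4095) & ~4095ULL) + 4096;
    }

    int main(void)
    {
            struct sym syms[] = { { 0x1000, 0x1000 }, { 0x1800, 0x1800 } };
            int i;

            fixup_ends(syms, 2);
            for (i = 0; i < 2; i++)
                    printf("%#" PRIx64 "-%#" PRIx64 "\n",
                           syms[i].start, syms[i].end);
            return 0;
    }
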
curr              174 tools/perf/util/thread.c 	struct namespaces *new, *curr = __thread__namespaces(thread);
curr              182 tools/perf/util/thread.c 	if (timestamp && curr) {
curr              188 tools/perf/util/thread.c 		curr = list_next_entry(new, list);
curr              189 tools/perf/util/thread.c 		curr->end_time = timestamp;
curr              240 tools/perf/util/thread.c 	struct comm *new, *curr = thread__comm(thread);
curr              244 tools/perf/util/thread.c 		int err = comm__override(curr, str, timestamp, exec);
curr               36 tools/vm/page_owner_sort.c 	char *curr = buf, *const buf_end = buf + buf_size;
curr               38 tools/vm/page_owner_sort.c 	while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
curr               39 tools/vm/page_owner_sort.c 		if (*curr == '\n') /* empty line */
curr               40 tools/vm/page_owner_sort.c 			return curr - buf;
curr               41 tools/vm/page_owner_sort.c 		curr += strlen(curr);
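
The closing hits are page_owner_sort's read_block(), which accumulates fgets() lines into buf until an empty line terminates the block, with curr walking the write position. A self-contained version of that reader, wrapped in a demo main() that consumes stdin:

    #include <stdio.h>
    #include <string.h>

    /* Read one blank-line-terminated block into buf; return its length,
     * or -1 on EOF or when the buffer fills. Mirrors the loop above. */
    static int read_block(char *buf, int buf_size, FILE *fin)
    {
            char *curr = buf, *const buf_end = buf + buf_size;

            while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
                    if (*curr == '\n')      /* empty line: end of block */
                            return curr - buf;
                    curr += strlen(curr);
            }
            return -1;                      /* EOF or no space left */
    }

    int main(void)
    {
            char buf[4096];
            int len;

            while ((len = read_block(buf, sizeof(buf), stdin)) >= 0)
                    printf("block of %d bytes\n--\n%.*s--\n", len, len, buf);
            return 0;
    }
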