iter               66 arch/m68k/emu/nfblock.c 	struct bvec_iter iter;
iter               72 arch/m68k/emu/nfblock.c 	bio_for_each_segment(bvec, bio, iter) {
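The two arch/m68k/emu/nfblock.c hits above are the block layer's per-segment bio iterator. A minimal sketch of that pattern, not taken from the kernel tree (the helper name is made up), assuming only the standard <linux/bio.h> API:

	#include <linux/bio.h>

	/* Hypothetical helper: sum the data bytes carried by a bio. */
	static unsigned int demo_bio_bytes(struct bio *bio)
	{
		struct bio_vec bvec;	/* copy of the current segment */
		struct bvec_iter iter;	/* position within the bio */
		unsigned int bytes = 0;

		bio_for_each_segment(bvec, bio, iter)
			bytes += bvec.bv_len;

		return bytes;
	}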
iter              165 arch/powerpc/kernel/cacheinfo.c 	struct cache *iter;
iter              167 arch/powerpc/kernel/cacheinfo.c 	list_for_each_entry(iter, &cache_list, list)
iter              168 arch/powerpc/kernel/cacheinfo.c 		WARN_ONCE(iter->next_local == cache,
iter              170 arch/powerpc/kernel/cacheinfo.c 			  iter->ofnode,
iter              171 arch/powerpc/kernel/cacheinfo.c 			  cache_type_string(iter),
iter              303 arch/powerpc/kernel/cacheinfo.c 	struct cache *iter;
iter              309 arch/powerpc/kernel/cacheinfo.c 	list_for_each_entry(iter, &cache_list, list)
iter              310 arch/powerpc/kernel/cacheinfo.c 		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
iter              311 arch/powerpc/kernel/cacheinfo.c 			return iter;
iter              320 arch/powerpc/kernel/cacheinfo.c 	struct cache *iter;
iter              322 arch/powerpc/kernel/cacheinfo.c 	list_for_each_entry(iter, &cache_list, list) {
iter              323 arch/powerpc/kernel/cacheinfo.c 		if (iter->ofnode != node)
iter              325 arch/powerpc/kernel/cacheinfo.c 		cache = cache_find_first_sibling(iter);
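The arch/powerpc/kernel/cacheinfo.c hits above all follow the generic list_for_each_entry() lookup pattern. A hedged, self-contained sketch of that pattern with a made-up element type (demo_node) and key:

	#include <linux/list.h>

	struct demo_node {		/* hypothetical list element */
		struct list_head list;
		int id;
	};

	/* Hypothetical lookup: return the first entry whose id matches. */
	static struct demo_node *demo_find(struct list_head *head, int id)
	{
		struct demo_node *iter;

		list_for_each_entry(iter, head, list)
			if (iter->id == id)
				return iter;

		return NULL;
	}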
iter             1579 arch/s390/kvm/interrupt.c 	struct kvm_s390_interrupt_info *iter;
iter             1584 arch/s390/kvm/interrupt.c 	list_for_each_entry(iter, isc_list, list) {
iter             1585 arch/s390/kvm/interrupt.c 		if (schid && (id != iter->io.subchannel_id ||
iter             1586 arch/s390/kvm/interrupt.c 			      nr != iter->io.subchannel_nr))
iter             1589 arch/s390/kvm/interrupt.c 		list_del_init(&iter->list);
iter             1594 arch/s390/kvm/interrupt.c 		return iter;
iter              128 arch/s390/mm/gmap.c 	struct radix_tree_iter iter;
iter              138 arch/s390/mm/gmap.c 		radix_tree_for_each_slot(slot, root, &iter, index) {
iter              139 arch/s390/mm/gmap.c 			indices[nr] = iter.index;
iter              153 arch/s390/mm/gmap.c 	struct radix_tree_iter iter;
iter              163 arch/s390/mm/gmap.c 		radix_tree_for_each_slot(slot, root, &iter, index) {
iter              164 arch/s390/mm/gmap.c 			indices[nr] = iter.index;
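The two arch/s390/mm/gmap.c hits above batch up radix-tree indices with radix_tree_for_each_slot(). A sketch of that iteration under assumed names (demo_collect_indices); the caller is expected to hold the tree lock or rcu_read_lock():

	#include <linux/radix-tree.h>

	/* Hypothetical helper: record up to @max slot indices starting at @start. */
	static unsigned int demo_collect_indices(struct radix_tree_root *root,
						 unsigned long start,
						 unsigned long *indices,
						 unsigned int max)
	{
		struct radix_tree_iter iter;
		void __rcu **slot;
		unsigned int nr = 0;

		radix_tree_for_each_slot(slot, root, &iter, start) {
			indices[nr] = iter.index;
			if (++nr == max)
				break;
		}
		return nr;
	}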
iter               29 arch/sh/mm/asids-debugfs.c static int asids_seq_show(struct seq_file *file, void *iter)
iter               25 arch/sh/mm/cache-debugfs.c static int cache_seq_show(struct seq_file *file, void *iter)
iter              145 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe, *iter;
iter              175 arch/sh/mm/pmb.c 		for (iter = pmbe->link; iter; iter = iter->link)
iter              176 arch/sh/mm/pmb.c 			span += iter->size;
iter              815 arch/sh/mm/pmb.c static int pmb_seq_show(struct seq_file *file, void *iter)
iter               39 arch/sh/mm/tlb-debugfs.c static int tlb_seq_show(struct seq_file *file, void *iter)
iter               19 arch/sparc/prom/bootstr_32.c 	int iter;
iter               31 arch/sparc/prom/bootstr_32.c 		for (iter = 1; iter < 8; iter++) {
iter               32 arch/sparc/prom/bootstr_32.c 			arg = (*(romvec->pv_v0bootargs))->argv[iter];
iter             1361 arch/um/drivers/ubd_kern.c 	struct req_iterator iter;
iter             1366 arch/um/drivers/ubd_kern.c 	rq_for_each_segment(bvec, req, iter) {
iter             1026 arch/um/drivers/vector_kern.c 	int iter = 0;
iter             1029 arch/um/drivers/vector_kern.c 		while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
iter             1030 arch/um/drivers/vector_kern.c 			iter++;
iter             1032 arch/um/drivers/vector_kern.c 		while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
iter             1033 arch/um/drivers/vector_kern.c 			iter++;
iter             1036 arch/um/drivers/vector_kern.c 	if (iter == MAX_ITERATIONS)
iter               31 arch/unicore32/include/asm/memblock.h #define for_each_bank(iter, mi)				\
iter               32 arch/unicore32/include/asm/memblock.h 	for (iter = 0; iter < (mi)->nr_banks; iter++)
iter             1132 arch/x86/events/intel/pt.c 	struct topa *topa, *iter;
iter             1134 arch/x86/events/intel/pt.c 	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
iter              170 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_patch *iter, *tmp, *p = NULL;
iter              176 arch/x86/kernel/cpu/microcode/intel.c 	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
iter              177 arch/x86/kernel/cpu/microcode/intel.c 		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
iter              191 arch/x86/kernel/cpu/microcode/intel.c 				list_replace(&iter->plist, &p->plist);
iter              192 arch/x86/kernel/cpu/microcode/intel.c 				kfree(iter->data);
iter              193 arch/x86/kernel/cpu/microcode/intel.c 				kfree(iter);
iter              727 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_patch *iter, *tmp;
iter              729 arch/x86/kernel/cpu/microcode/intel.c 	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
iter              731 arch/x86/kernel/cpu/microcode/intel.c 		phdr = (struct microcode_header_intel *)iter->data;
iter              741 arch/x86/kernel/cpu/microcode/intel.c 		return iter->data;
iter              861 arch/x86/kernel/cpu/microcode/intel.c static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
iter              870 arch/x86/kernel/cpu/microcode/intel.c 	while (iov_iter_count(iter)) {
iter              875 arch/x86/kernel/cpu/microcode/intel.c 		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
iter              886 arch/x86/kernel/cpu/microcode/intel.c 		if (data_size > iov_iter_count(iter)) {
iter              902 arch/x86/kernel/cpu/microcode/intel.c 		if (!copy_from_iter_full(data, data_size, iter) ||
iter              921 arch/x86/kernel/cpu/microcode/intel.c 	if (iov_iter_count(iter)) {
iter              973 arch/x86/kernel/cpu/microcode/intel.c 	struct iov_iter iter;
iter              991 arch/x86/kernel/cpu/microcode/intel.c 	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
iter              992 arch/x86/kernel/cpu/microcode/intel.c 	ret = generic_load_microcode(cpu, &iter);
iter             1002 arch/x86/kernel/cpu/microcode/intel.c 	struct iov_iter iter;
iter             1010 arch/x86/kernel/cpu/microcode/intel.c 	iov_iter_init(&iter, WRITE, &iov, 1, size);
iter             1012 arch/x86/kernel/cpu/microcode/intel.c 	return generic_load_microcode(cpu, &iter);
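The arch/x86/kernel/cpu/microcode/intel.c hits above wrap a firmware blob in a kvec-backed iov_iter and then pull fixed-size headers out of it with copy_from_iter_full(). A simplified sketch of that consumption loop; the header layout (demo_hdr) is invented and the error values are only illustrative:

	#include <linux/uio.h>

	struct demo_hdr {		/* hypothetical on-disk header */
		unsigned int data_size;
	};

	static int demo_parse(void *buf, size_t size)
	{
		struct kvec kvec = { .iov_base = buf, .iov_len = size };
		struct iov_iter iter;
		struct demo_hdr hdr;

		iov_iter_kvec(&iter, WRITE, &kvec, 1, size);

		while (iov_iter_count(&iter)) {
			if (!copy_from_iter_full(&hdr, sizeof(hdr), &iter))
				return -EINVAL;		/* truncated header */
			if (hdr.data_size > iov_iter_count(&iter))
				return -EINVAL;		/* payload overruns the buffer */
			iov_iter_advance(&iter, hdr.data_size);	/* skip the payload */
		}
		return 0;
	}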
iter              581 arch/x86/kernel/ftrace.c 	struct ftrace_rec_iter *iter;
iter              587 arch/x86/kernel/ftrace.c 	for_ftrace_rec_iter(iter) {
iter              588 arch/x86/kernel/ftrace.c 		rec = ftrace_rec_iter_record(iter);
iter              601 arch/x86/kernel/ftrace.c 	for_ftrace_rec_iter(iter) {
iter              602 arch/x86/kernel/ftrace.c 		rec = ftrace_rec_iter_record(iter);
iter              615 arch/x86/kernel/ftrace.c 	for_ftrace_rec_iter(iter) {
iter              616 arch/x86/kernel/ftrace.c 		rec = ftrace_rec_iter_record(iter);
iter              631 arch/x86/kernel/ftrace.c 	for_ftrace_rec_iter(iter) {
iter              632 arch/x86/kernel/ftrace.c 		rec = ftrace_rec_iter_record(iter);
iter             1538 arch/x86/kvm/mmu.c 			   struct rmap_iterator *iter)
iter             1546 arch/x86/kvm/mmu.c 		iter->desc = NULL;
iter             1551 arch/x86/kvm/mmu.c 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
iter             1552 arch/x86/kvm/mmu.c 	iter->pos = 0;
iter             1553 arch/x86/kvm/mmu.c 	sptep = iter->desc->sptes[iter->pos];
iter             1564 arch/x86/kvm/mmu.c static u64 *rmap_get_next(struct rmap_iterator *iter)
iter             1568 arch/x86/kvm/mmu.c 	if (iter->desc) {
iter             1569 arch/x86/kvm/mmu.c 		if (iter->pos < PTE_LIST_EXT - 1) {
iter             1570 arch/x86/kvm/mmu.c 			++iter->pos;
iter             1571 arch/x86/kvm/mmu.c 			sptep = iter->desc->sptes[iter->pos];
iter             1576 arch/x86/kvm/mmu.c 		iter->desc = iter->desc->more;
iter             1578 arch/x86/kvm/mmu.c 		if (iter->desc) {
iter             1579 arch/x86/kvm/mmu.c 			iter->pos = 0;
iter             1581 arch/x86/kvm/mmu.c 			sptep = iter->desc->sptes[iter->pos];
iter             1661 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             1664 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
iter             1700 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             1703 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
iter             1731 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             1734 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
iter             1856 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             1859 arch/x86/kvm/mmu.c 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
iter             1881 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             1891 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
iter             2063 arch/x86/kvm/mmu.c 	struct rmap_iterator uninitialized_var(iter);
iter             2066 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
iter             2078 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             2080 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
iter             2203 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             2205 arch/x86/kvm/mmu.c 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
iter             2785 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             2787 arch/x86/kvm/mmu.c 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
iter             6016 arch/x86/kvm/mmu.c 	struct rmap_iterator iter;
iter             6022 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
iter              194 arch/x86/kvm/mmu_audit.c 	struct rmap_iterator iter;
iter              205 arch/x86/kvm/mmu_audit.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
iter              476 arch/x86/kvm/mtrr.c static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
iter              480 arch/x86/kvm/mtrr.c 	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
iter              483 arch/x86/kvm/mtrr.c 	seg = fixed_mtrr_addr_to_seg(iter->start);
iter              487 arch/x86/kvm/mtrr.c 	iter->fixed = true;
iter              488 arch/x86/kvm/mtrr.c 	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
iter              489 arch/x86/kvm/mtrr.c 	iter->index = index;
iter              490 arch/x86/kvm/mtrr.c 	iter->seg = seg;
iter              494 arch/x86/kvm/mtrr.c static bool match_var_range(struct mtrr_iter *iter,
iter              500 arch/x86/kvm/mtrr.c 	if (!(start >= iter->end || end <= iter->start)) {
iter              501 arch/x86/kvm/mtrr.c 		iter->range = range;
iter              508 arch/x86/kvm/mtrr.c 		iter->partial_map |= iter->start_max < start;
iter              511 arch/x86/kvm/mtrr.c 		iter->start_max = max(iter->start_max, end);
iter              518 arch/x86/kvm/mtrr.c static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
iter              520 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
iter              522 arch/x86/kvm/mtrr.c 	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
iter              523 arch/x86/kvm/mtrr.c 		if (match_var_range(iter, iter->range))
iter              526 arch/x86/kvm/mtrr.c 	iter->range = NULL;
iter              527 arch/x86/kvm/mtrr.c 	iter->partial_map |= iter->start_max < iter->end;
iter              530 arch/x86/kvm/mtrr.c static void mtrr_lookup_var_start(struct mtrr_iter *iter)
iter              532 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
iter              534 arch/x86/kvm/mtrr.c 	iter->fixed = false;
iter              535 arch/x86/kvm/mtrr.c 	iter->start_max = iter->start;
iter              536 arch/x86/kvm/mtrr.c 	iter->range = NULL;
iter              537 arch/x86/kvm/mtrr.c 	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
iter              539 arch/x86/kvm/mtrr.c 	__mtrr_lookup_var_next(iter);
iter              542 arch/x86/kvm/mtrr.c static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
iter              545 arch/x86/kvm/mtrr.c 	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
iter              546 arch/x86/kvm/mtrr.c 		iter->fixed = false;
iter              547 arch/x86/kvm/mtrr.c 		iter->range = NULL;
iter              551 arch/x86/kvm/mtrr.c 	iter->index++;
iter              554 arch/x86/kvm/mtrr.c 	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
iter              555 arch/x86/kvm/mtrr.c 		return mtrr_lookup_var_start(iter);
iter              558 arch/x86/kvm/mtrr.c 	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
iter              559 arch/x86/kvm/mtrr.c 		iter->seg++;
iter              562 arch/x86/kvm/mtrr.c static void mtrr_lookup_var_next(struct mtrr_iter *iter)
iter              564 arch/x86/kvm/mtrr.c 	__mtrr_lookup_var_next(iter);
iter              567 arch/x86/kvm/mtrr.c static void mtrr_lookup_start(struct mtrr_iter *iter)
iter              569 arch/x86/kvm/mtrr.c 	if (!mtrr_is_enabled(iter->mtrr_state)) {
iter              570 arch/x86/kvm/mtrr.c 		iter->mtrr_disabled = true;
iter              574 arch/x86/kvm/mtrr.c 	if (!mtrr_lookup_fixed_start(iter))
iter              575 arch/x86/kvm/mtrr.c 		mtrr_lookup_var_start(iter);
iter              578 arch/x86/kvm/mtrr.c static void mtrr_lookup_init(struct mtrr_iter *iter,
iter              581 arch/x86/kvm/mtrr.c 	iter->mtrr_state = mtrr_state;
iter              582 arch/x86/kvm/mtrr.c 	iter->start = start;
iter              583 arch/x86/kvm/mtrr.c 	iter->end = end;
iter              584 arch/x86/kvm/mtrr.c 	iter->mtrr_disabled = false;
iter              585 arch/x86/kvm/mtrr.c 	iter->partial_map = false;
iter              586 arch/x86/kvm/mtrr.c 	iter->fixed = false;
iter              587 arch/x86/kvm/mtrr.c 	iter->range = NULL;
iter              589 arch/x86/kvm/mtrr.c 	mtrr_lookup_start(iter);
iter              592 arch/x86/kvm/mtrr.c static bool mtrr_lookup_okay(struct mtrr_iter *iter)
iter              594 arch/x86/kvm/mtrr.c 	if (iter->fixed) {
iter              595 arch/x86/kvm/mtrr.c 		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
iter              599 arch/x86/kvm/mtrr.c 	if (iter->range) {
iter              600 arch/x86/kvm/mtrr.c 		iter->mem_type = iter->range->base & 0xff;
iter              607 arch/x86/kvm/mtrr.c static void mtrr_lookup_next(struct mtrr_iter *iter)
iter              609 arch/x86/kvm/mtrr.c 	if (iter->fixed)
iter              610 arch/x86/kvm/mtrr.c 		mtrr_lookup_fixed_next(iter);
iter              612 arch/x86/kvm/mtrr.c 		mtrr_lookup_var_next(iter);
iter              622 arch/x86/kvm/mtrr.c 	struct mtrr_iter iter;
iter              631 arch/x86/kvm/mtrr.c 	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
iter              632 arch/x86/kvm/mtrr.c 		int curr_type = iter.mem_type;
iter              678 arch/x86/kvm/mtrr.c 	if (iter.mtrr_disabled)
iter              689 arch/x86/kvm/mtrr.c 	WARN_ON(iter.partial_map);
iter              699 arch/x86/kvm/mtrr.c 	struct mtrr_iter iter;
iter              705 arch/x86/kvm/mtrr.c 	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
iter              707 arch/x86/kvm/mtrr.c 			type = iter.mem_type;
iter              711 arch/x86/kvm/mtrr.c 		if (type != iter.mem_type)
iter              715 arch/x86/kvm/mtrr.c 	if (iter.mtrr_disabled)
iter              718 arch/x86/kvm/mtrr.c 	if (!iter.partial_map)
iter              108 arch/xtensa/platforms/iss/simdisk.c 	struct bvec_iter iter;
iter              111 arch/xtensa/platforms/iss/simdisk.c 	bio_for_each_segment(bvec, bio, iter) {
iter              157 block/bio-integrity.c 	struct blk_integrity_iter iter;
iter              165 block/bio-integrity.c 	iter.disk_name = bio->bi_disk->disk_name;
iter              166 block/bio-integrity.c 	iter.interval = 1 << bi->interval_exp;
iter              167 block/bio-integrity.c 	iter.seed = proc_iter->bi_sector;
iter              168 block/bio-integrity.c 	iter.prot_buf = prot_buf;
iter              173 block/bio-integrity.c 		iter.data_buf = kaddr + bv.bv_offset;
iter              174 block/bio-integrity.c 		iter.data_size = bv.bv_len;
iter              176 block/bio-integrity.c 		ret = proc_fn(&iter);
iter              530 block/bio.c    	struct bvec_iter iter;
iter              532 block/bio.c    	__bio_for_each_segment(bv, bio, iter, start) {
iter              554 block/bio.c    	struct bvec_iter iter;
iter              564 block/bio.c    	bio_for_each_segment(bv, bio, iter) {
iter              890 block/bio.c    static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
iter              892 block/bio.c    	const struct bio_vec *bv = iter->bvec;
iter              896 block/bio.c    	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
iter              899 block/bio.c    	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
iter              901 block/bio.c    				bv->bv_offset + iter->iov_offset);
iter              904 block/bio.c    	iov_iter_advance(iter, size);
iter              920 block/bio.c    static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
iter              939 block/bio.c    	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
iter              959 block/bio.c    	iov_iter_advance(iter, size);
iter              983 block/bio.c    int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
iter              985 block/bio.c    	const bool is_bvec = iov_iter_is_bvec(iter);
iter              993 block/bio.c    			ret = __bio_iov_bvec_add_pages(bio, iter);
iter              995 block/bio.c    			ret = __bio_iov_iter_get_pages(bio, iter);
iter              996 block/bio.c    	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
iter             1140 block/bio.c    	struct iov_iter iter;
iter             1155 block/bio.c    	bmd->iter = *data;
iter             1156 block/bio.c    	bmd->iter.iov = bmd->iov;
iter             1168 block/bio.c    static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
iter             1179 block/bio.c    					  iter);
iter             1181 block/bio.c    		if (!iov_iter_count(iter))
iter             1199 block/bio.c    static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
iter             1210 block/bio.c    					&iter);
iter             1212 block/bio.c    		if (!iov_iter_count(&iter))
iter             1253 block/bio.c    			ret = bio_copy_to_iter(bio, bmd->iter);
iter             1275 block/bio.c    			      struct iov_iter *iter,
iter             1283 block/bio.c    	unsigned int len = iter->count;
iter             1286 block/bio.c    	bmd = bio_alloc_map_data(iter, gfp_mask);
iter             1357 block/bio.c    	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
iter             1359 block/bio.c    		ret = bio_copy_from_iter(bio, iter);
iter             1365 block/bio.c    		iov_iter_advance(iter, bio->bi_iter.bi_size);
iter             1391 block/bio.c    			     struct iov_iter *iter,
iter             1398 block/bio.c    	if (!iov_iter_count(iter))
iter             1401 block/bio.c    	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
iter             1405 block/bio.c    	while (iov_iter_count(iter)) {
iter             1411 block/bio.c    		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
iter             1442 block/bio.c    			iov_iter_advance(iter, added);
iter             1524 block/blk-core.c 	struct req_iterator iter;
iter             1527 block/blk-core.c 	rq_for_each_segment(bvec, rq, iter)
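The block/blk-core.c hit above is the request-level variant of the bio iterator: rq_for_each_segment() walks every bio_vec of every bio in a request. A sketch with a made-up helper name:

	#include <linux/blkdev.h>

	/* Hypothetical helper: sum the data bytes of all segments in a request. */
	static unsigned int demo_rq_bytes(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec bvec;
		unsigned int bytes = 0;

		rq_for_each_segment(bvec, rq, iter)
			bytes += bvec.bv_len;

		return bytes;
	}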
iter               32 block/blk-integrity.c 	struct bvec_iter iter;
iter               35 block/blk-integrity.c 	bio_for_each_integrity_vec(iv, bio, iter) {
iter               74 block/blk-integrity.c 	struct bvec_iter iter;
iter               77 block/blk-integrity.c 	bio_for_each_integrity_vec(iv, bio, iter) {
iter              366 block/blk-integrity.c static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
iter               21 block/blk-map.c 	struct bvec_iter iter;
iter               27 block/blk-map.c 	bio_for_each_bvec(bv, *bio, iter)
iter               65 block/blk-map.c 		struct rq_map_data *map_data, struct iov_iter *iter,
iter               73 block/blk-map.c 		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
iter               75 block/blk-map.c 		bio = bio_map_user_iov(q, iter, gfp_mask);
iter              122 block/blk-map.c 			const struct iov_iter *iter, gfp_t gfp_mask)
iter              130 block/blk-map.c 	if (!iter_is_iovec(iter))
iter              135 block/blk-map.c 	else if (iov_iter_alignment(iter) & align)
iter              138 block/blk-map.c 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
iter              140 block/blk-map.c 	i = *iter;
iter              246 block/blk-merge.c 	struct bvec_iter iter;
iter              251 block/blk-merge.c 	bio_for_each_bvec(bv, bio, iter) {
iter              360 block/blk-merge.c 	struct req_iterator iter;
iter              375 block/blk-merge.c 	rq_for_each_bvec(bv, rq, iter)
iter              467 block/blk-merge.c 	struct bvec_iter iter;
iter              472 block/blk-merge.c 		bio_for_each_bvec(bvec, bio, iter) {
iter              136 block/bounce.c 	struct bvec_iter iter;
iter              144 block/bounce.c 	bio_for_each_segment(tovec, to, iter) {
iter              220 block/bounce.c 	struct bvec_iter iter;
iter              265 block/bounce.c 		bio_for_each_segment(bv, bio_src, iter)
iter              292 block/bounce.c 	struct bvec_iter iter;
iter              298 block/bounce.c 	bio_for_each_segment(from, *bio_orig, iter) {
iter              947 block/genhd.c  	struct class_dev_iter iter;
iter              950 block/genhd.c  	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
iter              951 block/genhd.c  	while ((dev = class_dev_iter_next(&iter))) {
iter              991 block/genhd.c  	class_dev_iter_exit(&iter);
iter              999 block/genhd.c  	struct class_dev_iter *iter;
iter             1002 block/genhd.c  	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter             1003 block/genhd.c  	if (!iter)
iter             1006 block/genhd.c  	seqf->private = iter;
iter             1007 block/genhd.c  	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
iter             1009 block/genhd.c  		dev = class_dev_iter_next(iter);
iter             1031 block/genhd.c  	struct class_dev_iter *iter = seqf->private;
iter             1034 block/genhd.c  	if (iter) {
iter             1035 block/genhd.c  		class_dev_iter_exit(iter);
iter             1036 block/genhd.c  		kfree(iter);
iter             1432 block/genhd.c  	struct class_dev_iter iter;
iter             1435 block/genhd.c  	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
iter             1436 block/genhd.c  	while ((dev = class_dev_iter_next(&iter))) {
iter             1459 block/genhd.c  	class_dev_iter_exit(&iter);
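The block/genhd.c hits above show the full class_dev_iter life cycle (init, next in a while loop, exit). A sketch of that walk over an arbitrary class; demo_class and demo_visit are placeholders, and the device returned by class_dev_iter_next() stays referenced until the iterator moves on or exits:

	#include <linux/device.h>

	/* Hypothetical walker: call @demo_visit on every device of @demo_class. */
	static void demo_for_each_class_dev(struct class *demo_class,
					    void (*demo_visit)(struct device *))
	{
		struct class_dev_iter iter;
		struct device *dev;

		class_dev_iter_init(&iter, demo_class, NULL, NULL);
		while ((dev = class_dev_iter_next(&iter)))
			demo_visit(dev);
		class_dev_iter_exit(&iter);
	}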
iter              850 block/sed-opal.c 	struct opal_resp_tok *iter;
iter              884 block/sed-opal.c 	iter = resp->toks;
iter              889 block/sed-opal.c 			token_length = response_parse_tiny(iter, pos);
iter              891 block/sed-opal.c 			token_length = response_parse_short(iter, pos);
iter              893 block/sed-opal.c 			token_length = response_parse_medium(iter, pos);
iter              895 block/sed-opal.c 			token_length = response_parse_long(iter, pos);
iter              897 block/sed-opal.c 			token_length = response_parse_token(iter, pos);
iter              904 block/sed-opal.c 		iter++;
iter             1071 block/sed-opal.c 	struct opal_suspend_data *iter;
iter             1073 block/sed-opal.c 	list_for_each_entry(iter, &dev->unlk_lst, node) {
iter             1074 block/sed-opal.c 		if (iter->lr == sus->lr) {
iter             1075 block/sed-opal.c 			list_del(&iter->node);
iter             1076 block/sed-opal.c 			kfree(iter);
iter               29 block/t10-pi.c static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
iter               34 block/t10-pi.c 	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
iter               35 block/t10-pi.c 		struct t10_pi_tuple *pi = iter->prot_buf;
iter               37 block/t10-pi.c 		pi->guard_tag = fn(iter->data_buf, iter->interval);
iter               41 block/t10-pi.c 			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
iter               45 block/t10-pi.c 		iter->data_buf += iter->interval;
iter               46 block/t10-pi.c 		iter->prot_buf += sizeof(struct t10_pi_tuple);
iter               47 block/t10-pi.c 		iter->seed++;
iter               53 block/t10-pi.c static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter               60 block/t10-pi.c 	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
iter               61 block/t10-pi.c 		struct t10_pi_tuple *pi = iter->prot_buf;
iter               70 block/t10-pi.c 			    lower_32_bits(iter->seed)) {
iter               72 block/t10-pi.c 				       "(rcvd %u)\n", iter->disk_name,
iter               74 block/t10-pi.c 				       iter->seed, be32_to_cpu(pi->ref_tag));
iter               83 block/t10-pi.c 		csum = fn(iter->data_buf, iter->interval);
iter               87 block/t10-pi.c 			       "(rcvd %04x, want %04x)\n", iter->disk_name,
iter               88 block/t10-pi.c 			       (unsigned long long)iter->seed,
iter               94 block/t10-pi.c 		iter->data_buf += iter->interval;
iter               95 block/t10-pi.c 		iter->prot_buf += sizeof(struct t10_pi_tuple);
iter               96 block/t10-pi.c 		iter->seed++;
iter              102 block/t10-pi.c static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
iter              104 block/t10-pi.c 	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
iter              107 block/t10-pi.c static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
iter              109 block/t10-pi.c 	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
iter              112 block/t10-pi.c static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
iter              114 block/t10-pi.c 	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
iter              117 block/t10-pi.c static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
iter              119 block/t10-pi.c 	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
iter              142 block/t10-pi.c 		struct bvec_iter iter;
iter              148 block/t10-pi.c 		bip_for_each_vec(iv, bip, iter) {
iter              194 block/t10-pi.c 		struct bvec_iter iter;
iter              196 block/t10-pi.c 		bip_for_each_vec(iv, bip, iter) {
iter              218 block/t10-pi.c static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
iter              220 block/t10-pi.c 	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
iter              223 block/t10-pi.c static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
iter              225 block/t10-pi.c 	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
iter              228 block/t10-pi.c static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
iter              230 block/t10-pi.c 	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
iter              233 block/t10-pi.c static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
iter              235 block/t10-pi.c 	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
iter              393 crypto/af_alg.c int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
iter              399 crypto/af_alg.c 	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
iter              182 drivers/base/attribute_container.c #define klist_for_each_entry(pos, head, member, iter) \
iter              183 drivers/base/attribute_container.c 	for (klist_iter_init(head, iter); (pos = ({ \
iter              184 drivers/base/attribute_container.c 		struct klist_node *n = klist_next(iter); \
iter              186 drivers/base/attribute_container.c 			({ klist_iter_exit(iter) ; NULL; }); \
iter              216 drivers/base/attribute_container.c 		struct klist_iter iter;
iter              224 drivers/base/attribute_container.c 		klist_for_each_entry(ic, &cont->containers, node, &iter) {
iter              260 drivers/base/attribute_container.c 		struct klist_iter iter;
iter              270 drivers/base/attribute_container.c 		klist_for_each_entry(ic, &cont->containers, node, &iter) {
iter              428 drivers/base/attribute_container.c 	struct klist_iter iter;
iter              430 drivers/base/attribute_container.c 	klist_for_each_entry(ic, &cont->containers, node, &iter) {
iter              434 drivers/base/attribute_container.c 			klist_iter_exit(&iter);
iter              986 drivers/base/bus.c void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
iter              993 drivers/base/bus.c 	klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
iter              994 drivers/base/bus.c 	iter->type = type;
iter             1010 drivers/base/bus.c struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
iter             1016 drivers/base/bus.c 		knode = klist_next(&iter->ki);
iter             1020 drivers/base/bus.c 		if (!iter->type || iter->type == dev->type)
iter             1033 drivers/base/bus.c void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
iter             1035 drivers/base/bus.c 	klist_iter_exit(&iter->ki);
iter             1042 drivers/base/bus.c 	struct subsys_dev_iter iter;
iter             1055 drivers/base/bus.c 		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
iter             1056 drivers/base/bus.c 		while ((dev = subsys_dev_iter_next(&iter)))
iter             1058 drivers/base/bus.c 		subsys_dev_iter_exit(&iter);
iter             1069 drivers/base/bus.c 	struct subsys_dev_iter iter;
iter             1080 drivers/base/bus.c 		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
iter             1081 drivers/base/bus.c 		while ((dev = subsys_dev_iter_next(&iter)))
iter             1083 drivers/base/bus.c 		subsys_dev_iter_exit(&iter);
iter              280 drivers/base/class.c void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
iter              287 drivers/base/class.c 	klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
iter              288 drivers/base/class.c 	iter->type = type;
iter              304 drivers/base/class.c struct device *class_dev_iter_next(struct class_dev_iter *iter)
iter              310 drivers/base/class.c 		knode = klist_next(&iter->ki);
iter              314 drivers/base/class.c 		if (!iter->type || iter->type == dev->type)
iter              327 drivers/base/class.c void class_dev_iter_exit(struct class_dev_iter *iter)
iter              329 drivers/base/class.c 	klist_iter_exit(&iter->ki);
iter              354 drivers/base/class.c 	struct class_dev_iter iter;
iter              366 drivers/base/class.c 	class_dev_iter_init(&iter, class, start, NULL);
iter              367 drivers/base/class.c 	while ((dev = class_dev_iter_next(&iter))) {
iter              372 drivers/base/class.c 	class_dev_iter_exit(&iter);
iter              402 drivers/base/class.c 	struct class_dev_iter iter;
iter              413 drivers/base/class.c 	class_dev_iter_init(&iter, class, start, NULL);
iter              414 drivers/base/class.c 	while ((dev = class_dev_iter_next(&iter))) {
iter              420 drivers/base/class.c 	class_dev_iter_exit(&iter);
iter              429 drivers/base/class.c 	struct class_dev_iter iter;
iter              442 drivers/base/class.c 		class_dev_iter_init(&iter, parent, NULL, NULL);
iter              443 drivers/base/class.c 		while ((dev = class_dev_iter_next(&iter)))
iter              445 drivers/base/class.c 		class_dev_iter_exit(&iter);
iter              455 drivers/base/class.c 	struct class_dev_iter iter;
iter              464 drivers/base/class.c 		class_dev_iter_init(&iter, parent, NULL, NULL);
iter              465 drivers/base/class.c 		while ((dev = class_dev_iter_next(&iter)))
iter              467 drivers/base/class.c 		class_dev_iter_exit(&iter);
iter              110 drivers/block/aoe/aoe.h 	struct bvec_iter iter;
iter              128 drivers/block/aoe/aoe.h 	struct bvec_iter iter;
iter              199 drivers/block/aoe/aoecmd.c 	memset(&f->iter, 0, sizeof(f->iter));
iter              297 drivers/block/aoe/aoecmd.c skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
iter              302 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter)
iter              343 drivers/block/aoe/aoecmd.c 	ah->scnt = f->iter.bi_size >> 9;
iter              344 drivers/block/aoe/aoecmd.c 	put_lba(ah, f->iter.bi_sector);
iter              353 drivers/block/aoe/aoecmd.c 		skb_fillup(skb, f->buf->bio, f->iter);
iter              355 drivers/block/aoe/aoecmd.c 		skb->len += f->iter.bi_size;
iter              356 drivers/block/aoe/aoecmd.c 		skb->data_len = f->iter.bi_size;
iter              357 drivers/block/aoe/aoecmd.c 		skb->truesize += f->iter.bi_size;
iter              385 drivers/block/aoe/aoecmd.c 	f->iter = buf->iter;
iter              386 drivers/block/aoe/aoecmd.c 	f->iter.bi_size = min_t(unsigned long,
iter              388 drivers/block/aoe/aoecmd.c 				f->iter.bi_size);
iter              389 drivers/block/aoe/aoecmd.c 	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
iter              391 drivers/block/aoe/aoecmd.c 	if (!buf->iter.bi_size)
iter              570 drivers/block/aoe/aoecmd.c 	nf->iter = f->iter;
iter              601 drivers/block/aoe/aoecmd.c 	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
iter              604 drivers/block/aoe/aoecmd.c 	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
iter              611 drivers/block/aoe/aoecmd.c 	skb->len += f->iter.bi_size;
iter              612 drivers/block/aoe/aoecmd.c 	skb->data_len = f->iter.bi_size;
iter              613 drivers/block/aoe/aoecmd.c 	skb->truesize += f->iter.bi_size;
iter              831 drivers/block/aoe/aoecmd.c 	buf->iter = bio->bi_iter;
iter             1023 drivers/block/aoe/aoecmd.c bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
iter             1028 drivers/block/aoe/aoecmd.c 	iter.bi_size = cnt;
iter             1030 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter) {
iter             1129 drivers/block/aoe/aoecmd.c 		if (n > f->iter.bi_size) {
iter             1133 drivers/block/aoe/aoecmd.c 				n, f->iter.bi_size);
iter             1137 drivers/block/aoe/aoecmd.c 		bvcpy(skb, f->buf->bio, f->iter, n);
iter             1181 drivers/block/aoe/aoecmd.c 	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
iter             1634 drivers/block/aoe/aoecmd.c 	buf->iter.bi_size = 0;
iter              290 drivers/block/brd.c 	struct bvec_iter iter;
iter              296 drivers/block/brd.c 	bio_for_each_segment(bvec, bio, iter) {
iter             1591 drivers/block/drbd/drbd_main.c 	struct bvec_iter iter;
iter             1594 drivers/block/drbd/drbd_main.c 	bio_for_each_segment(bvec, bio, iter) {
iter             1599 drivers/block/drbd/drbd_main.c 					 bio_iter_last(bvec, iter)
iter             1613 drivers/block/drbd/drbd_main.c 	struct bvec_iter iter;
iter             1616 drivers/block/drbd/drbd_main.c 	bio_for_each_segment(bvec, bio, iter) {
iter             1621 drivers/block/drbd/drbd_main.c 				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
iter             2012 drivers/block/drbd/drbd_receiver.c 	struct bvec_iter iter;
iter             2034 drivers/block/drbd/drbd_receiver.c 	bio_for_each_segment(bvec, bio, iter) {
iter              319 drivers/block/drbd/drbd_worker.c 	struct bvec_iter iter;
iter              325 drivers/block/drbd/drbd_worker.c 	bio_for_each_segment(bvec, bio, iter) {
iter             2375 drivers/block/floppy.c 	struct req_iterator iter;
iter             2381 drivers/block/floppy.c 	rq_for_each_segment(bv, current_req, iter) {
iter             2415 drivers/block/floppy.c 	struct req_iterator iter;
iter             2445 drivers/block/floppy.c 	rq_for_each_segment(bv, current_req, iter) {
iter              293 drivers/block/loop.c 	struct req_iterator iter;
iter              296 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
iter              315 drivers/block/loop.c 	struct req_iterator iter;
iter              323 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
iter              345 drivers/block/loop.c 	struct req_iterator iter;
iter              349 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
iter              374 drivers/block/loop.c 	struct req_iterator iter;
iter              384 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
iter              516 drivers/block/loop.c 	struct iov_iter iter;
iter              561 drivers/block/loop.c 	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter              562 drivers/block/loop.c 	iter.iov_offset = offset;
iter              573 drivers/block/loop.c 		ret = call_write_iter(file, &cmd->iocb, &iter);
iter              575 drivers/block/loop.c 		ret = call_read_iter(file, &cmd->iocb, &iter);
iter              460 drivers/block/nbd.c 		     struct iov_iter *iter, int msg_flags, int *sent)
iter              475 drivers/block/nbd.c 	msg.msg_iter = *iter;
iter              608 drivers/block/nbd.c 		struct bvec_iter iter;
iter              611 drivers/block/nbd.c 		bio_for_each_segment(bvec, bio, iter) {
iter              612 drivers/block/nbd.c 			bool is_last = !next && bio_iter_last(bvec, iter);
iter              734 drivers/block/nbd.c 		struct req_iterator iter;
iter              737 drivers/block/nbd.c 		rq_for_each_segment(bvec, req, iter) {
iter             1060 drivers/block/null_blk_main.c 	struct req_iterator iter;
iter             1071 drivers/block/null_blk_main.c 	rq_for_each_segment(bvec, rq, iter) {
iter             1095 drivers/block/null_blk_main.c 	struct bvec_iter iter;
iter             1106 drivers/block/null_blk_main.c 	bio_for_each_segment(bvec, bio, iter) {
iter               85 drivers/block/ps3disk.c 	struct req_iterator iter;
iter               91 drivers/block/ps3disk.c 	rq_for_each_segment(bvec, req, iter) {
iter               94 drivers/block/ps3disk.c 			__func__, __LINE__, i, bio_sectors(iter.bio),
iter               95 drivers/block/ps3disk.c 			iter.bio->bi_iter.bi_sector);
iter              122 drivers/block/ps3disk.c 	struct req_iterator iter;
iter              124 drivers/block/ps3disk.c 	rq_for_each_segment(bv, req, iter)
iter              546 drivers/block/ps3vram.c 	struct bvec_iter iter;
iter              549 drivers/block/ps3vram.c 	bio_for_each_segment(bvec, bio, iter) {
iter             2304 drivers/block/rbd.c 		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
iter             2603 drivers/block/rbd.c 	union rbd_img_fill_iter	iter;
iter             2648 drivers/block/rbd.c 	fctx->iter = *fctx->pos;
iter             2655 drivers/block/rbd.c 					   fctx->set_pos_fn, &fctx->iter);
iter             2700 drivers/block/rbd.c 	fctx->iter = *fctx->pos;
iter             2707 drivers/block/rbd.c 					   fctx->count_fn, &fctx->iter);
iter             2724 drivers/block/rbd.c 	fctx->iter = *fctx->pos;
iter             2730 drivers/block/rbd.c 					   fctx->copy_fn, &fctx->iter);
iter             2784 drivers/block/rbd.c 		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
iter             2809 drivers/block/rbd.c 	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
iter             2844 drivers/block/rbd.c 		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
iter             2872 drivers/block/rbd.c 		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
iter             3135 drivers/block/rbd.c 		.iter = { .bi_size = bytes },
iter              607 drivers/block/rsxx/core.c 	int iter = 0;
iter              610 drivers/block/rsxx/core.c 	while (iter++ < 10) {
iter              674 drivers/block/rsxx/dma.c 	struct bvec_iter iter;
iter              712 drivers/block/rsxx/dma.c 		bio_for_each_segment(bvec, bio, iter) {
iter              117 drivers/block/umem.c 		struct bvec_iter	iter;
iter              374 drivers/block/umem.c 		p->iter = card->current_iter;
iter              439 drivers/block/umem.c 		vec = bio_iter_iovec(bio, page->iter);
iter              440 drivers/block/umem.c 		bio_advance_iter(bio, &page->iter, vec.bv_len);
iter              442 drivers/block/umem.c 		if (!page->iter.bi_size) {
iter              445 drivers/block/umem.c 				page->iter = page->bio->bi_iter;
iter             1546 drivers/block/zram/zram_drv.c 	struct bvec_iter iter;
iter             1562 drivers/block/zram/zram_drv.c 	bio_for_each_segment(bvec, bio, iter) {
iter              705 drivers/char/mem.c static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
iter              709 drivers/char/mem.c 	while (iov_iter_count(iter)) {
iter              710 drivers/char/mem.c 		size_t chunk = iov_iter_count(iter), n;
iter              714 drivers/char/mem.c 		n = iov_iter_zero(chunk, iter);
iter              715 drivers/char/mem.c 		if (!n && iov_iter_count(iter))
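The drivers/char/mem.c hits above implement a ->read_iter() handler purely in terms of iov_iter_count() and iov_iter_zero(). A stripped-down sketch of that handler shape (no PAGE_SIZE chunking or signal checks, unlike the real driver); the function name is made up:

	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t demo_read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
	{
		size_t written = 0;

		while (iov_iter_count(iter)) {
			size_t n = iov_iter_zero(iov_iter_count(iter), iter);

			if (!n && iov_iter_count(iter))	/* fault while filling user memory */
				return written ? written : -EFAULT;
			written += n;
		}
		return written;
	}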
iter              538 drivers/crypto/marvell/cesa.h 	struct sg_mapping_iter iter;
iter              777 drivers/crypto/marvell/cesa.h static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
iter              780 drivers/crypto/marvell/cesa.h 	iter->len = len;
iter              781 drivers/crypto/marvell/cesa.h 	iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
iter              782 drivers/crypto/marvell/cesa.h 	iter->offset = 0;
iter              785 drivers/crypto/marvell/cesa.h static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
iter              789 drivers/crypto/marvell/cesa.h 	iter->op_offset = 0;
iter              790 drivers/crypto/marvell/cesa.h 	iter->offset = 0;
iter              791 drivers/crypto/marvell/cesa.h 	iter->sg = sg;
iter              792 drivers/crypto/marvell/cesa.h 	iter->dir = dir;
iter              796 drivers/crypto/marvell/cesa.h mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
iter              799 drivers/crypto/marvell/cesa.h 	return min(iter->op_len - sgiter->op_offset,
iter              807 drivers/crypto/marvell/cesa.h static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
iter              809 drivers/crypto/marvell/cesa.h 	iter->offset += iter->op_len;
iter              810 drivers/crypto/marvell/cesa.h 	iter->op_len = min(iter->len - iter->offset,
iter              813 drivers/crypto/marvell/cesa.h 	return iter->op_len;
iter               39 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
iter               42 drivers/crypto/marvell/cipher.c 	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
iter               43 drivers/crypto/marvell/cipher.c 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
iter               44 drivers/crypto/marvell/cipher.c 	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
iter               48 drivers/crypto/marvell/cipher.c mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
iter               50 drivers/crypto/marvell/cipher.c 	iter->src.op_offset = 0;
iter               51 drivers/crypto/marvell/cipher.c 	iter->dst.op_offset = 0;
iter               53 drivers/crypto/marvell/cipher.c 	return mv_cesa_req_dma_iter_next_op(&iter->base);
iter              309 drivers/crypto/marvell/cipher.c 	struct mv_cesa_skcipher_dma_iter iter;
iter              336 drivers/crypto/marvell/cipher.c 	mv_cesa_skcipher_req_iter_init(&iter, req);
iter              348 drivers/crypto/marvell/cipher.c 		mv_cesa_set_crypt_op_len(op, iter.base.op_len);
iter              351 drivers/crypto/marvell/cipher.c 		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
iter              352 drivers/crypto/marvell/cipher.c 						   &iter.src, flags);
iter              362 drivers/crypto/marvell/cipher.c 		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
iter              363 drivers/crypto/marvell/cipher.c 						   &iter.dst, flags);
iter              367 drivers/crypto/marvell/cipher.c 	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
iter               24 drivers/crypto/marvell/hash.c mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
iter               33 drivers/crypto/marvell/hash.c 	mv_cesa_req_dma_iter_init(&iter->base, len);
iter               34 drivers/crypto/marvell/hash.c 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
iter               35 drivers/crypto/marvell/hash.c 	iter->src.op_offset = creq->cache_ptr;
iter               39 drivers/crypto/marvell/hash.c mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
iter               41 drivers/crypto/marvell/hash.c 	iter->src.op_offset = 0;
iter               43 drivers/crypto/marvell/hash.c 	return mv_cesa_req_dma_iter_next_op(&iter->base);
iter              600 drivers/crypto/marvell/hash.c 	struct mv_cesa_ahash_dma_iter iter;
iter              623 drivers/crypto/marvell/hash.c 	mv_cesa_ahash_req_iter_init(&iter, req);
iter              633 drivers/crypto/marvell/hash.c 	if (iter.src.sg) {
iter              641 drivers/crypto/marvell/hash.c 							   &iter.base,
iter              642 drivers/crypto/marvell/hash.c 							   &iter.src, flags);
iter              646 drivers/crypto/marvell/hash.c 			frag_len = iter.base.op_len;
iter              648 drivers/crypto/marvell/hash.c 			if (!mv_cesa_ahash_req_iter_next_op(&iter))
iter              660 drivers/crypto/marvell/hash.c 		frag_len = iter.base.op_len;
iter              669 drivers/crypto/marvell/hash.c 		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
iter              697 drivers/crypto/marvell/hash.c 				  iter.base.len;
iter               15 drivers/crypto/marvell/tdma.c bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
iter               31 drivers/crypto/marvell/tdma.c 	if (sgiter->op_offset == iter->op_len)
iter              466 drivers/crypto/talitos.c 	int tail, iter;
iter              479 drivers/crypto/talitos.c 	iter = tail;
iter              480 drivers/crypto/talitos.c 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
iter              481 drivers/crypto/talitos.c 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
iter              482 drivers/crypto/talitos.c 		iter = (iter + 1) & (priv->fifo_len - 1);
iter              483 drivers/crypto/talitos.c 		if (iter == tail) {
iter              489 drivers/crypto/talitos.c 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
iter              492 drivers/crypto/talitos.c 		edesc = container_of(priv->chan[ch].fifo[iter].desc,
iter              498 drivers/crypto/talitos.c 	return priv->chan[ch].fifo[iter].desc->hdr;
iter              452 drivers/dma/fsl-edma-common.c 	u16 soff, doff, iter;
iter              480 drivers/dma/fsl-edma-common.c 	iter = period_len / nbytes;
iter              502 drivers/dma/fsl-edma-common.c 				  fsl_chan->attr, soff, nbytes, 0, iter,
iter              503 drivers/dma/fsl-edma-common.c 				  iter, doff, last_sg, true, false, true);
iter              520 drivers/dma/fsl-edma-common.c 	u16 soff, doff, iter;
iter              563 drivers/dma/fsl-edma-common.c 		iter = sg_dma_len(sg) / nbytes;
iter              568 drivers/dma/fsl-edma-common.c 					  nbytes, 0, iter, iter, doff, last_sg,
iter              574 drivers/dma/fsl-edma-common.c 					  nbytes, 0, iter, iter, doff, last_sg,
iter              106 drivers/dma/iop-adma.c 	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
iter              116 drivers/dma/iop-adma.c 	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
iter              120 drivers/dma/iop-adma.c 			iter->async_tx.cookie, iter->idx, busy,
iter              121 drivers/dma/iop-adma.c 			&iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
iter              122 drivers/dma/iop-adma.c 			async_tx_test_ack(&iter->async_tx));
iter              137 drivers/dma/iop-adma.c 		if (iter->async_tx.phys == current_desc) {
iter              139 drivers/dma/iop-adma.c 			if (busy || iop_desc_get_next_desc(iter))
iter              145 drivers/dma/iop-adma.c 			slot_cnt = iter->slot_cnt;
iter              146 drivers/dma/iop-adma.c 			slots_per_op = iter->slots_per_op;
iter              156 drivers/dma/iop-adma.c 				grp_start = iter;
iter              216 drivers/dma/iop-adma.c 		if (iter->xor_check_result && iter->async_tx.cookie)
iter              217 drivers/dma/iop-adma.c 			*iter->xor_check_result =
iter              218 drivers/dma/iop-adma.c 				iop_desc_get_zero_result(iter);
iter              221 drivers/dma/iop-adma.c 					iter, iop_chan, cookie);
iter              223 drivers/dma/iop-adma.c 		if (iop_adma_clean_slot(iter, iop_chan))
iter              259 drivers/dma/iop-adma.c 	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
iter              270 drivers/dma/iop-adma.c 		iter = iop_chan->last_used;
iter              272 drivers/dma/iop-adma.c 		iter = list_entry(&iop_chan->all_slots,
iter              277 drivers/dma/iop-adma.c 		iter, _iter, &iop_chan->all_slots, slot_node) {
iter              280 drivers/dma/iop-adma.c 		if (iter->slots_per_op) {
iter              293 drivers/dma/iop-adma.c 			if (iop_desc_is_aligned(iter, slots_per_op))
iter              294 drivers/dma/iop-adma.c 				alloc_start = iter;
iter              304 drivers/dma/iop-adma.c 			iter = alloc_start;
iter              310 drivers/dma/iop-adma.c 					iter->idx, iter->hw_desc,
iter              311 drivers/dma/iop-adma.c 					(u64)iter->async_tx.phys, slots_per_op);
iter              315 drivers/dma/iop-adma.c 					async_tx_ack(&iter->async_tx);
iter              317 drivers/dma/iop-adma.c 				list_add_tail(&iter->chain_node, &chain);
iter              318 drivers/dma/iop-adma.c 				alloc_tail = iter;
iter              319 drivers/dma/iop-adma.c 				iter->async_tx.cookie = 0;
iter              320 drivers/dma/iop-adma.c 				iter->slot_cnt = num_slots;
iter              321 drivers/dma/iop-adma.c 				iter->xor_check_result = NULL;
iter              323 drivers/dma/iop-adma.c 					iter->slots_per_op = slots_per_op - i;
iter              324 drivers/dma/iop-adma.c 					last_used = iter;
iter              325 drivers/dma/iop-adma.c 					iter = list_entry(iter->slot_node.next,
iter              717 drivers/dma/iop-adma.c 	struct iop_adma_desc_slot *iter, *_iter;
iter              723 drivers/dma/iop-adma.c 	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
iter              726 drivers/dma/iop-adma.c 		list_del(&iter->chain_node);
iter              729 drivers/dma/iop-adma.c 		iter, _iter, &iop_chan->all_slots, slot_node) {
iter              730 drivers/dma/iop-adma.c 		list_del(&iter->slot_node);
iter              731 drivers/dma/iop-adma.c 		kfree(iter);
iter              558 drivers/dma/iop-adma.h 	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
iter              569 drivers/dma/iop-adma.h 		iter = iop_hw_desc_slot_idx(hw_desc, i);
iter              570 drivers/dma/iop-adma.h 		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
iter              574 drivers/dma/iop-adma.h 		iter->desc_ctrl = u_desc_ctrl.value;
iter              677 drivers/dma/iop-adma.h 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
iter              684 drivers/dma/iop-adma.h 			iter = iop_hw_desc_slot_idx(hw_desc, i);
iter              685 drivers/dma/iop-adma.h 			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
iter              690 drivers/dma/iop-adma.h 		iter = iop_hw_desc_slot_idx(hw_desc, i);
iter              691 drivers/dma/iop-adma.h 		iter->byte_count = len;
iter              726 drivers/dma/iop-adma.h 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
iter              732 drivers/dma/iop-adma.h 		iter = iop_hw_desc_slot_idx(hw_desc, i);
iter              733 drivers/dma/iop-adma.h 		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
iter              741 drivers/dma/iop-adma.h 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
iter              747 drivers/dma/iop-adma.h 		iter = iop_hw_desc_slot_idx(hw_desc, i);
iter              748 drivers/dma/iop-adma.h 		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
iter              697 drivers/dma/mpc512x_dma.c 	int iter, i;
iter              778 drivers/dma/mpc512x_dma.c 			iter = len / tcd->nbytes;
iter              779 drivers/dma/mpc512x_dma.c 			if (iter >= 1 << 15) {
iter              784 drivers/dma/mpc512x_dma.c 			tcd->biter = iter & 0x1ff;
iter              785 drivers/dma/mpc512x_dma.c 			tcd->biter_linkch = iter >> 9;
iter              217 drivers/dma/mv_xor.c 	struct mv_xor_desc_slot *iter, *_iter;
iter              220 drivers/dma/mv_xor.c 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
iter              223 drivers/dma/mv_xor.c 		if (async_tx_test_ack(&iter->async_tx)) {
iter              224 drivers/dma/mv_xor.c 			list_move_tail(&iter->node, &mv_chan->free_slots);
iter              225 drivers/dma/mv_xor.c 			if (!list_empty(&iter->sg_tx_list)) {
iter              226 drivers/dma/mv_xor.c 				list_splice_tail_init(&iter->sg_tx_list,
iter              265 drivers/dma/mv_xor.c 	struct mv_xor_desc_slot *iter, *_iter;
iter              280 drivers/dma/mv_xor.c 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
iter              284 drivers/dma/mv_xor.c 		hw_desc = iter->hw_desc;
iter              286 drivers/dma/mv_xor.c 			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
iter              290 drivers/dma/mv_xor.c 			mv_desc_clean_slot(iter, mv_chan);
iter              293 drivers/dma/mv_xor.c 			if (iter->async_tx.phys == current_desc) {
iter              298 drivers/dma/mv_xor.c 			if (iter->async_tx.phys == current_desc) {
iter              311 drivers/dma/mv_xor.c 			iter = list_entry(mv_chan->chain.next,
iter              314 drivers/dma/mv_xor.c 			mv_chan_start_new_chain(mv_chan, iter);
iter              316 drivers/dma/mv_xor.c 			if (!list_is_last(&iter->node, &mv_chan->chain)) {
iter              321 drivers/dma/mv_xor.c 				iter = list_entry(iter->node.next,
iter              324 drivers/dma/mv_xor.c 				mv_chan_start_new_chain(mv_chan, iter);
iter              351 drivers/dma/mv_xor.c 	struct mv_xor_desc_slot *iter;
iter              356 drivers/dma/mv_xor.c 		iter = list_first_entry(&mv_chan->free_slots,
iter              360 drivers/dma/mv_xor.c 		list_move_tail(&iter->node, &mv_chan->allocated_slots);
iter              365 drivers/dma/mv_xor.c 		async_tx_ack(&iter->async_tx);
iter              366 drivers/dma/mv_xor.c 		iter->async_tx.cookie = -EBUSY;
iter              368 drivers/dma/mv_xor.c 		return iter;
iter              630 drivers/dma/mv_xor.c 	struct mv_xor_desc_slot *iter, *_iter;
iter              637 drivers/dma/mv_xor.c 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
iter              640 drivers/dma/mv_xor.c 		list_move_tail(&iter->node, &mv_chan->free_slots);
iter              642 drivers/dma/mv_xor.c 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
iter              645 drivers/dma/mv_xor.c 		list_move_tail(&iter->node, &mv_chan->free_slots);
iter              647 drivers/dma/mv_xor.c 	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
iter              650 drivers/dma/mv_xor.c 		list_move_tail(&iter->node, &mv_chan->free_slots);
iter              653 drivers/dma/mv_xor.c 		iter, _iter, &mv_chan->free_slots, node) {
iter              654 drivers/dma/mv_xor.c 		list_del(&iter->node);
iter              655 drivers/dma/mv_xor.c 		kfree(iter);
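The drivers/dma/mv_xor.c hits above (and several earlier groups such as the Intel microcode cache) rely on list_for_each_entry_safe(), which caches the next entry so the current one can be unlinked or freed mid-walk. A sketch with an invented element type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_slot {		/* hypothetical list element */
		struct list_head node;
	};

	/* Hypothetical teardown: unlink and free every entry on @head. */
	static void demo_release_all(struct list_head *head)
	{
		struct demo_slot *iter, *_iter;

		list_for_each_entry_safe(iter, _iter, head, node) {
			list_del(&iter->node);
			kfree(iter);
		}
	}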
iter              174 drivers/dma/ppc4xx/adma.c 			  struct ppc440spe_adma_desc_slot *iter)
iter              176 drivers/dma/ppc4xx/adma.c 	for (; iter; iter = iter->hw_next)
iter              177 drivers/dma/ppc4xx/adma.c 		print_cb(chan, iter->hw_desc);
iter              322 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter;
iter              336 drivers/dma/ppc4xx/adma.c 	list_for_each_entry(iter, &desc->group_list, chain_node) {
iter              337 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter              338 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter              340 drivers/dma/ppc4xx/adma.c 		if (likely(!list_is_last(&iter->chain_node,
iter              343 drivers/dma/ppc4xx/adma.c 			iter->hw_next = list_entry(iter->chain_node.next,
iter              345 drivers/dma/ppc4xx/adma.c 			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter              352 drivers/dma/ppc4xx/adma.c 			iter->hw_next = NULL;
iter              354 drivers/dma/ppc4xx/adma.c 				set_bit(PPC440SPE_DESC_INT, &iter->flags);
iter              356 drivers/dma/ppc4xx/adma.c 				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter              367 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&desc->group_list,
iter              372 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter              374 drivers/dma/ppc4xx/adma.c 			iter = list_first_entry(&iter->chain_node,
iter              380 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter              382 drivers/dma/ppc4xx/adma.c 			iter = list_first_entry(&iter->chain_node,
iter              387 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
iter              388 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter              398 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&desc->group_list,
iter              401 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter              405 drivers/dma/ppc4xx/adma.c 			iter = list_first_entry(&iter->chain_node,
iter              408 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter              414 drivers/dma/ppc4xx/adma.c 			iter = list_first_entry(&iter->chain_node,
iter              417 drivers/dma/ppc4xx/adma.c 			list_for_each_entry_from(iter, &desc->group_list,
iter              419 drivers/dma/ppc4xx/adma.c 				hw_desc = iter->hw_desc;
iter              435 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter;
iter              445 drivers/dma/ppc4xx/adma.c 	iter = list_first_entry(&desc->group_list,
iter              447 drivers/dma/ppc4xx/adma.c 	iter = list_entry(iter->chain_node.next,
iter              451 drivers/dma/ppc4xx/adma.c 		iter = list_entry(iter->chain_node.next,
iter              455 drivers/dma/ppc4xx/adma.c 	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
iter              456 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter              457 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter              458 drivers/dma/ppc4xx/adma.c 		iter->src_cnt = 0;
iter              459 drivers/dma/ppc4xx/adma.c 		iter->dst_cnt = 0;
iter              475 drivers/dma/ppc4xx/adma.c 		if (likely(!list_is_last(&iter->chain_node,
iter              478 drivers/dma/ppc4xx/adma.c 			iter->hw_next = list_entry(iter->chain_node.next,
iter              487 drivers/dma/ppc4xx/adma.c 			iter->hw_next = NULL;
iter              491 drivers/dma/ppc4xx/adma.c 			set_bit(PPC440SPE_DESC_INT, &iter->flags);
iter              940 drivers/dma/ppc4xx/adma.c 				struct ppc440spe_adma_desc_slot *iter;
iter              947 drivers/dma/ppc4xx/adma.c 				list_for_each_entry(iter, &chan->chain,
iter              949 drivers/dma/ppc4xx/adma.c 					if (iter->phys == phys)
iter              956 drivers/dma/ppc4xx/adma.c 				BUG_ON(&iter->chain_node == &chan->chain);
iter              958 drivers/dma/ppc4xx/adma.c 				if (iter->xor_check_result) {
iter              960 drivers/dma/ppc4xx/adma.c 						     &iter->flags)) {
iter              961 drivers/dma/ppc4xx/adma.c 						*iter->xor_check_result |=
iter              965 drivers/dma/ppc4xx/adma.c 						     &iter->flags)) {
iter              966 drivers/dma/ppc4xx/adma.c 						*iter->xor_check_result |=
iter             1099 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter;
iter             1112 drivers/dma/ppc4xx/adma.c 			iter = chan_last_sub[chan->device->id];
iter             1113 drivers/dma/ppc4xx/adma.c 			BUG_ON(!iter);
iter             1116 drivers/dma/ppc4xx/adma.c 			iter = chan_first_cdb[chan->device->id];
iter             1117 drivers/dma/ppc4xx/adma.c 			BUG_ON(!iter);
iter             1118 drivers/dma/ppc4xx/adma.c 			ppc440spe_dma_put_desc(chan, iter);
iter             1123 drivers/dma/ppc4xx/adma.c 		if (!iter->hw_next)
iter             1127 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
iter             1128 drivers/dma/ppc4xx/adma.c 			ppc440spe_dma_put_desc(chan, iter);
iter             1129 drivers/dma/ppc4xx/adma.c 			if (!iter->hw_next)
iter             1425 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
iter             1434 drivers/dma/ppc4xx/adma.c 	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
iter             1438 drivers/dma/ppc4xx/adma.c 	return iter;
iter             1537 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
iter             1556 drivers/dma/ppc4xx/adma.c 	list_for_each_entry_safe(iter, _iter, &chan->chain,
iter             1561 drivers/dma/ppc4xx/adma.c 		    iter->async_tx.cookie, iter->idx, busy, iter->phys,
iter             1562 drivers/dma/ppc4xx/adma.c 		    ppc440spe_desc_get_link(iter, chan), current_desc,
iter             1563 drivers/dma/ppc4xx/adma.c 		    async_tx_test_ack(&iter->async_tx));
iter             1578 drivers/dma/ppc4xx/adma.c 		if (iter->phys == current_desc) {
iter             1580 drivers/dma/ppc4xx/adma.c 			if (busy || ppc440spe_desc_get_link(iter, chan)) {
iter             1590 drivers/dma/ppc4xx/adma.c 			slot_cnt = iter->slot_cnt;
iter             1591 drivers/dma/ppc4xx/adma.c 			slots_per_op = iter->slots_per_op;
iter             1600 drivers/dma/ppc4xx/adma.c 				group_start = iter;
iter             1644 drivers/dma/ppc4xx/adma.c 		cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
iter             1647 drivers/dma/ppc4xx/adma.c 		if (ppc440spe_adma_clean_slot(iter, chan))
iter             1689 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
iter             1703 drivers/dma/ppc4xx/adma.c 		iter = chan->last_used;
iter             1705 drivers/dma/ppc4xx/adma.c 		iter = list_entry(&chan->all_slots,
iter             1708 drivers/dma/ppc4xx/adma.c 	list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
iter             1712 drivers/dma/ppc4xx/adma.c 		if (iter->slots_per_op) {
iter             1719 drivers/dma/ppc4xx/adma.c 			alloc_start = iter;
iter             1725 drivers/dma/ppc4xx/adma.c 			iter = alloc_start;
iter             1730 drivers/dma/ppc4xx/adma.c 					async_tx_ack(&iter->async_tx);
iter             1732 drivers/dma/ppc4xx/adma.c 				list_add_tail(&iter->chain_node, &chain);
iter             1733 drivers/dma/ppc4xx/adma.c 				alloc_tail = iter;
iter             1734 drivers/dma/ppc4xx/adma.c 				iter->async_tx.cookie = 0;
iter             1735 drivers/dma/ppc4xx/adma.c 				iter->hw_next = NULL;
iter             1736 drivers/dma/ppc4xx/adma.c 				iter->flags = 0;
iter             1737 drivers/dma/ppc4xx/adma.c 				iter->slot_cnt = num_slots;
iter             1738 drivers/dma/ppc4xx/adma.c 				iter->xor_check_result = NULL;
iter             1740 drivers/dma/ppc4xx/adma.c 					iter->slots_per_op = slots_per_op - i;
iter             1741 drivers/dma/ppc4xx/adma.c 					last_used = iter;
iter             1742 drivers/dma/ppc4xx/adma.c 					iter = list_entry(iter->slot_node.next,
iter             2103 drivers/dma/ppc4xx/adma.c 		struct ppc440spe_adma_desc_slot *iter;
iter             2113 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&sw_desc->group_list,
iter             2116 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2118 drivers/dma/ppc4xx/adma.c 		iter->hw_next = list_entry(iter->chain_node.next,
iter             2121 drivers/dma/ppc4xx/adma.c 		clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2122 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter             2125 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan,
iter             2127 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
iter             2128 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
iter             2130 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
iter             2131 drivers/dma/ppc4xx/adma.c 		iter->unmap_len = len;
iter             2137 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&iter->chain_node,
iter             2140 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2141 drivers/dma/ppc4xx/adma.c 		iter->hw_next = NULL;
iter             2143 drivers/dma/ppc4xx/adma.c 			set_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2145 drivers/dma/ppc4xx/adma.c 			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2147 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter             2149 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_addr(iter, chan, 0,
iter             2151 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan,
iter             2154 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
iter             2156 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
iter             2157 drivers/dma/ppc4xx/adma.c 		iter->unmap_len = len;
iter             2189 drivers/dma/ppc4xx/adma.c 		struct ppc440spe_adma_desc_slot *iter;
iter             2197 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&sw_desc->group_list,
iter             2200 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2201 drivers/dma/ppc4xx/adma.c 		iter->hw_next = list_entry(iter->chain_node.next,
iter             2204 drivers/dma/ppc4xx/adma.c 		clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2205 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter             2208 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
iter             2210 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan, 0,
iter             2212 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
iter             2214 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
iter             2215 drivers/dma/ppc4xx/adma.c 		iter->unmap_len = len;
iter             2219 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&iter->chain_node,
iter             2222 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2224 drivers/dma/ppc4xx/adma.c 		iter->hw_next = list_entry(iter->chain_node.next,
iter             2228 drivers/dma/ppc4xx/adma.c 			set_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2230 drivers/dma/ppc4xx/adma.c 			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2232 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter             2234 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
iter             2236 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
iter             2238 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_mult(iter, chan,	DMA_CUED_MULT1_OFF,
iter             2240 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
iter             2241 drivers/dma/ppc4xx/adma.c 		iter->unmap_len = len;
iter             2247 drivers/dma/ppc4xx/adma.c 		iter = list_first_entry(&iter->chain_node,
iter             2250 drivers/dma/ppc4xx/adma.c 		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2251 drivers/dma/ppc4xx/adma.c 		iter->hw_next = NULL;
iter             2253 drivers/dma/ppc4xx/adma.c 			set_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2255 drivers/dma/ppc4xx/adma.c 			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
iter             2257 drivers/dma/ppc4xx/adma.c 		hw_desc = iter->hw_desc;
iter             2259 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
iter             2261 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
iter             2263 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
iter             2265 drivers/dma/ppc4xx/adma.c 		ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
iter             2266 drivers/dma/ppc4xx/adma.c 		iter->unmap_len = len;
iter             2281 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
iter             2408 drivers/dma/ppc4xx/adma.c 		list_for_each_entry(iter, &sw_desc->group_list,
iter             2410 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter,
iter             2412 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = len;
iter             2426 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
iter             2448 drivers/dma/ppc4xx/adma.c 		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
iter             2449 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
iter             2451 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
iter             2453 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = len;
iter             2455 drivers/dma/ppc4xx/adma.c 			ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
iter             2456 drivers/dma/ppc4xx/adma.c 			iter->rxor_cursor.len = len;
iter             2457 drivers/dma/ppc4xx/adma.c 			iter->descs_per_op = descs_per_op;
iter             2460 drivers/dma/ppc4xx/adma.c 		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
iter             2463 drivers/dma/ppc4xx/adma.c 				ppc440spe_adma_init_dma2rxor_slot(iter, src,
iter             2465 drivers/dma/ppc4xx/adma.c 			if (likely(!list_is_last(&iter->chain_node,
iter             2468 drivers/dma/ppc4xx/adma.c 				iter->hw_next =
iter             2469 drivers/dma/ppc4xx/adma.c 					list_entry(iter->chain_node.next,
iter             2472 drivers/dma/ppc4xx/adma.c 				ppc440spe_xor_set_link(iter, iter->hw_next);
iter             2475 drivers/dma/ppc4xx/adma.c 				iter->hw_next = NULL;
iter             2591 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *sw_desc, *iter;
iter             2629 drivers/dma/ppc4xx/adma.c 		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
iter             2630 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
iter             2632 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = len;
iter             2639 drivers/dma/ppc4xx/adma.c 			iter = sw_desc->group_head;
iter             2640 drivers/dma/ppc4xx/adma.c 			chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
iter             2641 drivers/dma/ppc4xx/adma.c 			memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2642 drivers/dma/ppc4xx/adma.c 			iter->hw_next = list_entry(iter->chain_node.next,
iter             2645 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter             2647 drivers/dma/ppc4xx/adma.c 			iter->src_cnt = 0;
iter             2648 drivers/dma/ppc4xx/adma.c 			iter->dst_cnt = 0;
iter             2649 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan, 0,
iter             2651 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
iter             2652 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
iter             2654 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = 0;
iter             2662 drivers/dma/ppc4xx/adma.c 			iter = list_first_entry(&sw_desc->group_list,
iter             2665 drivers/dma/ppc4xx/adma.c 			chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
iter             2668 drivers/dma/ppc4xx/adma.c 				iter = list_entry(iter->chain_node.next,
iter             2673 drivers/dma/ppc4xx/adma.c 			memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
iter             2674 drivers/dma/ppc4xx/adma.c 			iter->hw_next = list_entry(iter->chain_node.next,
iter             2677 drivers/dma/ppc4xx/adma.c 			hw_desc = iter->hw_desc;
iter             2679 drivers/dma/ppc4xx/adma.c 			iter->src_cnt = 0;
iter             2680 drivers/dma/ppc4xx/adma.c 			iter->dst_cnt = 0;
iter             2681 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan, 0,
iter             2683 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
iter             2684 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
iter             2686 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = 0;
iter             2696 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_reverse(iter, &sw_desc->group_list,
iter             2706 drivers/dma/ppc4xx/adma.c 						&iter->flags);
iter             2709 drivers/dma/ppc4xx/adma.c 						&iter->flags);
iter             2714 drivers/dma/ppc4xx/adma.c 						&iter->flags);
iter             2717 drivers/dma/ppc4xx/adma.c 						&iter->flags);
iter             2720 drivers/dma/ppc4xx/adma.c 			iter->xor_check_result = pqres;
iter             2726 drivers/dma/ppc4xx/adma.c 			*iter->xor_check_result = 0;
iter             2727 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
iter             2735 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
iter             2740 drivers/dma/ppc4xx/adma.c 			chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
iter             2741 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_src_addr(iter, chan, 0,
iter             2747 drivers/dma/ppc4xx/adma.c 				ppc440spe_desc_set_src_mult(iter, chan,
iter             2811 drivers/dma/ppc4xx/adma.c static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
iter             2818 drivers/dma/ppc4xx/adma.c 	ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
iter             2821 drivers/dma/ppc4xx/adma.c 	ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
iter             2824 drivers/dma/ppc4xx/adma.c 	ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
iter             2835 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter;
iter             2869 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc, index);
iter             2872 drivers/dma/ppc4xx/adma.c 				list_for_each_entry_from(iter,
iter             2874 drivers/dma/ppc4xx/adma.c 					ppc440spe_desc_set_dest_addr(iter, chan,
iter             2878 drivers/dma/ppc4xx/adma.c 				list_for_each_entry_from(iter,
iter             2880 drivers/dma/ppc4xx/adma.c 					ppc440spe_desc_set_dest_addr(iter, chan,
iter             2882 drivers/dma/ppc4xx/adma.c 					ppc440spe_desc_set_dest_addr(iter, chan,
iter             2894 drivers/dma/ppc4xx/adma.c 					iter = ppc440spe_get_group_entry(
iter             2896 drivers/dma/ppc4xx/adma.c 					ppc440spe_adma_pq_zero_op(iter, chan,
iter             2902 drivers/dma/ppc4xx/adma.c 					iter = ppc440spe_get_group_entry(
iter             2904 drivers/dma/ppc4xx/adma.c 					ppc440spe_adma_pq_zero_op(iter, chan,
iter             2926 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc, index++);
iter             2927 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan,
iter             2932 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc,
iter             2934 drivers/dma/ppc4xx/adma.c 				ppc440spe_desc_set_dest_addr(iter, chan,
iter             2942 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc,
iter             2946 drivers/dma/ppc4xx/adma.c 					list_for_each_entry_from(iter,
iter             2950 drivers/dma/ppc4xx/adma.c 							iter, chan,
iter             2956 drivers/dma/ppc4xx/adma.c 					list_for_each_entry_from(iter,
iter             2960 drivers/dma/ppc4xx/adma.c 							iter, chan,
iter             2964 drivers/dma/ppc4xx/adma.c 							iter, chan,
iter             2990 drivers/dma/ppc4xx/adma.c 		iter = ppc440spe_get_group_entry(sw_desc, 0);
iter             2992 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan,
iter             2995 drivers/dma/ppc4xx/adma.c 			iter = list_entry(iter->chain_node.next,
iter             3002 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc,
iter             3005 drivers/dma/ppc4xx/adma.c 				ppc440spe_desc_set_dest_addr(iter,
iter             3007 drivers/dma/ppc4xx/adma.c 				iter = list_entry(iter->chain_node.next,
iter             3025 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter, *end;
iter             3044 drivers/dma/ppc4xx/adma.c 	iter = ppc440spe_get_group_entry(sw_desc, idx);
iter             3048 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_from(iter, &sw_desc->group_list,
iter             3050 drivers/dma/ppc4xx/adma.c 			if (unlikely(iter == end))
iter             3052 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan,
iter             3054 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan,
iter             3060 drivers/dma/ppc4xx/adma.c 		list_for_each_entry_from(iter, &sw_desc->group_list,
iter             3062 drivers/dma/ppc4xx/adma.c 			if (unlikely(iter == end))
iter             3064 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_dest_addr(iter, chan,
iter             3103 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter = NULL;
iter             3141 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc, 0);
iter             3147 drivers/dma/ppc4xx/adma.c 				iter = NULL;
iter             3153 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc,
iter             3168 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc,
iter             3172 drivers/dma/ppc4xx/adma.c 		if (likely(iter)) {
iter             3173 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
iter             3181 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc, 1);
iter             3182 drivers/dma/ppc4xx/adma.c 				ppc440spe_desc_set_src_addr(iter, chan, 0,
iter             3190 drivers/dma/ppc4xx/adma.c 		iter = sw_desc->group_head;
iter             3191 drivers/dma/ppc4xx/adma.c 		if (iter->dst_cnt == 2) {
iter             3193 drivers/dma/ppc4xx/adma.c 			ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
iter             3196 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc,
iter             3199 drivers/dma/ppc4xx/adma.c 		ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
iter             3455 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
iter             3468 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc,
iter             3478 drivers/dma/ppc4xx/adma.c 				iter = ppc440spe_get_group_entry(sw_desc,
iter             3497 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc, index + znum);
iter             3502 drivers/dma/ppc4xx/adma.c 		if (likely(iter)) {
iter             3503 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_src_mult(iter, chan,
iter             3518 drivers/dma/ppc4xx/adma.c 		iter = sw_desc->group_head;
iter             3521 drivers/dma/ppc4xx/adma.c 			ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
iter             3524 drivers/dma/ppc4xx/adma.c 			iter = ppc440spe_get_group_entry(sw_desc,
iter             3527 drivers/dma/ppc4xx/adma.c 		ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
iter             3538 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *iter, *_iter;
iter             3545 drivers/dma/ppc4xx/adma.c 	list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
iter             3548 drivers/dma/ppc4xx/adma.c 		list_del(&iter->chain_node);
iter             3550 drivers/dma/ppc4xx/adma.c 	list_for_each_entry_safe_reverse(iter, _iter,
iter             3552 drivers/dma/ppc4xx/adma.c 		list_del(&iter->slot_node);
iter             3553 drivers/dma/ppc4xx/adma.c 		kfree(iter);
iter             3704 drivers/dma/ppc4xx/adma.c 	struct ppc440spe_adma_desc_slot *sw_desc, *iter;
iter             3722 drivers/dma/ppc4xx/adma.c 		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
iter             3723 drivers/dma/ppc4xx/adma.c 			ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
iter             3724 drivers/dma/ppc4xx/adma.c 			iter->unmap_len = PAGE_SIZE;
iter               68 drivers/dma/qcom/hidma_ll.c #define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
iter               70 drivers/dma/qcom/hidma_ll.c 	iter += size;						\
iter               71 drivers/dma/qcom/hidma_ll.c 	if (iter >= ring_size)					\
iter               72 drivers/dma/qcom/hidma_ll.c 		iter -= ring_size;				\
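
The hidma_ll.c macro above advances a ring offset and wraps it without a modulo; a small sketch of the same idiom as a function (names are hypothetical, and it assumes size never exceeds ring_size):

/* Advance a byte offset through a descriptor ring, wrapping at the end. */
static unsigned int demo_ring_advance(unsigned int iter, unsigned int size,
                                      unsigned int ring_size)
{
        iter += size;
        if (iter >= ring_size)
                iter -= ring_size;      /* assumes size <= ring_size */
        return iter;
}
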
iter              637 drivers/gpu/drm/drm_connector.c 				   struct drm_connector_list_iter *iter)
iter              639 drivers/gpu/drm/drm_connector.c 	iter->dev = dev;
iter              640 drivers/gpu/drm/drm_connector.c 	iter->conn = NULL;
iter              672 drivers/gpu/drm/drm_connector.c drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
iter              674 drivers/gpu/drm/drm_connector.c 	struct drm_connector *old_conn = iter->conn;
iter              675 drivers/gpu/drm/drm_connector.c 	struct drm_mode_config *config = &iter->dev->mode_config;
iter              684 drivers/gpu/drm/drm_connector.c 			iter->conn = NULL;
iter              689 drivers/gpu/drm/drm_connector.c 		iter->conn = list_entry(lhead, struct drm_connector, head);
iter              692 drivers/gpu/drm/drm_connector.c 	} while (!kref_get_unless_zero(&iter->conn->base.refcount));
iter              698 drivers/gpu/drm/drm_connector.c 	return iter->conn;
iter              711 drivers/gpu/drm/drm_connector.c void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
iter              713 drivers/gpu/drm/drm_connector.c 	struct drm_mode_config *config = &iter->dev->mode_config;
iter              716 drivers/gpu/drm/drm_connector.c 	iter->dev = NULL;
iter              717 drivers/gpu/drm/drm_connector.c 	if (iter->conn) {
iter              719 drivers/gpu/drm/drm_connector.c 		__drm_connector_put_safe(iter->conn);
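
The drm_connector.c hits above implement the connector list iterator; callers normally consume it through the begin/for-each/end trio. A hedged sketch of typical usage (the loop body here is only a placeholder):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

static void demo_walk_connectors(struct drm_device *dev)
{
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *connector;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                /* connector holds a reference for the duration of the body */
        }
        drm_connector_list_iter_end(&conn_iter);
}
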
iter              276 drivers/gpu/drm/drm_damage_helper.c drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter              280 drivers/gpu/drm/drm_damage_helper.c 	memset(iter, 0, sizeof(*iter));
iter              285 drivers/gpu/drm/drm_damage_helper.c 	iter->clips = drm_helper_get_plane_damage_clips(state);
iter              286 drivers/gpu/drm/drm_damage_helper.c 	iter->num_clips = drm_plane_get_damage_clips_count(state);
iter              289 drivers/gpu/drm/drm_damage_helper.c 	iter->plane_src.x1 = state->src.x1 >> 16;
iter              290 drivers/gpu/drm/drm_damage_helper.c 	iter->plane_src.y1 = state->src.y1 >> 16;
iter              291 drivers/gpu/drm/drm_damage_helper.c 	iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
iter              292 drivers/gpu/drm/drm_damage_helper.c 	iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
iter              294 drivers/gpu/drm/drm_damage_helper.c 	if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
iter              295 drivers/gpu/drm/drm_damage_helper.c 		iter->clips = NULL;
iter              296 drivers/gpu/drm/drm_damage_helper.c 		iter->num_clips = 0;
iter              297 drivers/gpu/drm/drm_damage_helper.c 		iter->full_update = true;
iter              319 drivers/gpu/drm/drm_damage_helper.c drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
iter              324 drivers/gpu/drm/drm_damage_helper.c 	if (iter->full_update) {
iter              325 drivers/gpu/drm/drm_damage_helper.c 		*rect = iter->plane_src;
iter              326 drivers/gpu/drm/drm_damage_helper.c 		iter->full_update = false;
iter              330 drivers/gpu/drm/drm_damage_helper.c 	while (iter->curr_clip < iter->num_clips) {
iter              331 drivers/gpu/drm/drm_damage_helper.c 		*rect = iter->clips[iter->curr_clip];
iter              332 drivers/gpu/drm/drm_damage_helper.c 		iter->curr_clip++;
iter              334 drivers/gpu/drm/drm_damage_helper.c 		if (drm_rect_intersect(rect, &iter->plane_src)) {
iter              363 drivers/gpu/drm/drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              372 drivers/gpu/drm/drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
iter              373 drivers/gpu/drm/drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip) {
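
The drm_damage_helper.c hits above are the damage iterator's init/next pair; drivers consume it through drm_atomic_for_each_plane_damage(). A minimal usage sketch (the flush step is a placeholder comment):

#include <drm/drm_damage_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

static void demo_flush_damage(struct drm_plane_state *old_state,
                              struct drm_plane_state *new_state)
{
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;

        drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
                /* flush clip.x1..clip.x2 x clip.y1..clip.y2 to the device */
        }
}
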
iter              261 drivers/gpu/drm/drm_dp_aux_dev.c 	struct drm_dp_aux_dev *iter, *aux_dev = NULL;
iter              270 drivers/gpu/drm/drm_dp_aux_dev.c 	idr_for_each_entry(&aux_idr, iter, id) {
iter              271 drivers/gpu/drm/drm_dp_aux_dev.c 		if (iter->aux == aux) {
iter              272 drivers/gpu/drm/drm_dp_aux_dev.c 			aux_dev = iter;
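
The drm_dp_aux_dev.c hits above walk an IDR with idr_for_each_entry(); a small sketch of that lookup shape, with a hypothetical payload type and key field:

#include <linux/idr.h>

struct demo_obj {
        int key;                        /* hypothetical match criterion */
};

/* Return the first IDR entry whose key matches, or NULL. */
static struct demo_obj *demo_idr_find_by_key(struct idr *idr, int key)
{
        struct demo_obj *iter;
        int id;

        idr_for_each_entry(idr, iter, id) {
                if (iter->key == key)
                        return iter;
        }
        return NULL;
}
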
iter             1374 drivers/gpu/drm/drm_syncobj.c 			struct dma_fence *iter, *last_signaled = NULL;
iter             1376 drivers/gpu/drm/drm_syncobj.c 			dma_fence_chain_for_each(iter, fence) {
iter             1377 drivers/gpu/drm/drm_syncobj.c 				if (iter->context != fence->context) {
iter             1378 drivers/gpu/drm/drm_syncobj.c 					dma_fence_put(iter);
iter             1384 drivers/gpu/drm/drm_syncobj.c 				last_signaled = dma_fence_get(iter);
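
The drm_syncobj.c hits above walk a dma_fence chain; the iterator macro keeps a reference on the current fence, so breaking out early must drop it. A hedged sketch of that pattern (the context match mirrors the listing; the helper name is made up):

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Find and return (with a reference) the chain link for a given context. */
static struct dma_fence *demo_find_context(struct dma_fence *chain, u64 ctx)
{
        struct dma_fence *iter, *match = NULL;

        dma_fence_chain_for_each(iter, chain) {
                if (iter->context == ctx) {
                        match = dma_fence_get(iter);
                        dma_fence_put(iter);    /* drop the walker's reference */
                        break;
                }
        }
        return match;
}
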
iter              145 drivers/gpu/drm/drm_vma_manager.c 	struct rb_node *iter;
iter              148 drivers/gpu/drm/drm_vma_manager.c 	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
iter              151 drivers/gpu/drm/drm_vma_manager.c 	while (likely(iter)) {
iter              152 drivers/gpu/drm/drm_vma_manager.c 		node = rb_entry(iter, struct drm_mm_node, rb);
iter              155 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_right;
iter              160 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_left;
iter              265 drivers/gpu/drm/drm_vma_manager.c 	struct rb_node **iter;
iter              278 drivers/gpu/drm/drm_vma_manager.c 	iter = &node->vm_files.rb_node;
iter              280 drivers/gpu/drm/drm_vma_manager.c 	while (likely(*iter)) {
iter              281 drivers/gpu/drm/drm_vma_manager.c 		parent = *iter;
iter              282 drivers/gpu/drm/drm_vma_manager.c 		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
iter              288 drivers/gpu/drm/drm_vma_manager.c 			iter = &(*iter)->rb_right;
iter              290 drivers/gpu/drm/drm_vma_manager.c 			iter = &(*iter)->rb_left;
iter              301 drivers/gpu/drm/drm_vma_manager.c 	rb_link_node(&new->vm_rb, parent, iter);
iter              329 drivers/gpu/drm/drm_vma_manager.c 	struct rb_node *iter;
iter              333 drivers/gpu/drm/drm_vma_manager.c 	iter = node->vm_files.rb_node;
iter              334 drivers/gpu/drm/drm_vma_manager.c 	while (likely(iter)) {
iter              335 drivers/gpu/drm/drm_vma_manager.c 		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
iter              343 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_right;
iter              345 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_left;
iter              370 drivers/gpu/drm/drm_vma_manager.c 	struct rb_node *iter;
iter              374 drivers/gpu/drm/drm_vma_manager.c 	iter = node->vm_files.rb_node;
iter              375 drivers/gpu/drm/drm_vma_manager.c 	while (likely(iter)) {
iter              376 drivers/gpu/drm/drm_vma_manager.c 		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
iter              380 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_right;
iter              382 drivers/gpu/drm/drm_vma_manager.c 			iter = iter->rb_left;
iter              387 drivers/gpu/drm/drm_vma_manager.c 	return iter;
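
The drm_vma_manager.c hits above descend an rbtree and link a new node where the search ended; a compact sketch of that insert pattern with a hypothetical key field:

#include <linux/rbtree.h>

struct demo_node {
        struct rb_node rb;
        unsigned long key;
};

/* Descend to the correct leaf slot, then link and rebalance. */
static void demo_rb_insert(struct rb_root *root, struct demo_node *new)
{
        struct rb_node **iter = &root->rb_node, *parent = NULL;

        while (*iter) {
                struct demo_node *entry = rb_entry(*iter, struct demo_node, rb);

                parent = *iter;
                if (new->key < entry->key)
                        iter = &(*iter)->rb_left;
                else
                        iter = &(*iter)->rb_right;
        }

        rb_link_node(&new->rb, parent, iter);
        rb_insert_color(&new->rb, root);
}
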
iter               67 drivers/gpu/drm/etnaviv/etnaviv_dump.c static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
iter               70 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	struct etnaviv_dump_object_header *hdr = iter->hdr;
iter               74 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
iter               75 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	hdr->file_size = cpu_to_le32(data_end - iter->data);
iter               77 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter->hdr++;
iter               78 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter->data += hdr->file_size;
iter               81 drivers/gpu/drm/etnaviv/etnaviv_dump.c static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
iter               84 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	struct etnaviv_dump_registers *reg = iter->data;
iter               92 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
iter               95 drivers/gpu/drm/etnaviv/etnaviv_dump.c static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
iter               98 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_iommu_dump(mmu, iter->data);
iter              100 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
iter              103 drivers/gpu/drm/etnaviv/etnaviv_dump.c static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
iter              106 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	memcpy(iter->data, ptr, size);
iter              108 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter->hdr->iova = cpu_to_le64(iova);
iter              110 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_header(iter, type, iter->data + size);
iter              116 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	struct core_dump_iterator iter;
iter              154 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	file_size += sizeof(*iter.hdr) * n_obj;
iter              157 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
iter              159 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	if (!iter.start) {
iter              166 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter.hdr = iter.start;
iter              167 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter.data = &iter.hdr[n_obj];
iter              169 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	memset(iter.hdr, 0, iter.data - iter.start);
iter              171 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_registers(&iter, gpu);
iter              172 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
iter              173 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
iter              178 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
iter              187 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		bomap_start = bomap = iter.data;
iter              189 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
iter              210 drivers/gpu/drm/etnaviv/etnaviv_dump.c 			iter.hdr->data[0] = bomap - bomap_start;
iter              216 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		iter.hdr->iova = cpu_to_le64(vram->iova);
iter              220 drivers/gpu/drm/etnaviv/etnaviv_dump.c 			memcpy(iter.data, vaddr, obj->base.size);
iter              222 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
iter              226 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
iter              228 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
iter              473 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	if (domain->iter >= nr_domains)
iter              476 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	dom = pm_domain(gpu, domain->iter);
iter              480 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	domain->id = domain->iter;
iter              484 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	domain->iter++;
iter              485 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	if (domain->iter == nr_domains)
iter              486 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 		domain->iter = 0xff;
iter              505 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	if (signal->iter >= dom->nr_signals)
iter              508 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	sig = &dom->signal[signal->iter];
iter              510 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	signal->id = signal->iter;
iter              513 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	signal->iter++;
iter              514 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 	if (signal->iter == dom->nr_signals)
iter              515 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c 		signal->iter = 0xffff;
iter              361 drivers/gpu/drm/i915/display/intel_display.h #define for_each_intel_connector_iter(intel_connector, iter) \
iter              362 drivers/gpu/drm/i915/display/intel_display.h 	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
iter               99 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct radix_tree_iter iter;
iter              105 drivers/gpu/drm/i915/gem/i915_gem_context.c 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
iter              119 drivers/gpu/drm/i915/gem/i915_gem_context.c 			if (lut->handle != iter.index)
iter              130 drivers/gpu/drm/i915/gem/i915_gem_context.c 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
iter              144 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct radix_tree_iter iter;
iter              148 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
iter              149 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
iter              378 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
iter              395 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	if (n < READ_ONCE(iter->sg_idx))
iter              398 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_lock(&iter->lock);
iter              405 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	sg = iter->sg_pos;
iter              406 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	idx = iter->sg_idx;
iter              422 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		ret = radix_tree_insert(&iter->radix, idx, sg);
iter              428 drivers/gpu/drm/i915/gem/i915_gem_pages.c 			ret = radix_tree_insert(&iter->radix, idx + i, entry);
iter              439 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	iter->sg_pos = sg;
iter              440 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	iter->sg_idx = idx;
iter              442 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	mutex_unlock(&iter->lock);
iter              462 drivers/gpu/drm/i915/gem/i915_gem_pages.c 	sg = radix_tree_lookup(&iter->radix, n);
iter              475 drivers/gpu/drm/i915/gem/i915_gem_pages.c 		sg = radix_tree_lookup(&iter->radix, base);
iter              760 drivers/gpu/drm/i915/gvt/gtt.c 	struct radix_tree_iter iter;
iter              765 drivers/gpu/drm/i915/gvt/gtt.c 	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
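
The radix-tree hits above (i915 gem context/pages and gvt) all use radix_tree_for_each_slot() to visit populated slots; a minimal sketch, assuming the caller already holds the tree's lock or rcu_read_lock():

#include <linux/radix-tree.h>

/* Count populated entries; caller must hold the tree lock or RCU read lock. */
static unsigned int demo_radix_count(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned int count = 0;

        radix_tree_for_each_slot(slot, root, &iter, 0) {
                if (radix_tree_deref_slot(slot))
                        count++;
        }
        return count;
}
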
iter             1169 drivers/gpu/drm/i915/i915_gem_gtt.c 		      struct sgt_dma *iter,
iter             1181 drivers/gpu/drm/i915/i915_gem_gtt.c 		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
iter             1182 drivers/gpu/drm/i915/i915_gem_gtt.c 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter             1184 drivers/gpu/drm/i915/i915_gem_gtt.c 		iter->dma += I915_GTT_PAGE_SIZE;
iter             1185 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (iter->dma >= iter->max) {
iter             1186 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter->sg = __sg_next(iter->sg);
iter             1187 drivers/gpu/drm/i915/i915_gem_gtt.c 			if (!iter->sg) {
iter             1192 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter->dma = sg_dma_address(iter->sg);
iter             1193 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter->max = iter->dma + iter->sg->length;
iter             1215 drivers/gpu/drm/i915/i915_gem_gtt.c 				   struct sgt_dma *iter,
iter             1221 drivers/gpu/drm/i915/i915_gem_gtt.c 	dma_addr_t rem = iter->sg->length;
iter             1237 drivers/gpu/drm/i915/i915_gem_gtt.c 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
iter             1254 drivers/gpu/drm/i915/i915_gem_gtt.c 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
iter             1263 drivers/gpu/drm/i915/i915_gem_gtt.c 			GEM_BUG_ON(iter->sg->length < page_size);
iter             1264 drivers/gpu/drm/i915/i915_gem_gtt.c 			vaddr[index++] = encode | iter->dma;
iter             1267 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter->dma += page_size;
iter             1269 drivers/gpu/drm/i915/i915_gem_gtt.c 			if (iter->dma >= iter->max) {
iter             1270 drivers/gpu/drm/i915/i915_gem_gtt.c 				iter->sg = __sg_next(iter->sg);
iter             1271 drivers/gpu/drm/i915/i915_gem_gtt.c 				if (!iter->sg)
iter             1274 drivers/gpu/drm/i915/i915_gem_gtt.c 				rem = iter->sg->length;
iter             1275 drivers/gpu/drm/i915/i915_gem_gtt.c 				iter->dma = sg_dma_address(iter->sg);
iter             1276 drivers/gpu/drm/i915/i915_gem_gtt.c 				iter->max = iter->dma + rem;
iter             1279 drivers/gpu/drm/i915/i915_gem_gtt.c 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
iter             1284 drivers/gpu/drm/i915/i915_gem_gtt.c 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
iter             1300 drivers/gpu/drm/i915/i915_gem_gtt.c 		      !iter->sg && IS_ALIGNED(vma->node.start +
iter             1331 drivers/gpu/drm/i915/i915_gem_gtt.c 	} while (iter->sg);
iter             1340 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct sgt_dma iter = sgt_dma(vma);
iter             1343 drivers/gpu/drm/i915/i915_gem_gtt.c 		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
iter             1351 drivers/gpu/drm/i915/i915_gem_gtt.c 			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
iter             1654 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct sgt_dma iter = sgt_dma(vma);
iter             1661 drivers/gpu/drm/i915/i915_gem_gtt.c 		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
iter             1662 drivers/gpu/drm/i915/i915_gem_gtt.c 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
iter             1664 drivers/gpu/drm/i915/i915_gem_gtt.c 		iter.dma += I915_GTT_PAGE_SIZE;
iter             1665 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (iter.dma == iter.max) {
iter             1666 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter.sg = __sg_next(iter.sg);
iter             1667 drivers/gpu/drm/i915/i915_gem_gtt.c 			if (!iter.sg)
iter             1670 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter.dma = sg_dma_address(iter.sg);
iter             1671 drivers/gpu/drm/i915/i915_gem_gtt.c 			iter.max = iter.dma + iter.sg->length;
iter             2246 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct sgt_iter iter;
iter             2248 drivers/gpu/drm/i915/i915_gem_gtt.c 	for_each_sgt_dma(addr, iter, vma->pages)
iter             3513 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct scatterlist *sg, *iter;
iter             3526 drivers/gpu/drm/i915/i915_gem_gtt.c 	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
iter             3527 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(!iter);
iter             3534 drivers/gpu/drm/i915/i915_gem_gtt.c 		len = min(iter->length - (offset << PAGE_SHIFT),
iter             3538 drivers/gpu/drm/i915/i915_gem_gtt.c 			sg_dma_address(iter) + (offset << PAGE_SHIFT);
iter             3551 drivers/gpu/drm/i915/i915_gem_gtt.c 		iter = __sg_next(iter);
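
sgt_dma in i915_gem_gtt.c above is a driver-private cursor over a DMA-mapped scatterlist; the generic equivalent walks the mapped entries with for_each_sg(). A hedged sketch of that generic walk (the PTE programming step is only a comment):

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void demo_walk_dma(struct sg_table *sgt)
{
        struct scatterlist *sg;
        unsigned int i;

        /* sgt->nents is the number of DMA-mapped entries. */
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                dma_addr_t addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /* program addr/len into the next page-table entries */
                (void)addr;
                (void)len;
        }
}
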
iter              455 drivers/gpu/drm/i915/i915_gem_gtt.h #define gen6_for_each_pde(pt, pd, start, length, iter)			\
iter              456 drivers/gpu/drm/i915/i915_gem_gtt.h 	for (iter = gen6_pde_index(start);				\
iter              457 drivers/gpu/drm/i915/i915_gem_gtt.h 	     length > 0 && iter < I915_PDES &&				\
iter              458 drivers/gpu/drm/i915/i915_gem_gtt.h 		     (pt = i915_pt_entry(pd, iter), true);		\
iter              461 drivers/gpu/drm/i915/i915_gem_gtt.h 		    start += temp, length -= temp; }), ++iter)
iter              463 drivers/gpu/drm/i915/i915_gem_gtt.h #define gen6_for_all_pdes(pt, pd, iter)					\
iter              464 drivers/gpu/drm/i915/i915_gem_gtt.h 	for (iter = 0;							\
iter              465 drivers/gpu/drm/i915/i915_gem_gtt.h 	     iter < I915_PDES &&					\
iter              466 drivers/gpu/drm/i915/i915_gem_gtt.h 		     (pt = i915_pt_entry(pd, iter), true);		\
iter              467 drivers/gpu/drm/i915/i915_gem_gtt.h 	     ++iter)
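
The gen6_for_each_pde()/gen6_for_all_pdes() macros above bind the page table for the current PDE index on every step. A purely illustrative use of the second macro; the i915 structure types are driver internals and the body is hypothetical:

/* Hypothetical: count the populated page tables of a GEN6 page directory. */
static unsigned int demo_count_pts(struct i915_page_directory *pd)
{
        struct i915_page_table *pt;
        unsigned int pde, count = 0;

        gen6_for_all_pdes(pt, pd, pde)
                if (pt)
                        count++;

        return count;
}
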
iter               71 drivers/gpu/drm/i915/i915_gpu_error.c 		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
iter               72 drivers/gpu/drm/i915/i915_gpu_error.c 		e->iter += e->bytes;
iter              807 drivers/gpu/drm/i915/i915_gpu_error.c 		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
iter              965 drivers/gpu/drm/i915/i915_gpu_error.c 	struct sgt_iter iter;
iter              992 drivers/gpu/drm/i915/i915_gpu_error.c 	for_each_sgt_dma(dma, iter, vma->pages) {
iter              191 drivers/gpu/drm/i915/i915_gpu_error.h 	loff_t iter;
iter              249 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	struct sg_page_iter iter;
iter              265 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
iter              266 drivers/gpu/drm/mediatek/mtk_drm_gem.c 		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
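
The mtk_drm_gem.c hits above collect backing pages with for_each_sg_page(); the same loop completed with its declarations (allocation and sizing of the pages array are assumed to have happened elsewhere):

#include <linux/scatterlist.h>

static void demo_collect_pages(struct sg_table *sgt, struct page **pages)
{
        struct sg_page_iter iter;
        unsigned int i = 0;

        /* Visit every page backing the (unmapped) scatter-gather table. */
        for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0)
                pages[i++] = sg_page_iter_page(&iter);
}
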
iter               44 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		struct dpu_rm_hw_iter *iter,
iter               48 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	memset(iter, 0, sizeof(*iter));
iter               49 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	iter->enc_id = enc_id;
iter               50 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	iter->type = type;
iter              314 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	struct dpu_rm_hw_iter iter;
iter              339 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
iter              340 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	while (_dpu_rm_get_hw_locked(rm, &iter)) {
iter              341 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		if (iter.blk->id == lm_cfg->pingpong) {
iter              342 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 			*pp = iter.blk;
iter              435 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	struct dpu_rm_hw_iter iter;
iter              446 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
iter              447 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	while (_dpu_rm_get_hw_locked(rm, &iter)) {
iter              448 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
iter              452 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		if (RESERVED_BY_OTHER(iter.blk, enc_id))
iter              457 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
iter              462 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		ctls[i] = iter.blk;
iter              463 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		DPU_DEBUG("ctl %d match\n", iter.blk->id);
iter              486 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	struct dpu_rm_hw_iter iter;
iter              490 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	dpu_rm_init_hw_iter(&iter, 0, type);
iter              491 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	while (_dpu_rm_get_hw_locked(rm, &iter)) {
iter              492 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		if (iter.blk->id != id)
iter              495 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
iter              500 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		iter.blk->enc_id = enc_id;
iter              501 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 		trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
iter              506 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 	if (!iter.hw) {
iter              103 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h 		struct dpu_rm_hw_iter *iter,
iter              119 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
iter              266 drivers/gpu/drm/msm/msm_gpu.c 	struct drm_print_iterator iter;
iter              274 drivers/gpu/drm/msm/msm_gpu.c 	iter.data = buffer;
iter              275 drivers/gpu/drm/msm/msm_gpu.c 	iter.offset = 0;
iter              276 drivers/gpu/drm/msm/msm_gpu.c 	iter.start = offset;
iter              277 drivers/gpu/drm/msm/msm_gpu.c 	iter.remain = count;
iter              279 drivers/gpu/drm/msm/msm_gpu.c 	p = drm_coredump_printer(&iter);
iter              295 drivers/gpu/drm/msm/msm_gpu.c 	return count - iter.remain;
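
The msm_gpu.c hits above feed a devcoredump read window through drm_coredump_printer(); a minimal sketch of that read path, with a hypothetical function name and placeholder output:

#include <linux/types.h>
#include <drm/drm_print.h>

static ssize_t demo_coredump_read(char *buffer, loff_t offset, size_t count)
{
        struct drm_print_iterator iter;
        struct drm_printer p;

        /* Describe the caller's window into the coredump stream. */
        iter.data = buffer;
        iter.start = offset;
        iter.remain = count;
        iter.offset = 0;

        p = drm_coredump_printer(&iter);
        drm_printf(&p, "example coredump section\n");

        /* Bytes actually written into the caller's buffer. */
        return count - iter.remain;
}
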
iter               13 drivers/gpu/drm/nouveau/include/nvif/if0002.h 	__u8  iter;
iter               22 drivers/gpu/drm/nouveau/include/nvif/if0002.h 	__u16 iter;
iter               33 drivers/gpu/drm/nouveau/include/nvif/if0002.h 	__u8  iter;
iter              152 drivers/gpu/drm/nouveau/nouveau_connector.h #define nouveau_for_each_non_mst_connector_iter(connector, iter) \
iter              153 drivers/gpu/drm/nouveau/nouveau_connector.h 	drm_for_each_connector_iter(connector, iter) \
iter             1651 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	const struct gf100_gr_pack *iter;
iter             1663 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	pack_for_each_init(init, iter, pack) {
iter              449 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 			   args->v0.version, args->v0.iter);
iter              450 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 		di = (args->v0.iter & 0xff) - 1;
iter              473 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 		args->v0.iter = ++di;
iter              477 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 	args->v0.iter = 0xff;
iter              501 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 			   args->v0.version, args->v0.domain, args->v0.iter);
iter              502 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 		si = (args->v0.iter & 0xffff) - 1;
iter              526 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 			args->v0.iter = ++si;
iter              531 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 	args->v0.iter = 0xffff;
iter              555 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 			   args->v0.iter);
iter              556 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 		si = (args->v0.iter & 0xff) - 1;
iter              579 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 		args->v0.iter = ++si;
iter              583 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c 	args->v0.iter = 0xff;
iter             1255 drivers/gpu/drm/omapdrm/omap_gem.c 		struct sg_page_iter iter;
iter             1270 drivers/gpu/drm/omapdrm/omap_gem.c 		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
iter             1271 drivers/gpu/drm/omapdrm/omap_gem.c 			pages[i++] = sg_page_iter_page(&iter);
iter               59 drivers/gpu/drm/panfrost/panfrost_gem.c 	struct panfrost_gem_mapping *iter, *mapping = NULL;
iter               62 drivers/gpu/drm/panfrost/panfrost_gem.c 	list_for_each_entry(iter, &bo->mappings.list, node) {
iter               63 drivers/gpu/drm/panfrost/panfrost_gem.c 		if (iter->mmu == &priv->mmu) {
iter               64 drivers/gpu/drm/panfrost/panfrost_gem.c 			kref_get(&iter->refcount);
iter               65 drivers/gpu/drm/panfrost/panfrost_gem.c 			mapping = iter;
iter              177 drivers/gpu/drm/panfrost/panfrost_gem.c 	struct panfrost_gem_mapping *mapping = NULL, *iter;
iter              180 drivers/gpu/drm/panfrost/panfrost_gem.c 	list_for_each_entry(iter, &bo->mappings.list, node) {
iter              181 drivers/gpu/drm/panfrost/panfrost_gem.c 		if (iter->mmu == &priv->mmu) {
iter              182 drivers/gpu/drm/panfrost/panfrost_gem.c 			mapping = iter;
iter              183 drivers/gpu/drm/panfrost/panfrost_gem.c 			list_del(&iter->node);
iter               75 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter               94 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter               95 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              106 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              127 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              128 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              139 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              159 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              160 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              171 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              192 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              193 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              204 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              222 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              223 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              233 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              250 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              251 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              261 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              273 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              274 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              284 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              308 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              309 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              320 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              343 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              344 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              355 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              379 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              380 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              391 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              415 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              416 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              426 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              452 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              453 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              464 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              491 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              492 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              503 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              530 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              531 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              541 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              566 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              567 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              578 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              605 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              606 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              617 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              642 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              643 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip) {
iter              658 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              685 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              686 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip) {
iter              701 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              726 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              727 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              738 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              765 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              766 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter              777 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	struct drm_atomic_helper_damage_iter iter;
iter              804 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
iter              805 drivers/gpu/drm/selftests/test-drm_damage_helper.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
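
The block above is the damage-helper selftest exercising the DRM atomic damage iterator. A minimal sketch of the same pattern as a driver might use it, assuming a kernel build context; drm_atomic_helper_damage_iter_init() and drm_atomic_for_each_plane_damage() are the helpers named in the lines above, while example_damage_area() is a hypothetical function added only for illustration:

    #include <linux/types.h>
    #include <drm/drm_damage_helper.h>
    #include <drm/drm_rect.h>

    /* Illustrative only: sum the area of the damage clips between the old
     * and new plane state, e.g. to decide whether a full-plane update is
     * cheaper than per-clip blits. */
    static u64 example_damage_area(struct drm_plane_state *old_state,
                                   struct drm_plane_state *state)
    {
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;
        u64 area = 0;

        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip)
            area += (u64)drm_rect_width(&clip) * drm_rect_height(&clip);

        return area;
    }
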
iter               44 drivers/gpu/drm/sun4i/sun4i_tcon.c 	struct drm_connector_list_iter iter;
iter               46 drivers/gpu/drm/sun4i/sun4i_tcon.c 	drm_connector_list_iter_begin(encoder->dev, &iter);
iter               47 drivers/gpu/drm/sun4i/sun4i_tcon.c 	drm_for_each_connector_iter(connector, &iter)
iter               49 drivers/gpu/drm/sun4i/sun4i_tcon.c 			drm_connector_list_iter_end(&iter);
iter               52 drivers/gpu/drm/sun4i/sun4i_tcon.c 	drm_connector_list_iter_end(&iter);
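
sun4i_tcon.c above walks the connector list with the reference-counted iterator, ending the iteration early once a match is found (line 49). A minimal sketch of that begin/iterate/end shape; the lookup criterion mirrors the lines above, the function name is hypothetical:

    #include <drm/drm_connector.h>
    #include <drm/drm_encoder.h>

    /* Illustrative: find the connector currently bound to @encoder. */
    static struct drm_connector *
    example_find_connector(struct drm_encoder *encoder)
    {
        struct drm_connector_list_iter iter;
        struct drm_connector *connector;

        drm_connector_list_iter_begin(encoder->dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
            if (connector->encoder == encoder) {
                /* End the iteration before returning, as the driver
                 * above does, so the iterator reference is dropped. */
                drm_connector_list_iter_end(&iter);
                return connector;
            }
        }
        drm_connector_list_iter_end(&iter);

        return NULL;
    }
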
iter              321 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	struct sg_dma_page_iter iter;
iter               39 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 			 struct vmw_piter *iter,
iter               86 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
iter               88 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
iter               92 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 			vmw_piter_next(iter);
iter             2798 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	struct drm_atomic_helper_damage_iter iter;
iter             2814 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
iter             2815 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	drm_atomic_for_each_plane_damage(&iter, &clip)
iter             2869 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
iter             2870 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	drm_atomic_for_each_plane_damage(&iter, &clip) {
iter              121 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	struct vmw_piter iter;
iter              127 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
iter              128 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	WARN_ON(!vmw_piter_next(&iter));
iter              138 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 		mob->pt_root_page = vmw_piter_dma_addr(&iter);
iter              141 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 		mob->pt_root_page = vmw_piter_dma_addr(&iter);
iter              147 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
iter             1441 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 	struct drm_atomic_helper_damage_iter iter;
iter             1451 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
iter             1452 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 	drm_atomic_for_each_plane_damage(&iter, &clip) {
iter              271 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	return __sg_page_iter_dma_next(&viter->iter) && ret;
iter              310 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	return sg_page_iter_dma_address(&viter->iter);
iter              345 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
iter              417 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	struct vmw_piter iter;
iter              472 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
iter              473 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 		dma_addr_t cur = vmw_piter_dma_addr(&iter);
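
vmwgfx wraps its page/DMA walk in vmw_piter; the for loop at vmwgfx_ttm_buffer.c line 472 above is the canonical shape. A minimal sketch of the same walk, assuming the driver-internal vmw_piter declarations (vmwgfx_drv.h) are in scope; the counting function itself is hypothetical:

    /* Illustrative: count how many DMA-mapped pages back a vmwgfx sg
     * table, using the piter calls shown in the lines above. */
    static unsigned long example_count_pages(const struct vmw_sg_table *vsgt)
    {
        struct vmw_piter iter;
        unsigned long npages = 0;

        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
            dma_addr_t addr = vmw_piter_dma_addr(&iter);

            (void)addr;     /* a real consumer would program this address */
            npages++;
        }

        return npages;
    }
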
iter              624 drivers/hid/hid-wiimote-core.c 	const __u8 *mods, *iter;
iter              630 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter              631 drivers/hid/hid-wiimote-core.c 		if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) {
iter              651 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter              652 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter              673 drivers/hid/hid-wiimote-core.c 	for ( ; iter-- != mods; ) {
iter              674 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter              687 drivers/hid/hid-wiimote-core.c 	const __u8 *mods, *iter;
iter              698 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter)
iter              706 drivers/hid/hid-wiimote-core.c 	for ( ; iter-- != mods; ) {
iter              707 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter             1247 drivers/hid/hid-wiimote-core.c 	const __u8 *iter, *mods;
iter             1257 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter             1258 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter             1268 drivers/hid/hid-wiimote-core.c 	const __u8 *iter, *mods;
iter             1278 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter             1279 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter             1308 drivers/hid/hid-wiimote-core.c 	const __u8 *iter, *mods;
iter             1370 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter             1371 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
iter             1390 drivers/hid/hid-wiimote-core.c 	const __u8 *iter, *mods;
iter             1400 drivers/hid/hid-wiimote-core.c 	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
iter             1401 drivers/hid/hid-wiimote-core.c 		ops = wiimod_table[*iter];
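
hid-wiimote-core.c iterates a WIIMOD_NULL-terminated table of module ids, registering forward and unwinding in reverse on failure or teardown with the "for ( ; iter-- != mods; )" idiom seen at lines 673 and 706 above. A small self-contained reduction of that pattern; all names here are hypothetical:

    #define EXAMPLE_MOD_NULL 0xffu      /* table terminator, like WIIMOD_NULL */

    struct example_mod_ops {
        int  (*probe)(void *dev);
        void (*remove)(void *dev);
    };

    /* Illustrative: probe modules in table order; on failure, unwind the
     * ones already probed, newest first. */
    static int example_register_mods(void *dev,
                                     const struct example_mod_ops *const *table,
                                     const unsigned char *mods)
    {
        const unsigned char *iter;
        int ret = 0;

        for (iter = mods; *iter != EXAMPLE_MOD_NULL; ++iter) {
            ret = table[*iter]->probe(dev);
            if (ret)
                goto error;
        }
        return 0;

    error:
        /* iter points at the entry that failed; step back over the ones
         * that probed successfully. */
        for ( ; iter-- != mods; )
            table[*iter]->remove(dev);
        return ret;
    }
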
iter              902 drivers/hv/channel_mgmt.c 	struct vmbus_channel *channel = NULL, *iter;
iter              911 drivers/hv/channel_mgmt.c 	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
iter              912 drivers/hv/channel_mgmt.c 		inst1 = &iter->offermsg.offer.if_instance;
iter              916 drivers/hv/channel_mgmt.c 			channel = iter;
iter             2056 drivers/hv/vmbus_drv.c 	struct resource *iter, *shadow;
iter             2084 drivers/hv/vmbus_drv.c 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
iter             2085 drivers/hv/vmbus_drv.c 		if ((iter->start >= max) || (iter->end <= min))
iter             2088 drivers/hv/vmbus_drv.c 		range_min = iter->start;
iter             2089 drivers/hv/vmbus_drv.c 		range_max = iter->end;
iter             2092 drivers/hv/vmbus_drv.c 			shadow = __request_region(iter, start, size, NULL,
iter             2104 drivers/hv/vmbus_drv.c 			__release_region(iter, start, size);
iter             2124 drivers/hv/vmbus_drv.c 	struct resource *iter;
iter             2127 drivers/hv/vmbus_drv.c 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
iter             2128 drivers/hv/vmbus_drv.c 		if ((iter->start >= start + size) || (iter->end <= start))
iter             2131 drivers/hv/vmbus_drv.c 		__release_region(iter, start, size);
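
vmbus_drv.c walks its MMIO resource tree through the ->sibling links, skipping ranges that do not overlap the request and claiming or releasing the region against the matching parent (lines 2084-2104 and 2127-2131 above). A minimal sketch of the release-side walk; the root pointer here is a hypothetical stand-in for the driver's hyperv_mmio, and the caller is assumed to hold whatever lock protects the tree:

    #include <linux/ioport.h>

    /* Illustrative: release @size bytes at @start from every child range
     * of @root that overlaps it, mirroring the walk above. */
    static void example_release_mmio(struct resource *root,
                                     resource_size_t start,
                                     resource_size_t size)
    {
        struct resource *iter;

        for (iter = root; iter; iter = iter->sibling) {
            if ((iter->start >= start + size) || (iter->end <= start))
                continue;

            __release_region(iter, start, size);
        }
    }
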
iter              343 drivers/hwspinlock/hwspinlock_core.c 	struct radix_tree_iter iter;
iter              361 drivers/hwspinlock/hwspinlock_core.c 	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
iter              366 drivers/hwspinlock/hwspinlock_core.c 			slot = radix_tree_iter_retry(&iter);
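
hwspinlock_core.c shows the standard radix-tree slot walk, including the radix_tree_iter_retry() restart when a slot dereference races with a concurrent update (line 366 above). A minimal sketch of that loop shape; the counting purpose is hypothetical, and the caller is assumed to hold the RCU read lock or the tree's own lock:

    #include <linux/radix-tree.h>

    /* Illustrative: count the entries in a radix tree, retrying a slot
     * whose entry moved underneath us. */
    static unsigned long example_count_entries(struct radix_tree_root *tree)
    {
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned long count = 0;

        radix_tree_for_each_slot(slot, tree, &iter, 0) {
            void *entry = radix_tree_deref_slot(slot);

            if (radix_tree_deref_retry(entry)) {
                /* Slot changed under us: retry this index. */
                slot = radix_tree_iter_retry(&iter);
                continue;
            }

            count++;
        }

        return count;
    }
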
iter              431 drivers/hwtracing/intel_th/msu.c static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
iter              433 drivers/hwtracing/intel_th/msu.c 	return sg_virt(iter->block);
iter              438 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter;
iter              440 drivers/hwtracing/intel_th/msu.c 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
iter              441 drivers/hwtracing/intel_th/msu.c 	if (!iter)
iter              453 drivers/hwtracing/intel_th/msu.c 		kfree(iter);
iter              454 drivers/hwtracing/intel_th/msu.c 		iter = ERR_PTR(-EBUSY);
iter              458 drivers/hwtracing/intel_th/msu.c 	iter->msc = msc;
iter              460 drivers/hwtracing/intel_th/msu.c 	list_add_tail(&iter->entry, &msc->iter_list);
iter              464 drivers/hwtracing/intel_th/msu.c 	return iter;
iter              467 drivers/hwtracing/intel_th/msu.c static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
iter              470 drivers/hwtracing/intel_th/msu.c 	list_del(&iter->entry);
iter              473 drivers/hwtracing/intel_th/msu.c 	kfree(iter);
iter              476 drivers/hwtracing/intel_th/msu.c static void msc_iter_block_start(struct msc_iter *iter)
iter              478 drivers/hwtracing/intel_th/msu.c 	if (iter->start_block)
iter              481 drivers/hwtracing/intel_th/msu.c 	iter->start_block = msc_win_oldest_sg(iter->win);
iter              482 drivers/hwtracing/intel_th/msu.c 	iter->block = iter->start_block;
iter              483 drivers/hwtracing/intel_th/msu.c 	iter->wrap_count = 0;
iter              489 drivers/hwtracing/intel_th/msu.c 	if (msc_block_wrapped(msc_iter_bdesc(iter)))
iter              490 drivers/hwtracing/intel_th/msu.c 		iter->wrap_count = 2;
iter              494 drivers/hwtracing/intel_th/msu.c static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
iter              497 drivers/hwtracing/intel_th/msu.c 	if (iter->start_win)
iter              500 drivers/hwtracing/intel_th/msu.c 	iter->start_win = msc_oldest_window(msc);
iter              501 drivers/hwtracing/intel_th/msu.c 	if (!iter->start_win)
iter              504 drivers/hwtracing/intel_th/msu.c 	iter->win = iter->start_win;
iter              505 drivers/hwtracing/intel_th/msu.c 	iter->start_block = NULL;
iter              507 drivers/hwtracing/intel_th/msu.c 	msc_iter_block_start(iter);
iter              512 drivers/hwtracing/intel_th/msu.c static int msc_iter_win_advance(struct msc_iter *iter)
iter              514 drivers/hwtracing/intel_th/msu.c 	iter->win = msc_next_window(iter->win);
iter              515 drivers/hwtracing/intel_th/msu.c 	iter->start_block = NULL;
iter              517 drivers/hwtracing/intel_th/msu.c 	if (iter->win == iter->start_win) {
iter              518 drivers/hwtracing/intel_th/msu.c 		iter->eof++;
iter              522 drivers/hwtracing/intel_th/msu.c 	msc_iter_block_start(iter);
iter              527 drivers/hwtracing/intel_th/msu.c static int msc_iter_block_advance(struct msc_iter *iter)
iter              529 drivers/hwtracing/intel_th/msu.c 	iter->block_off = 0;
iter              532 drivers/hwtracing/intel_th/msu.c 	if (iter->wrap_count && iter->block == iter->start_block) {
iter              533 drivers/hwtracing/intel_th/msu.c 		iter->wrap_count--;
iter              534 drivers/hwtracing/intel_th/msu.c 		if (!iter->wrap_count)
iter              536 drivers/hwtracing/intel_th/msu.c 			return msc_iter_win_advance(iter);
iter              540 drivers/hwtracing/intel_th/msu.c 	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
iter              542 drivers/hwtracing/intel_th/msu.c 		return msc_iter_win_advance(iter);
iter              545 drivers/hwtracing/intel_th/msu.c 	if (sg_is_last(iter->block))
iter              546 drivers/hwtracing/intel_th/msu.c 		iter->block = msc_win_base_sg(iter->win);
iter              548 drivers/hwtracing/intel_th/msu.c 		iter->block = sg_next(iter->block);
iter              551 drivers/hwtracing/intel_th/msu.c 	if (!iter->wrap_count && iter->block == iter->start_block)
iter              552 drivers/hwtracing/intel_th/msu.c 		return msc_iter_win_advance(iter);
iter              574 drivers/hwtracing/intel_th/msu.c msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
iter              577 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter              581 drivers/hwtracing/intel_th/msu.c 	if (iter->eof)
iter              585 drivers/hwtracing/intel_th/msu.c 	if (msc_iter_win_start(iter, msc))
iter              589 drivers/hwtracing/intel_th/msu.c 		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
iter              590 drivers/hwtracing/intel_th/msu.c 		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
iter              608 drivers/hwtracing/intel_th/msu.c 		if (iter->block == iter->start_block && iter->wrap_count == 2) {
iter              616 drivers/hwtracing/intel_th/msu.c 		tocopy -= iter->block_off;
iter              617 drivers/hwtracing/intel_th/msu.c 		src += iter->block_off;
iter              631 drivers/hwtracing/intel_th/msu.c 		iter->block_off += copied;
iter              632 drivers/hwtracing/intel_th/msu.c 		iter->offset += copied;
iter              638 drivers/hwtracing/intel_th/msu.c 		if (msc_iter_block_advance(iter))
iter             1224 drivers/hwtracing/intel_th/msu.c 	struct msc_window *win, *iter;
iter             1226 drivers/hwtracing/intel_th/msu.c 	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
iter             1434 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter;
iter             1439 drivers/hwtracing/intel_th/msu.c 	iter = msc_iter_install(msc);
iter             1440 drivers/hwtracing/intel_th/msu.c 	if (IS_ERR(iter))
iter             1441 drivers/hwtracing/intel_th/msu.c 		return PTR_ERR(iter);
iter             1443 drivers/hwtracing/intel_th/msu.c 	file->private_data = iter;
iter             1450 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = file->private_data;
iter             1451 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter             1453 drivers/hwtracing/intel_th/msu.c 	msc_iter_remove(iter, msc);
iter             1497 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = file->private_data;
iter             1498 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter             1530 drivers/hwtracing/intel_th/msu.c 		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
iter             1532 drivers/hwtracing/intel_th/msu.c 			*ppos = iter->offset;
iter             1549 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
iter             1550 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter             1557 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
iter             1558 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter             1582 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vmf->vma->vm_file->private_data;
iter             1583 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
iter             1605 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
iter             1606 drivers/hwtracing/intel_th/msu.c 	struct msc *msc = iter->msc;
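
The msu.c lines above build a two-level iterator: msc_iter_win_start() picks the oldest window, msc_iter_block_start() primes the block cursor (setting wrap_count when the block wrapped), and msc_iter_block_advance()/msc_iter_win_advance() step through blocks and windows until the walk arrives back at its starting point. A self-contained, plain-C reduction of just the window-advance step (lines 512-522 above); every name here is hypothetical:

    struct example_win {
        struct example_win *next;   /* circular: last window points at first */
    };

    struct example_win_iter {
        struct example_win *start_win;  /* window the walk began at */
        struct example_win *win;        /* current window */
        int eof;
    };

    /* Illustrative: advance to the next window; when we wrap back to the
     * starting window, every window has been visited once and EOF is set. */
    static int example_win_advance(struct example_win_iter *iter)
    {
        iter->win = iter->win->next;

        if (iter->win == iter->start_win) {
            iter->eof = 1;
            return 1;
        }

        return 0;
    }
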
iter              435 drivers/hwtracing/stm/core.c 	struct stm_pdrv_entry *pe, *iter;
iter              439 drivers/hwtracing/stm/core.c 	list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
iter              941 drivers/hwtracing/stm/core.c 	struct stm_source_device *src, *iter;
iter              948 drivers/hwtracing/stm/core.c 	list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
iter               32 drivers/iio/industrialio-sw-device.c 	struct iio_sw_device_type *d = NULL, *iter;
iter               34 drivers/iio/industrialio-sw-device.c 	list_for_each_entry(iter, &iio_device_types_list, list)
iter               35 drivers/iio/industrialio-sw-device.c 		if (!strcmp(iter->name, name)) {
iter               36 drivers/iio/industrialio-sw-device.c 			d = iter;
iter               45 drivers/iio/industrialio-sw-device.c 	struct iio_sw_device_type *iter;
iter               49 drivers/iio/industrialio-sw-device.c 	iter = __iio_find_sw_device_type(d->name, strlen(d->name));
iter               50 drivers/iio/industrialio-sw-device.c 	if (iter)
iter               70 drivers/iio/industrialio-sw-device.c 	struct iio_sw_device_type *iter;
iter               73 drivers/iio/industrialio-sw-device.c 	iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
iter               74 drivers/iio/industrialio-sw-device.c 	if (iter)
iter               32 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *t = NULL, *iter;
iter               34 drivers/iio/industrialio-sw-trigger.c 	list_for_each_entry(iter, &iio_trigger_types_list, list)
iter               35 drivers/iio/industrialio-sw-trigger.c 		if (!strcmp(iter->name, name)) {
iter               36 drivers/iio/industrialio-sw-trigger.c 			t = iter;
iter               45 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *iter;
iter               49 drivers/iio/industrialio-sw-trigger.c 	iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
iter               50 drivers/iio/industrialio-sw-trigger.c 	if (iter)
iter               70 drivers/iio/industrialio-sw-trigger.c 	struct iio_sw_trigger_type *iter;
iter               73 drivers/iio/industrialio-sw-trigger.c 	iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
iter               74 drivers/iio/industrialio-sw-trigger.c 	if (iter)
iter              137 drivers/iio/industrialio-trigger.c 	struct iio_trigger *iter;
iter              139 drivers/iio/industrialio-trigger.c 	list_for_each_entry(iter, &iio_trigger_list, list)
iter              140 drivers/iio/industrialio-trigger.c 		if (!strcmp(iter->name, name))
iter              141 drivers/iio/industrialio-trigger.c 			return iter;
iter              148 drivers/iio/industrialio-trigger.c 	struct iio_trigger *trig = NULL, *iter;
iter              151 drivers/iio/industrialio-trigger.c 	list_for_each_entry(iter, &iio_trigger_list, list)
iter              152 drivers/iio/industrialio-trigger.c 		if (sysfs_streq(iter->name, name)) {
iter              153 drivers/iio/industrialio-trigger.c 			trig = iter;
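
The iio sw-device, sw-trigger and trigger lookups above are all the same "scan a list, match by name" helper. A minimal sketch of that shape; the element type and list head are hypothetical stand-ins, and the caller is assumed to hold whatever lock protects the list:

    #include <linux/list.h>
    #include <linux/string.h>

    struct example_trigger {
        const char *name;
        struct list_head list;
    };

    /* Illustrative: return the first entry whose name matches, or NULL. */
    static struct example_trigger *
    example_find_trigger(struct list_head *trigger_list, const char *name)
    {
        struct example_trigger *trig = NULL, *iter;

        list_for_each_entry(iter, trigger_list, list)
            if (sysfs_streq(iter->name, name)) {
                trig = iter;
                break;
            }

        return trig;
    }
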
iter              131 drivers/infiniband/core/uverbs_cmd.c 				struct uverbs_req_iter *iter,
iter              141 drivers/infiniband/core/uverbs_cmd.c 	iter->cur = attrs->ucore.inbuf + req_len;
iter              142 drivers/infiniband/core/uverbs_cmd.c 	iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
iter              146 drivers/infiniband/core/uverbs_cmd.c static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
iter              149 drivers/infiniband/core/uverbs_cmd.c 	if (iter->cur + len > iter->end)
iter              152 drivers/infiniband/core/uverbs_cmd.c 	if (copy_from_user(val, iter->cur, len))
iter              155 drivers/infiniband/core/uverbs_cmd.c 	iter->cur += len;
iter              159 drivers/infiniband/core/uverbs_cmd.c static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
iter              162 drivers/infiniband/core/uverbs_cmd.c 	const void __user *res = iter->cur;
iter              164 drivers/infiniband/core/uverbs_cmd.c 	if (iter->cur + len > iter->end)
iter              166 drivers/infiniband/core/uverbs_cmd.c 	iter->cur += len;
iter              170 drivers/infiniband/core/uverbs_cmd.c static int uverbs_request_finish(struct uverbs_req_iter *iter)
iter              172 drivers/infiniband/core/uverbs_cmd.c 	if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
iter             2013 drivers/infiniband/core/uverbs_cmd.c 	struct uverbs_req_iter iter;
iter             2015 drivers/infiniband/core/uverbs_cmd.c 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
iter             2018 drivers/infiniband/core/uverbs_cmd.c 	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
iter             2022 drivers/infiniband/core/uverbs_cmd.c 		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
iter             2025 drivers/infiniband/core/uverbs_cmd.c 	ret = uverbs_request_finish(&iter);
iter             2193 drivers/infiniband/core/uverbs_cmd.c ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
iter             2207 drivers/infiniband/core/uverbs_cmd.c 	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
iter             2211 drivers/infiniband/core/uverbs_cmd.c 		iter, sge_count * sizeof(struct ib_uverbs_sge));
iter             2214 drivers/infiniband/core/uverbs_cmd.c 	ret = uverbs_request_finish(iter);
iter             2298 drivers/infiniband/core/uverbs_cmd.c 	struct uverbs_req_iter iter;
iter             2300 drivers/infiniband/core/uverbs_cmd.c 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
iter             2304 drivers/infiniband/core/uverbs_cmd.c 	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
iter             2348 drivers/infiniband/core/uverbs_cmd.c 	struct uverbs_req_iter iter;
iter             2350 drivers/infiniband/core/uverbs_cmd.c 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
iter             2354 drivers/infiniband/core/uverbs_cmd.c 	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
iter             3055 drivers/infiniband/core/uverbs_cmd.c 	struct uverbs_req_iter iter;
iter             3058 drivers/infiniband/core/uverbs_cmd.c 	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
iter             3074 drivers/infiniband/core/uverbs_cmd.c 	err = uverbs_request_next(&iter, wqs_handles,
iter             3079 drivers/infiniband/core/uverbs_cmd.c 	err = uverbs_request_finish(&iter);
iter             3183 drivers/infiniband/core/uverbs_cmd.c 	struct uverbs_req_iter iter;
iter             3189 drivers/infiniband/core/uverbs_cmd.c 	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
iter             3225 drivers/infiniband/core/uverbs_cmd.c 		err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
iter             3233 drivers/infiniband/core/uverbs_cmd.c 	err = uverbs_request_finish(&iter);
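
uverbs_cmd.c reads a variable-length write() command through a small bounded cursor: cur/end pointers, a next() that copies and advances, a next_ptr() that hands back a user pointer without copying, and a finish() that insists the unread tail is zeroed. A minimal sketch of the copy-and-advance step; the struct and names here are a sketch of the idea, not the uverbs definitions:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct example_req_iter {
        const void __user *cur;
        const void __user *end;
    };

    /* Illustrative: copy the next @len bytes of the request into @val,
     * failing if the request is shorter than the caller expects. */
    static int example_request_next(struct example_req_iter *iter,
                                    void *val, size_t len)
    {
        if (iter->cur + len > iter->end)
            return -ENOSPC;

        if (copy_from_user(val, iter->cur, len))
            return -EFAULT;

        iter->cur += len;
        return 0;
    }
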
iter              359 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              366 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter,
iter              370 drivers/infiniband/core/uverbs_uapi.c 		u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
iter              374 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_attr_to_ioctl_method(iter.index) !=
iter              423 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              428 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
iter              432 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_ioctl_method(iter.index)) {
iter              434 drivers/infiniband/core/uverbs_uapi.c 							iter.index);
iter              439 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_write_method(iter.index))
iter              441 drivers/infiniband/core/uverbs_uapi.c 					iter.index & UVERBS_API_ATTR_KEY_MASK);
iter              442 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_write_ex_method(iter.index))
iter              445 drivers/infiniband/core/uverbs_uapi.c 				    iter.index & UVERBS_API_ATTR_KEY_MASK);
iter              458 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
iter              459 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_write_method(iter.index))
iter              460 drivers/infiniband/core/uverbs_uapi.c 			uapi->write_methods[iter.index &
iter              463 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_write_ex_method(iter.index))
iter              464 drivers/infiniband/core/uverbs_uapi.c 			uapi->write_ex_methods[iter.index &
iter              474 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              477 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) {
iter              478 drivers/infiniband/core/uverbs_uapi.c 		if (iter.index > last)
iter              481 drivers/infiniband/core/uverbs_uapi.c 		radix_tree_iter_delete(&uapi->radix, &iter, slot);
iter              528 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              534 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) {
iter              535 drivers/infiniband/core/uverbs_uapi.c 		uapi_key_okay(iter.index);
iter              537 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_object(iter.index)) {
iter              544 drivers/infiniband/core/uverbs_uapi.c 				starting_key = iter.index;
iter              545 drivers/infiniband/core/uverbs_uapi.c 				uapi_remove_object(uapi, iter.index);
iter              551 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_ioctl_method(iter.index)) {
iter              556 drivers/infiniband/core/uverbs_uapi.c 				starting_key = iter.index;
iter              557 drivers/infiniband/core/uverbs_uapi.c 				uapi_remove_method(uapi, iter.index);
iter              563 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_write_method(iter.index) ||
iter              564 drivers/infiniband/core/uverbs_uapi.c 		    uapi_key_is_write_ex_method(iter.index)) {
iter              570 drivers/infiniband/core/uverbs_uapi.c 				radix_tree_iter_delete(&uapi->radix, &iter, slot);
iter              575 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_attr(iter.index)) {
iter              600 drivers/infiniband/core/uverbs_uapi.c 			starting_key = iter.index;
iter              603 drivers/infiniband/core/uverbs_uapi.c 				iter.index & (UVERBS_API_OBJ_KEY_MASK |
iter              683 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              688 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
iter              689 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_ioctl_method(iter.index)) {
iter              708 drivers/infiniband/core/uverbs_uapi.c 	struct radix_tree_iter iter;
iter              711 drivers/infiniband/core/uverbs_uapi.c 	radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
iter              712 drivers/infiniband/core/uverbs_uapi.c 		if (uapi_key_is_object(iter.index)) {
iter              722 drivers/infiniband/core/uverbs_uapi.c 		} else if (uapi_key_is_attr(iter.index)) {
iter              263 drivers/infiniband/hw/hfi1/debugfs.c 	struct rvt_qp_iter *iter;
iter              266 drivers/infiniband/hw/hfi1/debugfs.c 	iter = rvt_qp_iter_init(s->private, 0, NULL);
iter              271 drivers/infiniband/hw/hfi1/debugfs.c 	if (!iter)
iter              275 drivers/infiniband/hw/hfi1/debugfs.c 		if (rvt_qp_iter_next(iter)) {
iter              276 drivers/infiniband/hw/hfi1/debugfs.c 			kfree(iter);
iter              281 drivers/infiniband/hw/hfi1/debugfs.c 	return iter;
iter              288 drivers/infiniband/hw/hfi1/debugfs.c 	struct rvt_qp_iter *iter = iter_ptr;
iter              292 drivers/infiniband/hw/hfi1/debugfs.c 	if (rvt_qp_iter_next(iter)) {
iter              293 drivers/infiniband/hw/hfi1/debugfs.c 		kfree(iter);
iter              297 drivers/infiniband/hw/hfi1/debugfs.c 	return iter;
iter              308 drivers/infiniband/hw/hfi1/debugfs.c 	struct rvt_qp_iter *iter = iter_ptr;
iter              310 drivers/infiniband/hw/hfi1/debugfs.c 	if (!iter)
iter              313 drivers/infiniband/hw/hfi1/debugfs.c 	qp_iter_print(s, iter);
iter              650 drivers/infiniband/hw/hfi1/qp.c void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
iter              653 drivers/infiniband/hw/hfi1/qp.c 	struct rvt_qp *qp = iter->qp;
iter              668 drivers/infiniband/hw/hfi1/qp.c 		   iter->n,
iter              139 drivers/infiniband/hw/hfi1/qp.h void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
iter              369 drivers/infiniband/hw/hns/hns_roce_hem.c 	struct hns_roce_hem_iter iter;
iter              399 drivers/infiniband/hw/hns/hns_roce_hem.c 	for (hns_roce_hem_first(table->hem[i], &iter);
iter              400 drivers/infiniband/hw/hns/hns_roce_hem.c 	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
iter              401 drivers/infiniband/hw/hns/hns_roce_hem.c 		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
iter              443 drivers/infiniband/hw/hns/hns_roce_hem.c 	struct hns_roce_hem_iter iter;
iter              565 drivers/infiniband/hw/hns/hns_roce_hem.c 	hns_roce_hem_first(table->hem[hem_idx], &iter);
iter              566 drivers/infiniband/hw/hns/hns_roce_hem.c 	bt_ba = hns_roce_hem_addr(&iter);
iter              151 drivers/infiniband/hw/hns/hns_roce_hem.h 				      struct hns_roce_hem_iter *iter)
iter              153 drivers/infiniband/hw/hns/hns_roce_hem.h 	iter->hem = hem;
iter              154 drivers/infiniband/hw/hns/hns_roce_hem.h 	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
iter              157 drivers/infiniband/hw/hns/hns_roce_hem.h 	iter->page_idx = 0;
iter              160 drivers/infiniband/hw/hns/hns_roce_hem.h static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
iter              162 drivers/infiniband/hw/hns/hns_roce_hem.h 	return !iter->chunk;
iter              165 drivers/infiniband/hw/hns/hns_roce_hem.h static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
iter              167 drivers/infiniband/hw/hns/hns_roce_hem.h 	if (++iter->page_idx >= iter->chunk->nsg) {
iter              168 drivers/infiniband/hw/hns/hns_roce_hem.h 		if (iter->chunk->list.next == &iter->hem->chunk_list) {
iter              169 drivers/infiniband/hw/hns/hns_roce_hem.h 			iter->chunk = NULL;
iter              173 drivers/infiniband/hw/hns/hns_roce_hem.h 		iter->chunk = list_entry(iter->chunk->list.next,
iter              175 drivers/infiniband/hw/hns/hns_roce_hem.h 		iter->page_idx = 0;
iter              179 drivers/infiniband/hw/hns/hns_roce_hem.h static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
iter              181 drivers/infiniband/hw/hns/hns_roce_hem.h 	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
iter             3041 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_hem_iter iter;
iter             3087 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		for (hns_roce_hem_first(hem, &iter);
iter             3088 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
iter             3089 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			bt_ba = hns_roce_hem_addr(&iter);
iter             3195 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_ib_flow_handler *iter, *tmp;
iter             3200 drivers/infiniband/hw/mlx5/main.c 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
iter             3201 drivers/infiniband/hw/mlx5/main.c 		mlx5_del_flow_rules(iter->rule);
iter             3202 drivers/infiniband/hw/mlx5/main.c 		put_flow_table(dev, iter->prio, true);
iter             3203 drivers/infiniband/hw/mlx5/main.c 		list_del(&iter->list);
iter             3204 drivers/infiniband/hw/mlx5/main.c 		kfree(iter);
iter              661 drivers/infiniband/hw/mthca/mthca_cmd.c 	struct mthca_icm_iter iter;
iter              675 drivers/infiniband/hw/mthca/mthca_cmd.c 	for (mthca_icm_first(icm, &iter);
iter              676 drivers/infiniband/hw/mthca/mthca_cmd.c 	     !mthca_icm_last(&iter);
iter              677 drivers/infiniband/hw/mthca/mthca_cmd.c 	     mthca_icm_next(&iter)) {
iter              683 drivers/infiniband/hw/mthca/mthca_cmd.c 		lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
iter              687 drivers/infiniband/hw/mthca/mthca_cmd.c 				   (unsigned long long) mthca_icm_addr(&iter),
iter              688 drivers/infiniband/hw/mthca/mthca_cmd.c 				   mthca_icm_size(&iter));
iter              692 drivers/infiniband/hw/mthca/mthca_cmd.c 		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
iter              699 drivers/infiniband/hw/mthca/mthca_cmd.c 				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
iter              100 drivers/infiniband/hw/mthca/mthca_memfree.h 				   struct mthca_icm_iter *iter)
iter              102 drivers/infiniband/hw/mthca/mthca_memfree.h 	iter->icm      = icm;
iter              103 drivers/infiniband/hw/mthca/mthca_memfree.h 	iter->chunk    = list_empty(&icm->chunk_list) ?
iter              106 drivers/infiniband/hw/mthca/mthca_memfree.h 	iter->page_idx = 0;
iter              109 drivers/infiniband/hw/mthca/mthca_memfree.h static inline int mthca_icm_last(struct mthca_icm_iter *iter)
iter              111 drivers/infiniband/hw/mthca/mthca_memfree.h 	return !iter->chunk;
iter              114 drivers/infiniband/hw/mthca/mthca_memfree.h static inline void mthca_icm_next(struct mthca_icm_iter *iter)
iter              116 drivers/infiniband/hw/mthca/mthca_memfree.h 	if (++iter->page_idx >= iter->chunk->nsg) {
iter              117 drivers/infiniband/hw/mthca/mthca_memfree.h 		if (iter->chunk->list.next == &iter->icm->chunk_list) {
iter              118 drivers/infiniband/hw/mthca/mthca_memfree.h 			iter->chunk = NULL;
iter              122 drivers/infiniband/hw/mthca/mthca_memfree.h 		iter->chunk = list_entry(iter->chunk->list.next,
iter              124 drivers/infiniband/hw/mthca/mthca_memfree.h 		iter->page_idx = 0;
iter              128 drivers/infiniband/hw/mthca/mthca_memfree.h static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter)
iter              130 drivers/infiniband/hw/mthca/mthca_memfree.h 	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
iter              133 drivers/infiniband/hw/mthca/mthca_memfree.h static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
iter              135 drivers/infiniband/hw/mthca/mthca_memfree.h 	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
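
hns_roce_hem.h and mthca_memfree.h implement the same hand-rolled iterator over a list of scatterlist chunks: first() points at the first chunk, next() bumps page_idx and hops to the next chunk when the current one is exhausted, last() is simply "chunk == NULL", and addr()/size() read the current sg entry; the command path then walks it as at mthca_cmd.c lines 675-677 above. A minimal sketch of a consuming loop, assuming the driver-local "mthca_memfree.h" is in scope; the byte-counting purpose is hypothetical:

    /* Illustrative: total the DMA-mapped bytes of an ICM area using the
     * inline iterator quoted above (hns_roce_hem.h is the same shape with
     * different names). */
    static unsigned long long example_icm_bytes(struct mthca_icm *icm)
    {
        struct mthca_icm_iter iter;
        unsigned long long bytes = 0;

        for (mthca_icm_first(icm, &iter);
             !mthca_icm_last(&iter);
             mthca_icm_next(&iter))
            bytes += mthca_icm_size(&iter);

        return bytes;
    }
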
iter              959 drivers/infiniband/hw/qedr/verbs.c 	int iter;
iter              989 drivers/infiniband/hw/qedr/verbs.c 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
iter              990 drivers/infiniband/hw/qedr/verbs.c 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
iter              992 drivers/infiniband/hw/qedr/verbs.c 		iter--;
iter              995 drivers/infiniband/hw/qedr/verbs.c 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
iter              996 drivers/infiniband/hw/qedr/verbs.c 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
iter              998 drivers/infiniband/hw/qedr/verbs.c 		iter--;
iter              184 drivers/infiniband/hw/qib/qib_debugfs.c 	struct rvt_qp_iter *iter;
iter              187 drivers/infiniband/hw/qib/qib_debugfs.c 	iter = rvt_qp_iter_init(s->private, 0, NULL);
iter              192 drivers/infiniband/hw/qib/qib_debugfs.c 	if (!iter)
iter              196 drivers/infiniband/hw/qib/qib_debugfs.c 		if (rvt_qp_iter_next(iter)) {
iter              197 drivers/infiniband/hw/qib/qib_debugfs.c 			kfree(iter);
iter              202 drivers/infiniband/hw/qib/qib_debugfs.c 	return iter;
iter              209 drivers/infiniband/hw/qib/qib_debugfs.c 	struct rvt_qp_iter *iter = iter_ptr;
iter              213 drivers/infiniband/hw/qib/qib_debugfs.c 	if (rvt_qp_iter_next(iter)) {
iter              214 drivers/infiniband/hw/qib/qib_debugfs.c 		kfree(iter);
iter              218 drivers/infiniband/hw/qib/qib_debugfs.c 	return iter;
iter              229 drivers/infiniband/hw/qib/qib_debugfs.c 	struct rvt_qp_iter *iter = iter_ptr;
iter              231 drivers/infiniband/hw/qib/qib_debugfs.c 	if (!iter)
iter              234 drivers/infiniband/hw/qib/qib_debugfs.c 	qib_qp_iter_print(s, iter);
iter              424 drivers/infiniband/hw/qib/qib_qp.c void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
iter              427 drivers/infiniband/hw/qib/qib_qp.c 	struct rvt_qp *qp = iter->qp;
iter              433 drivers/infiniband/hw/qib/qib_qp.c 		   iter->n,
iter              282 drivers/infiniband/hw/qib/qib_verbs.h void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
iter             2743 drivers/infiniband/sw/rdmavt/qp.c int rvt_qp_iter_next(struct rvt_qp_iter *iter)
iter             2746 drivers/infiniband/sw/rdmavt/qp.c 	int n = iter->n;
iter             2748 drivers/infiniband/sw/rdmavt/qp.c 	struct rvt_qp *pqp = iter->qp;
iter             2750 drivers/infiniband/sw/rdmavt/qp.c 	struct rvt_dev_info *rdi = iter->rdi;
iter             2766 drivers/infiniband/sw/rdmavt/qp.c 	for (; n <  rdi->qp_dev->qp_table_size + iter->specials; n++) {
iter             2770 drivers/infiniband/sw/rdmavt/qp.c 			if (n < iter->specials) {
iter             2780 drivers/infiniband/sw/rdmavt/qp.c 						(n - iter->specials)]);
iter             2785 drivers/infiniband/sw/rdmavt/qp.c 			iter->qp = qp;
iter             2786 drivers/infiniband/sw/rdmavt/qp.c 			iter->n = n;
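
hfi1 and qib plug rvt_qp_iter into a seq_file: rvt_qp_iter_init() allocates the iterator, rvt_qp_iter_next() returns non-zero once the QP table (plus the special QPs) is exhausted, and the show hook prints iter->qp. A minimal sketch of the ->start hook built on those calls, following the shape above; it assumes the file was opened with the rvt device info as the seq_file private pointer, as the rvt_qp_iter_init(s->private, ...) calls above suggest, and that the rdmavt declarations are in scope:

    #include <linux/seq_file.h>
    #include <linux/slab.h>
    #include <rdma/rdmavt_qp.h>

    /* Illustrative seq_file ->start hook over all QPs of an rdmavt device. */
    static void *example_qp_seq_start(struct seq_file *s, loff_t *pos)
    {
        struct rvt_qp_iter *iter;
        loff_t n = *pos;

        iter = rvt_qp_iter_init(s->private, 0, NULL);
        if (!iter)
            return NULL;

        /* Advance to the requested position; a non-zero return from
         * rvt_qp_iter_next() means there are no more QPs. */
        do {
            if (rvt_qp_iter_next(iter)) {
                kfree(iter);
                return NULL;
            }
        } while (n--);

        return iter;
    }
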
iter              566 drivers/infiniband/ulp/ipoib/ipoib.h int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
iter              567 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
iter              575 drivers/infiniband/ulp/ipoib/ipoib.h int ipoib_path_iter_next(struct ipoib_path_iter *iter);
iter              576 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_path_iter_read(struct ipoib_path_iter *iter,
iter               60 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_mcast_iter *iter;
iter               63 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	iter = ipoib_mcast_iter_init(file->private);
iter               64 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (!iter)
iter               68 drivers/infiniband/ulp/ipoib/ipoib_fs.c 		if (ipoib_mcast_iter_next(iter)) {
iter               69 drivers/infiniband/ulp/ipoib/ipoib_fs.c 			kfree(iter);
iter               74 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	return iter;
iter               80 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_mcast_iter *iter = iter_ptr;
iter               84 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (ipoib_mcast_iter_next(iter)) {
iter               85 drivers/infiniband/ulp/ipoib/ipoib_fs.c 		kfree(iter);
iter               89 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	return iter;
iter               99 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_mcast_iter *iter = iter_ptr;
iter              105 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (!iter)
iter              108 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
iter              159 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_path_iter *iter;
iter              162 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	iter = ipoib_path_iter_init(file->private);
iter              163 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (!iter)
iter              167 drivers/infiniband/ulp/ipoib/ipoib_fs.c 		if (ipoib_path_iter_next(iter)) {
iter              168 drivers/infiniband/ulp/ipoib/ipoib_fs.c 			kfree(iter);
iter              173 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	return iter;
iter              179 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_path_iter *iter = iter_ptr;
iter              183 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (ipoib_path_iter_next(iter)) {
iter              184 drivers/infiniband/ulp/ipoib/ipoib_fs.c 		kfree(iter);
iter              188 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	return iter;
iter              198 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	struct ipoib_path_iter *iter = iter_ptr;
iter              203 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	if (!iter)
iter              206 drivers/infiniband/ulp/ipoib/ipoib_fs.c 	ipoib_path_iter_read(iter, &path);
iter              631 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct ipoib_path_iter *iter;
iter              633 drivers/infiniband/ulp/ipoib/ipoib_main.c 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter              634 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (!iter)
iter              637 drivers/infiniband/ulp/ipoib/ipoib_main.c 	iter->dev = dev;
iter              638 drivers/infiniband/ulp/ipoib/ipoib_main.c 	memset(iter->path.pathrec.dgid.raw, 0, 16);
iter              640 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (ipoib_path_iter_next(iter)) {
iter              641 drivers/infiniband/ulp/ipoib/ipoib_main.c 		kfree(iter);
iter              645 drivers/infiniband/ulp/ipoib/ipoib_main.c 	return iter;
iter              648 drivers/infiniband/ulp/ipoib/ipoib_main.c int ipoib_path_iter_next(struct ipoib_path_iter *iter)
iter              650 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
iter              662 drivers/infiniband/ulp/ipoib/ipoib_main.c 		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
iter              664 drivers/infiniband/ulp/ipoib/ipoib_main.c 			iter->path = *path;
iter              677 drivers/infiniband/ulp/ipoib/ipoib_main.c void ipoib_path_iter_read(struct ipoib_path_iter *iter,
iter              680 drivers/infiniband/ulp/ipoib/ipoib_main.c 	*path = iter->path;
iter              999 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	struct ipoib_mcast_iter *iter;
iter             1001 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter             1002 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	if (!iter)
iter             1005 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	iter->dev = dev;
iter             1006 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	memset(iter->mgid.raw, 0, 16);
iter             1008 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	if (ipoib_mcast_iter_next(iter)) {
iter             1009 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		kfree(iter);
iter             1013 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	return iter;
iter             1016 drivers/infiniband/ulp/ipoib/ipoib_multicast.c int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
iter             1018 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
iter             1030 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
iter             1032 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			iter->mgid      = mcast->mcmember.mgid;
iter             1033 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			iter->created   = mcast->created;
iter             1034 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
iter             1035 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			iter->complete  = !!mcast->ah;
iter             1036 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
iter             1051 drivers/infiniband/ulp/ipoib/ipoib_multicast.c void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
iter             1058 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	*mgid      = iter->mgid;
iter             1059 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	*created   = iter->created;
iter             1060 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	*queuelen  = iter->queuelen;
iter             1061 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	*complete  = iter->complete;
iter             1062 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	*send_only = iter->send_only;
iter              559 drivers/iommu/dmar.c 	struct acpi_dmar_header *iter, *next;
iter              562 drivers/iommu/dmar.c 	for (iter = start; iter < end; iter = next) {
iter              563 drivers/iommu/dmar.c 		next = (void *)iter + iter->length;
iter              564 drivers/iommu/dmar.c 		if (iter->length == 0) {
iter              575 drivers/iommu/dmar.c 			dmar_table_print_dmar_entry(iter);
iter              577 drivers/iommu/dmar.c 		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
iter              580 drivers/iommu/dmar.c 				 iter->type);
iter              581 drivers/iommu/dmar.c 		} else if (cb->cb[iter->type]) {
iter              584 drivers/iommu/dmar.c 			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
iter              589 drivers/iommu/dmar.c 				iter->type);
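
dmar.c walks the ACPI DMAR remapping structures as variable-length records: each header carries its own length, a zero length aborts the walk, and unknown types are skipped or reported. A small self-contained sketch of that record walk; the header layout and callback are hypothetical, not the ACPI definitions:

    #include <stddef.h>

    /* Illustrative: records are packed back to back, each starting with a
     * type/length header that tells us where the next one begins. */
    struct example_hdr {
        unsigned short type;
        unsigned short length;
    };

    static int example_walk_records(void *start, void *end,
                                    int (*cb)(struct example_hdr *rec))
    {
        struct example_hdr *iter, *next;

        for (iter = start; (void *)iter < end; iter = next) {
            /* A zero length would loop forever: treat it as corruption. */
            if (iter->length == 0)
                return -1;

            next = (struct example_hdr *)((char *)iter + iter->length);

            if (cb(iter))
                return -1;
        }

        return 0;
    }
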
iter              295 drivers/iommu/iommu.c 	struct iommu_resv_region *iter, *tmp, *nr, *top;
iter              304 drivers/iommu/iommu.c 	list_for_each_entry(iter, regions, list) {
iter              305 drivers/iommu/iommu.c 		if (nr->start < iter->start ||
iter              306 drivers/iommu/iommu.c 		    (nr->start == iter->start && nr->type <= iter->type))
iter              309 drivers/iommu/iommu.c 	list_add_tail(&nr->list, &iter->list);
iter              312 drivers/iommu/iommu.c 	list_for_each_entry_safe(iter, tmp, regions, list) {
iter              313 drivers/iommu/iommu.c 		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
iter              316 drivers/iommu/iommu.c 		if (iter->type != new->type) {
iter              317 drivers/iommu/iommu.c 			list_move_tail(&iter->list, &stack);
iter              323 drivers/iommu/iommu.c 			if (top->type == iter->type)
iter              326 drivers/iommu/iommu.c 		list_move_tail(&iter->list, &stack);
iter              332 drivers/iommu/iommu.c 		if (iter->start > top_end + 1) {
iter              333 drivers/iommu/iommu.c 			list_move_tail(&iter->list, &stack);
iter              336 drivers/iommu/iommu.c 			list_del(&iter->list);
iter              337 drivers/iommu/iommu.c 			kfree(iter);
iter               80 drivers/lightnvm/pblk-rb.c 	unsigned int alloc_order, order, iter;
iter              107 drivers/lightnvm/pblk-rb.c 		iter = (1 << (alloc_order - max_order));
iter              110 drivers/lightnvm/pblk-rb.c 		iter = 1;
iter              153 drivers/lightnvm/pblk-rb.c 		iter--;
iter              154 drivers/lightnvm/pblk-rb.c 	} while (iter > 0);
iter              432 drivers/md/bcache/alloc.c 		size_t iter;
iter              436 drivers/md/bcache/alloc.c 		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
iter              437 drivers/md/bcache/alloc.c 			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
iter              440 drivers/md/bcache/alloc.c 			fifo_for_each(i, &ca->free[j], iter)
iter              442 drivers/md/bcache/alloc.c 		fifo_for_each(i, &ca->free_inc, iter)
iter              867 drivers/md/bcache/bcache.h #define for_each_cache(ca, cs, iter)					\
iter              868 drivers/md/bcache/bcache.h 	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
iter               57 drivers/md/bcache/bset.c 	struct btree_iter iter;
iter               61 drivers/md/bcache/bset.c 		for_each_key(b, k, &iter)
iter               70 drivers/md/bcache/bset.c 	struct btree_iter iter;
iter               73 drivers/md/bcache/bset.c 	for_each_key(b, k, &iter) {
iter              111 drivers/md/bcache/bset.c static void bch_btree_iter_next_check(struct btree_iter *iter)
iter              113 drivers/md/bcache/bset.c 	struct bkey *k = iter->data->k, *next = bkey_next(k);
iter              115 drivers/md/bcache/bset.c 	if (next < iter->data->end &&
iter              116 drivers/md/bcache/bset.c 	    bkey_cmp(k, iter->b->ops->is_extents ?
iter              118 drivers/md/bcache/bset.c 		bch_dump_bucket(iter->b);
iter              125 drivers/md/bcache/bset.c static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
iter              886 drivers/md/bcache/bset.c 	struct btree_iter iter;
iter              902 drivers/md/bcache/bset.c 	m = bch_btree_iter_init(b, &iter, preceding_key_p);
iter              904 drivers/md/bcache/bset.c 	if (b->ops->insert_fixup(b, k, &iter, replace_key))
iter             1093 drivers/md/bcache/bset.c static inline bool btree_iter_end(struct btree_iter *iter)
iter             1095 drivers/md/bcache/bset.c 	return !iter->used;
iter             1098 drivers/md/bcache/bset.c void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
iter             1102 drivers/md/bcache/bset.c 		BUG_ON(!heap_add(iter,
iter             1108 drivers/md/bcache/bset.c 					  struct btree_iter *iter,
iter             1114 drivers/md/bcache/bset.c 	iter->size = ARRAY_SIZE(iter->data);
iter             1115 drivers/md/bcache/bset.c 	iter->used = 0;
iter             1118 drivers/md/bcache/bset.c 	iter->b = b;
iter             1123 drivers/md/bcache/bset.c 		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
iter             1130 drivers/md/bcache/bset.c 				 struct btree_iter *iter,
iter             1133 drivers/md/bcache/bset.c 	return __bch_btree_iter_init(b, iter, search, b->set);
iter             1137 drivers/md/bcache/bset.c static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
iter             1143 drivers/md/bcache/bset.c 	if (!btree_iter_end(iter)) {
iter             1144 drivers/md/bcache/bset.c 		bch_btree_iter_next_check(iter);
iter             1146 drivers/md/bcache/bset.c 		ret = iter->data->k;
iter             1147 drivers/md/bcache/bset.c 		iter->data->k = bkey_next(iter->data->k);
iter             1149 drivers/md/bcache/bset.c 		if (iter->data->k > iter->data->end) {
iter             1151 drivers/md/bcache/bset.c 			iter->data->k = iter->data->end;
iter             1154 drivers/md/bcache/bset.c 		if (iter->data->k == iter->data->end)
iter             1155 drivers/md/bcache/bset.c 			heap_pop(iter, b, cmp);
iter             1157 drivers/md/bcache/bset.c 			heap_sift(iter, 0, cmp);
iter             1163 drivers/md/bcache/bset.c struct bkey *bch_btree_iter_next(struct btree_iter *iter)
iter             1165 drivers/md/bcache/bset.c 	return __bch_btree_iter_next(iter, btree_iter_cmp);
iter             1170 drivers/md/bcache/bset.c struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
iter             1176 drivers/md/bcache/bset.c 		ret = bch_btree_iter_next(iter);
iter             1202 drivers/md/bcache/bset.c 			    struct btree_iter *iter,
iter             1213 drivers/md/bcache/bset.c 	for (i = iter->used / 2 - 1; i >= 0; --i)
iter             1214 drivers/md/bcache/bset.c 		heap_sift(iter, i, b->ops->sort_cmp);
iter             1216 drivers/md/bcache/bset.c 	while (!btree_iter_end(iter)) {
iter             1218 drivers/md/bcache/bset.c 			k = b->ops->sort_fixup(iter, &tmp.k);
iter             1223 drivers/md/bcache/bset.c 			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
iter             1242 drivers/md/bcache/bset.c static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
iter             1263 drivers/md/bcache/bset.c 	btree_mergesort(b, out, iter, fixup, false);
iter             1298 drivers/md/bcache/bset.c 	struct btree_iter iter;
iter             1301 drivers/md/bcache/bset.c 	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
iter             1312 drivers/md/bcache/bset.c 	__btree_sort(b, &iter, start, order, false, state);
iter             1319 drivers/md/bcache/bset.c 				    struct btree_iter *iter,
iter             1322 drivers/md/bcache/bset.c 	__btree_sort(b, iter, 0, b->page_order, true, state);
iter             1329 drivers/md/bcache/bset.c 	struct btree_iter iter;
iter             1331 drivers/md/bcache/bset.c 	bch_btree_iter_init(b, &iter, NULL);
iter             1333 drivers/md/bcache/bset.c 	btree_mergesort(b, new->set->data, &iter, false, true);
iter              192 drivers/md/bcache/bset.h 	struct bkey	*(*sort_fixup)(struct btree_iter *iter,
iter              196 drivers/md/bcache/bset.h 					struct btree_iter *iter,
iter              329 drivers/md/bcache/bset.h struct bkey *bch_btree_iter_next(struct btree_iter *iter);
iter              330 drivers/md/bcache/bset.h struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
iter              334 drivers/md/bcache/bset.h void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
iter              337 drivers/md/bcache/bset.h 				 struct btree_iter *iter,
iter              353 drivers/md/bcache/bset.h #define for_each_key_filter(b, k, iter, filter)				\
iter              354 drivers/md/bcache/bset.h 	for (bch_btree_iter_init((b), (iter), NULL);			\
iter              355 drivers/md/bcache/bset.h 	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
iter              357 drivers/md/bcache/bset.h #define for_each_key(b, k, iter)					\
iter              358 drivers/md/bcache/bset.h 	for (bch_btree_iter_init((b), (iter), NULL);			\
iter              359 drivers/md/bcache/bset.h 	     ((k) = bch_btree_iter_next(iter));)
iter              379 drivers/md/bcache/bset.h 				    struct btree_iter *iter,
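
bset.h wraps the heap-backed btree_iter in the for_each_key() and for_each_key_filter() macros quoted above, and the btree.c, sysfs.c and extents.c lines that follow consume them for key counting, GC and sorting. A minimal sketch of a consumer, assuming the bcache-local "bcache.h"/"bset.h" definitions (struct btree_keys, struct btree_iter, bch_ptr_bad) are in scope; the counting purpose is hypothetical:

    /* Illustrative: count the non-bad keys in a btree node, letting the
     * filter variant of the macro skip bad pointers for us. */
    static unsigned int example_count_good_keys(struct btree_keys *bk)
    {
        struct btree_iter iter;
        struct bkey *k;
        unsigned int good = 0;

        for_each_key_filter(bk, k, &iter, bch_ptr_bad)
            good++;

        return good;
    }
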
iter              208 drivers/md/bcache/btree.c 	struct btree_iter *iter;
iter              215 drivers/md/bcache/btree.c 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
iter              216 drivers/md/bcache/btree.c 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter              217 drivers/md/bcache/btree.c 	iter->used = 0;
iter              220 drivers/md/bcache/btree.c 	iter->b = &b->keys;
iter              258 drivers/md/bcache/btree.c 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
iter              270 drivers/md/bcache/btree.c 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
iter              282 drivers/md/bcache/btree.c 	mempool_free(iter, &b->c->fill_iter);
iter             1320 drivers/md/bcache/btree.c 	struct btree_iter iter;
iter             1325 drivers/md/bcache/btree.c 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
iter             1575 drivers/md/bcache/btree.c 	struct btree_iter iter;
iter             1578 drivers/md/bcache/btree.c 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
iter             1616 drivers/md/bcache/btree.c 	struct btree_iter iter;
iter             1620 drivers/md/bcache/btree.c 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
iter             1626 drivers/md/bcache/btree.c 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
iter             1921 drivers/md/bcache/btree.c 	struct btree_iter iter;
iter             1923 drivers/md/bcache/btree.c 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
iter             1929 drivers/md/bcache/btree.c 		bch_btree_iter_init(&b->keys, &iter, NULL);
iter             1932 drivers/md/bcache/btree.c 			k = bch_btree_iter_next_filter(&iter, &b->keys,
iter             2393 drivers/md/bcache/btree.c 		struct btree_iter iter;
iter             2395 drivers/md/bcache/btree.c 		bch_btree_iter_init(&b->keys, &iter, from);
iter             2397 drivers/md/bcache/btree.c 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
iter             2426 drivers/md/bcache/btree.c 	struct btree_iter iter;
iter             2428 drivers/md/bcache/btree.c 	bch_btree_iter_init(&b->keys, &iter, from);
iter             2430 drivers/md/bcache/btree.c 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
iter              203 drivers/md/bcache/btree.h #define for_each_cached_btree(b, c, iter)				\
iter              204 drivers/md/bcache/btree.h 	for (iter = 0;							\
iter              205 drivers/md/bcache/btree.h 	     iter < ARRAY_SIZE((c)->bucket_hash);			\
iter              206 drivers/md/bcache/btree.h 	     iter++)							\
iter              207 drivers/md/bcache/btree.h 		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
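
Note: for_each_cached_btree() (btree.h:203-207) chains a counted loop over the bucket array with a per-bucket chain walk, relying on the inner loop binding to the statement that follows the macro. A self-contained sketch of the same two-level walk over a toy chained hash table, without RCU and with hypothetical names:

#include <stdio.h>

#define NBUCKETS 4

struct node {
	int val;
	struct node *next;
};

/* Outer loop walks the bucket array, inner loop walks one chain; the
 * dangling inner for binds to the statement following the macro, just
 * like the hlist walk in for_each_cached_btree(). */
#define for_each_cached_node(n, tbl, i)					\
	for ((i) = 0; (i) < NBUCKETS; (i)++)				\
		for ((n) = (tbl)[(i)]; (n); (n) = (n)->next)

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, &b };
	struct node *table[NBUCKETS] = { &a, NULL, &c, NULL };
	struct node *n;
	unsigned int i;

	for_each_cached_node(n, table, i)
		printf("bucket %u: %d\n", i, n->val);
	return 0;
}
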
iter              112 drivers/md/bcache/debug.c 	struct bvec_iter iter, citer = { 0 };
iter              129 drivers/md/bcache/debug.c 	bio_for_each_segment(bv, bio, iter) {
iter               30 drivers/md/bcache/extents.c static void sort_key_next(struct btree_iter *iter,
iter               36 drivers/md/bcache/extents.c 		*i = iter->data[--iter->used];
iter              229 drivers/md/bcache/extents.c 				       struct btree_iter *iter,
iter              266 drivers/md/bcache/extents.c static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
iter              269 drivers/md/bcache/extents.c 	while (iter->used > 1) {
iter              270 drivers/md/bcache/extents.c 		struct btree_iter_set *top = iter->data, *i = top + 1;
iter              272 drivers/md/bcache/extents.c 		if (iter->used > 2 &&
iter              280 drivers/md/bcache/extents.c 			sort_key_next(iter, i);
iter              281 drivers/md/bcache/extents.c 			heap_sift(iter, i - top, bch_extent_sort_cmp);
iter              287 drivers/md/bcache/extents.c 				sort_key_next(iter, i);
iter              291 drivers/md/bcache/extents.c 			heap_sift(iter, i - top, bch_extent_sort_cmp);
iter              301 drivers/md/bcache/extents.c 				heap_sift(iter, 0, bch_extent_sort_cmp);
iter              325 drivers/md/bcache/extents.c 				    struct btree_iter *iter,
iter              337 drivers/md/bcache/extents.c 		struct bkey *k = bch_btree_iter_next(iter);
iter              183 drivers/md/bcache/journal.c 	unsigned int iter;
iter              186 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
iter              644 drivers/md/bcache/journal.c 	unsigned int iter, n = 0;
iter              656 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
iter              665 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter)
iter              676 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
iter               43 drivers/md/bcache/request.c 	struct bvec_iter iter;
iter               46 drivers/md/bcache/request.c 	bio_for_each_segment(bv, bio, iter) {
iter              637 drivers/md/bcache/sysfs.c 	struct btree_iter iter;
iter              648 drivers/md/bcache/sysfs.c 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
iter              122 drivers/md/bcache/util.h #define fifo_for_each(c, fifo, iter)					\
iter              123 drivers/md/bcache/util.h 	for (iter = (fifo)->front;					\
iter              124 drivers/md/bcache/util.h 	     c = (fifo)->data[iter], iter != (fifo)->back;		\
iter              125 drivers/md/bcache/util.h 	     iter = (iter + 1) & (fifo)->mask)
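
Note: fifo_for_each() (util.h:122-125) walks a power-of-two ring buffer from front to back, wrapping the advancing index with a single mask and loading the element inside the loop condition via the comma operator. A compact standalone analog, assuming hypothetical names rather than the bcache fifo API:

#include <stdio.h>

/* Ring buffer whose capacity is a power of two, so wrap-around is a
 * single AND with mask == size - 1, as in bcache's fifo. */
struct ring {
	int data[8];
	unsigned int mask;	/* size - 1 */
	unsigned int front;	/* index of oldest element */
	unsigned int back;	/* index one past newest element */
};

#define ring_for_each(c, r, i)					\
	for ((i) = (r)->front;					\
	     (c) = (r)->data[(i)], (i) != (r)->back;		\
	     (i) = ((i) + 1) & (r)->mask)

int main(void)
{
	struct ring r = { .mask = 7, .front = 6, .back = 1 };
	unsigned int i;
	int c;

	/* Three queued elements wrapping past the end of the array. */
	r.data[6] = 10;
	r.data[7] = 20;
	r.data[0] = 30;

	ring_for_each(c, &r, i)
		printf("%d\n", c);	/* prints 10, 20, 30 */
	return 0;
}

Because the element load sits before the termination test, c is also assigned on the final, failing iteration; the bcache macro has the same property, which is harmless as long as c is not used after the loop.
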
iter              292 drivers/md/dm-flakey.c 	struct bvec_iter iter;
iter              302 drivers/md/dm-flakey.c 	bio_for_each_segment(bvec, bio, iter) {
iter              303 drivers/md/dm-flakey.c 		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
iter              304 drivers/md/dm-flakey.c 			char *segment = (page_address(bio_iter_page(bio, iter))
iter              305 drivers/md/dm-flakey.c 					 + bio_iter_offset(bio, iter));
iter              314 drivers/md/dm-flakey.c 		corrupt_bio_byte -= bio_iter_len(bio, iter);
iter             1511 drivers/md/dm-integrity.c 		struct bvec_iter iter;
iter             1535 drivers/md/dm-integrity.c 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
iter             1583 drivers/md/dm-integrity.c 			struct bvec_iter iter;
iter             1588 drivers/md/dm-integrity.c 			bip_for_each_vec(biv, bip, iter) {
iter             1653 drivers/md/dm-integrity.c 		struct bvec_iter iter;
iter             1655 drivers/md/dm-integrity.c 		bio_for_each_segment(bv, bio, iter) {
iter              670 drivers/md/dm-log-writes.c 	struct bvec_iter iter;
iter              754 drivers/md/dm-log-writes.c 	bio_for_each_segment(bv, bio, iter) {
iter              421 drivers/md/dm-verity-fec.c 		      struct bvec_iter *iter)
iter              473 drivers/md/dm-verity-fec.c 	else if (iter) {
iter              475 drivers/md/dm-verity-fec.c 		r = verity_for_bv_block(v, io, iter, fec_bv_copy);
iter               72 drivers/md/dm-verity-fec.h 			     u8 *dest, struct bvec_iter *iter);
iter              103 drivers/md/dm-verity-fec.h 				    struct bvec_iter *iter)
iter              368 drivers/md/dm-verity-target.c 			       struct bvec_iter *iter, struct crypto_wait *wait)
iter              378 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
iter              400 drivers/md/dm-verity-target.c 		bio_advance_iter(bio, iter, len);
iter              412 drivers/md/dm-verity-target.c 			struct bvec_iter *iter,
iter              424 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
iter              438 drivers/md/dm-verity-target.c 		bio_advance_iter(bio, iter, len);
iter              457 drivers/md/dm-verity-target.c 					struct bvec_iter *iter)
iter              461 drivers/md/dm-verity-target.c 	bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
iter              482 drivers/md/dm-verity-target.c 			verity_bv_skip_block(v, io, &io->iter);
iter              497 drivers/md/dm-verity-target.c 			r = verity_for_bv_block(v, io, &io->iter,
iter              509 drivers/md/dm-verity-target.c 		start = io->iter;
iter              510 drivers/md/dm-verity-target.c 		r = verity_for_io_block(v, io, &io->iter, &wait);
iter              664 drivers/md/dm-verity-target.c 	io->iter = bio->bi_iter;
iter               79 drivers/md/dm-verity.h 	struct bvec_iter iter;
iter              120 drivers/md/dm-verity.h 			       struct bvec_iter *iter,
iter             1233 drivers/md/raid5.c 	struct bvec_iter iter;
iter             1248 drivers/md/raid5.c 	bio_for_each_segment(bvl, bio, iter) {
iter              143 drivers/mfd/asic3.c 	int iter, i;
iter              148 drivers/mfd/asic3.c 	for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
iter              204 drivers/mfd/asic3.c 	if (iter >= MAX_ASIC_ISR_LOOPS)
iter              663 drivers/misc/mic/scif/scif_dma.c 		     struct scif_window_iter *iter)
iter              665 drivers/misc/mic/scif/scif_dma.c 	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
iter              748 drivers/misc/mic/scif/scif_dma.c 				size_t *nr_bytes, struct scif_window_iter *iter)
iter              762 drivers/misc/mic/scif/scif_dma.c 	if (iter) {
iter              763 drivers/misc/mic/scif/scif_dma.c 		i = iter->index;
iter              764 drivers/misc/mic/scif/scif_dma.c 		start = iter->offset;
iter              772 drivers/misc/mic/scif/scif_dma.c 			if (iter) {
iter              773 drivers/misc/mic/scif/scif_dma.c 				iter->index = i;
iter              774 drivers/misc/mic/scif/scif_dma.c 				iter->offset = start;
iter              382 drivers/misc/mic/scif/scif_rma.h scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
iter              384 drivers/misc/mic/scif/scif_rma.h 	iter->offset = window->offset;
iter              385 drivers/misc/mic/scif/scif_rma.h 	iter->index = 0;
iter              390 drivers/misc/mic/scif/scif_rma.h 				struct scif_window_iter *iter);
iter              445 drivers/mmc/host/dw_mmc-exynos.c 	const u8 iter = 8;
iter              449 drivers/mmc/host/dw_mmc-exynos.c 	for (i = 0; i < iter; i++) {
iter              457 drivers/mmc/host/dw_mmc-exynos.c 	for (i = 0; i < iter; i++) {
iter              960 drivers/mmc/host/sdhci-tegra.c 	u8 iter = TRIES_256;
iter              970 drivers/mmc/host/sdhci-tegra.c 		iter = TRIES_128;
iter              975 drivers/mmc/host/sdhci-tegra.c 		iter = TRIES_128;
iter              991 drivers/mmc/host/sdhci-tegra.c 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
iter              997 drivers/mmc/host/sdhci-tegra.c 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
iter             1380 drivers/mtd/mtdcore.c 				int (*iter)(struct mtd_info *,
iter             1389 drivers/mtd/mtdcore.c 		ret = iter(mtd, section, oobregion);
iter             1449 drivers/mtd/mtdcore.c 				int (*iter)(struct mtd_info *,
iter             1457 drivers/mtd/mtdcore.c 					&oobregion, iter);
iter             1470 drivers/mtd/mtdcore.c 		ret = iter(mtd, ++section, &oobregion);
iter             1492 drivers/mtd/mtdcore.c 				int (*iter)(struct mtd_info *,
iter             1500 drivers/mtd/mtdcore.c 					&oobregion, iter);
iter             1513 drivers/mtd/mtdcore.c 		ret = iter(mtd, ++section, &oobregion);
iter             1529 drivers/mtd/mtdcore.c 				int (*iter)(struct mtd_info *,
iter             1537 drivers/mtd/mtdcore.c 		ret = iter(mtd, section++, &oobregion);
iter              488 drivers/mtd/nand/spi/core.c 	struct nand_io_iter iter;
iter              498 drivers/mtd/nand/spi/core.c 	nanddev_io_for_each_page(nand, from, ops, &iter) {
iter              499 drivers/mtd/nand/spi/core.c 		ret = spinand_select_target(spinand, iter.req.pos.target);
iter              507 drivers/mtd/nand/spi/core.c 		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
iter              520 drivers/mtd/nand/spi/core.c 		ops->retlen += iter.req.datalen;
iter              521 drivers/mtd/nand/spi/core.c 		ops->oobretlen += iter.req.ooblen;
iter              537 drivers/mtd/nand/spi/core.c 	struct nand_io_iter iter;
iter              546 drivers/mtd/nand/spi/core.c 	nanddev_io_for_each_page(nand, to, ops, &iter) {
iter              547 drivers/mtd/nand/spi/core.c 		ret = spinand_select_target(spinand, iter.req.pos.target);
iter              555 drivers/mtd/nand/spi/core.c 		ret = spinand_write_page(spinand, &iter.req);
iter              559 drivers/mtd/nand/spi/core.c 		ops->retlen += iter.req.datalen;
iter              560 drivers/mtd/nand/spi/core.c 		ops->oobretlen += iter.req.ooblen;
iter              420 drivers/mtd/ubi/debug.c static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
iter              424 drivers/mtd/ubi/debug.c 	int *block_number = iter;
iter              762 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter              765 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave_rcu(bond, slave, iter)
iter             1388 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             1452 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave(bond, slave, iter) {
iter             1668 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             1677 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             1723 drivers/net/bonding/bond_3ad.c 		bond_for_each_slave_rcu(bond, slave, iter) {
iter             2090 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             2125 drivers/net/bonding/bond_3ad.c 			bond_for_each_slave(bond, slave_iter, iter) {
iter             2202 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave(bond, slave_iter, iter) {
iter             2247 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             2261 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave(bond, slave, iter) {
iter             2288 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             2326 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2347 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2619 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             2623 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2689 drivers/net/bonding/bond_3ad.c 	struct list_head *iter;
iter             2695 drivers/net/bonding/bond_3ad.c 	bond_for_each_slave(bond, slave, iter) {
iter              169 drivers/net/bonding/bond_alb.c 	struct list_head *iter;
iter              176 drivers/net/bonding/bond_alb.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter              309 drivers/net/bonding/bond_alb.c 	struct list_head *iter;
iter              312 drivers/net/bonding/bond_alb.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             1149 drivers/net/bonding/bond_alb.c 	struct list_head *iter;
iter             1174 drivers/net/bonding/bond_alb.c 	bond_for_each_slave(bond, tmp_slave1, iter) {
iter             1224 drivers/net/bonding/bond_alb.c 	struct list_head *iter;
iter             1232 drivers/net/bonding/bond_alb.c 	bond_for_each_slave(bond, slave, iter) {
iter             1254 drivers/net/bonding/bond_alb.c 	bond_for_each_slave(bond, rollback_slave, iter) {
iter             1513 drivers/net/bonding/bond_alb.c 	struct list_head *iter;
iter             1531 drivers/net/bonding/bond_alb.c 		bond_for_each_slave_rcu(bond, slave, iter) {
iter             1547 drivers/net/bonding/bond_alb.c 		bond_for_each_slave_rcu(bond, slave, iter) {
iter              284 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              287 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter              297 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, rollback_slave, iter) {
iter              316 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              319 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter)
iter              338 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              347 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter              487 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              498 drivers/net/bonding/bond_main.c 		bond_for_each_slave(bond, slave, iter) {
iter              510 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              521 drivers/net/bonding/bond_main.c 		bond_for_each_slave(bond, slave, iter) {
iter              632 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              634 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter              767 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              774 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter              987 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter              994 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             1014 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             1017 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter)
iter             1025 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             1029 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             1057 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             1066 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             1094 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             1105 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             2058 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2062 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             2079 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2085 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2191 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2194 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             2294 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2322 drivers/net/bonding/bond_main.c 		bond_for_each_slave(bond, slave, iter) {
iter             2434 drivers/net/bonding/bond_main.c 	struct list_head  *iter;
iter             2444 drivers/net/bonding/bond_main.c 	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
iter             2653 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2670 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2734 drivers/net/bonding/bond_main.c 		bond_for_each_slave(bond, slave, iter) {
iter             2768 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2772 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2835 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2838 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             2913 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             2940 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             2982 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             3349 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3354 drivers/net/bonding/bond_main.c 		bond_for_each_slave(bond, slave, iter) {
iter             3443 drivers/net/bonding/bond_main.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             3447 drivers/net/bonding/bond_main.c 	iter = &dev->adj_list.lower;
iter             3452 drivers/net/bonding/bond_main.c 			ldev = netdev_next_lower_dev_rcu(now, &iter);
iter             3459 drivers/net/bonding/bond_main.c 			iter_stack[cur++] = iter;
iter             3473 drivers/net/bonding/bond_main.c 		iter = niter;
iter             3485 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3498 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             3636 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3647 drivers/net/bonding/bond_main.c 		bond_for_each_slave_rcu(bond, slave, iter) {
iter             3715 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3720 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             3747 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, rollback_slave, iter) {
iter             3773 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3792 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             3818 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, rollback_slave, iter) {
iter             3847 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             3852 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             3863 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             4011 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             4045 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             4123 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             4125 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             4154 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             4160 drivers/net/bonding/bond_main.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter             4254 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             4265 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter) {
iter             4386 drivers/net/bonding/bond_main.c 	struct list_head *iter;
iter             4393 drivers/net/bonding/bond_main.c 	bond_for_each_slave(bond, slave, iter)
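
Note: the bond_for_each_slave()/bond_for_each_slave_rcu() hits above all share one shape: the caller supplies a struct list_head *iter cursor and the macro resolves it to the enclosing slave structure on each step. A simplified userspace sketch of that intrusive-list idiom, using a NULL-terminated singly linked list and hypothetical names instead of the bonding API:

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next;
};

/* Recover the enclosing structure from its embedded list_head, in the
 * style of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct slave {
	const char *name;
	struct list_head list;
};

/* External cursor (iter) plus a typed entry (pos), mirroring the
 * bond_for_each_slave(bond, slave, iter) calling convention. */
#define for_each_slave(pos, head, iter)					\
	for ((iter) = (head)->next;					\
	     (iter) && ((pos) = container_of((iter), struct slave, list), 1); \
	     (iter) = (iter)->next)

int main(void)
{
	struct slave s2 = { "eth1", { NULL } };
	struct slave s1 = { "eth0", { &s2.list } };
	struct list_head head = { &s1.list };
	struct list_head *iter;
	struct slave *pos;

	for_each_slave(pos, &head, iter)
		printf("%s\n", pos->name);
	return 0;
}

Keeping the raw cursor outside the macro is what lets the real bonding helpers also report the slave's position or resume from it; the kernel list is circular and doubly linked, which this sketch deliberately simplifies away.
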
iter              988 drivers/net/bonding/bond_options.c 	struct list_head *iter;
iter              992 drivers/net/bonding/bond_options.c 		bond_for_each_slave(bond, slave, iter)
iter             1036 drivers/net/bonding/bond_options.c 	struct list_head *iter;
iter             1059 drivers/net/bonding/bond_options.c 	bond_for_each_slave(bond, slave, iter) {
iter             1130 drivers/net/bonding/bond_options.c 	struct list_head *iter;
iter             1147 drivers/net/bonding/bond_options.c 	bond_for_each_slave(bond, slave, iter) {
iter             1230 drivers/net/bonding/bond_options.c 	struct list_head *iter;
iter             1236 drivers/net/bonding/bond_options.c 	bond_for_each_slave(bond, slave, iter) {
iter             1313 drivers/net/bonding/bond_options.c 	struct list_head *iter;
iter             1342 drivers/net/bonding/bond_options.c 	bond_for_each_slave(bond, slave, iter) {
iter               14 drivers/net/bonding/bond_procfs.c 	struct list_head *iter;
iter               23 drivers/net/bonding/bond_procfs.c 	bond_for_each_slave_rcu(bond, slave, iter)
iter               33 drivers/net/bonding/bond_procfs.c 	struct list_head *iter;
iter               41 drivers/net/bonding/bond_procfs.c 	bond_for_each_slave_rcu(bond, slave, iter) {
iter              169 drivers/net/bonding/bond_sysfs.c 	struct list_head *iter;
iter              176 drivers/net/bonding/bond_sysfs.c 	bond_for_each_slave(bond, slave, iter) {
iter              593 drivers/net/bonding/bond_sysfs.c 	struct list_head *iter;
iter              600 drivers/net/bonding/bond_sysfs.c 	bond_for_each_slave(bond, slave, iter) {
iter             1232 drivers/net/dsa/bcm_sf2_cfp.c 	unsigned int i, j, iter;
iter             1242 drivers/net/dsa/bcm_sf2_cfp.c 			iter = (i - 1) * s + j;
iter             1243 drivers/net/dsa/bcm_sf2_cfp.c 			strlcpy(data + iter * ETH_GSTRING_LEN,
iter             1255 drivers/net/dsa/bcm_sf2_cfp.c 	unsigned int i, j, iter;
iter             1273 drivers/net/dsa/bcm_sf2_cfp.c 			iter = (i - 1) * s + j;
iter             1274 drivers/net/dsa/bcm_sf2_cfp.c 			data[iter] = core_readl(priv, stat->offset);
iter             1270 drivers/net/ethernet/broadcom/bnxt/bnxt.h 	struct rhashtable_iter		iter;
iter             1507 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	struct rhashtable_iter *iter = &tc_info->iter;
iter             1511 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rhashtable_walk_start(iter);
iter             1515 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		flow_node = rhashtable_walk_next(iter);
iter             1533 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rhashtable_walk_stop(iter);
iter             1547 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
iter             1565 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	rhashtable_walk_exit(&tc_info->iter);
iter              318 drivers/net/ethernet/chelsio/cxgb/common.h #define for_each_port(adapter, iter) \
iter              319 drivers/net/ethernet/chelsio/cxgb/common.h 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
iter              612 drivers/net/ethernet/chelsio/cxgb3/common.h #define for_each_port(adapter, iter) \
iter              613 drivers/net/ethernet/chelsio/cxgb3/common.h 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
iter             2686 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	u32 size, j, iter;
iter             2729 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			iter = up_cim_reg->ireg_offset_range;
iter             2734 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			iter = up_cim_reg->ireg_offset_range;
iter             2739 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			iter = 1;
iter             2745 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		for (j = 0; j < iter; j++, buff++) {
iter             1441 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h #define for_each_port(adapter, iter) \
iter             1442 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
iter             3528 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct scatterlist *iter;
iter             3541 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	for_each_sg(adapter->hma.sgt->sgl, iter,
iter             3543 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		page = sg_page(iter);
iter             3556 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct scatterlist *sgl, *iter;
iter             3612 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	for_each_sg(sgl, iter, sgt->orig_nents, i) {
iter             3621 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		sg_set_page(iter, newpage, page_size << page_order, 0);
iter             3639 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	for_each_sg(sgl, iter, sgt->nents, i) {
iter             3640 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		newpage = sg_page(iter);
iter             3641 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		adapter->hma.phy_addr[i] = sg_dma_address(iter);
iter              745 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 	struct rhashtable_iter iter;
iter              750 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 	rhashtable_walk_enter(&adap->flower_tbl, &iter);
iter              752 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 		rhashtable_walk_start(&iter);
iter              754 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 		while ((flower_entry = rhashtable_walk_next(&iter)) &&
iter              772 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 		rhashtable_walk_stop(&iter);
iter              775 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 	rhashtable_walk_exit(&iter);
iter              349 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h #define for_each_ethrxq(sge, iter) \
iter              350 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h 	for (iter = 0; iter < (sge)->ethqsets; iter++)
iter              306 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h #define for_each_port(adapter, iter) \
iter              307 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h 	for (iter = 0; iter < (adapter)->params.nports; iter++)
iter             1519 drivers/net/ethernet/mellanox/mlx4/fw.c 	struct mlx4_icm_iter iter;
iter             1532 drivers/net/ethernet/mellanox/mlx4/fw.c 	for (mlx4_icm_first(icm, &iter);
iter             1533 drivers/net/ethernet/mellanox/mlx4/fw.c 	     !mlx4_icm_last(&iter);
iter             1534 drivers/net/ethernet/mellanox/mlx4/fw.c 	     mlx4_icm_next(&iter)) {
iter             1540 drivers/net/ethernet/mellanox/mlx4/fw.c 		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
iter             1544 drivers/net/ethernet/mellanox/mlx4/fw.c 				  (unsigned long long) mlx4_icm_addr(&iter),
iter             1545 drivers/net/ethernet/mellanox/mlx4/fw.c 				  mlx4_icm_size(&iter));
iter             1550 drivers/net/ethernet/mellanox/mlx4/fw.c 		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
iter             1557 drivers/net/ethernet/mellanox/mlx4/fw.c 				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
iter               97 drivers/net/ethernet/mellanox/mlx4/icm.h 				  struct mlx4_icm_iter *iter)
iter               99 drivers/net/ethernet/mellanox/mlx4/icm.h 	iter->icm      = icm;
iter              100 drivers/net/ethernet/mellanox/mlx4/icm.h 	iter->chunk    = list_empty(&icm->chunk_list) ?
iter              103 drivers/net/ethernet/mellanox/mlx4/icm.h 	iter->page_idx = 0;
iter              106 drivers/net/ethernet/mellanox/mlx4/icm.h static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
iter              108 drivers/net/ethernet/mellanox/mlx4/icm.h 	return !iter->chunk;
iter              111 drivers/net/ethernet/mellanox/mlx4/icm.h static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
iter              113 drivers/net/ethernet/mellanox/mlx4/icm.h 	if (++iter->page_idx >= iter->chunk->nsg) {
iter              114 drivers/net/ethernet/mellanox/mlx4/icm.h 		if (iter->chunk->list.next == &iter->icm->chunk_list) {
iter              115 drivers/net/ethernet/mellanox/mlx4/icm.h 			iter->chunk = NULL;
iter              119 drivers/net/ethernet/mellanox/mlx4/icm.h 		iter->chunk = list_entry(iter->chunk->list.next,
iter              121 drivers/net/ethernet/mellanox/mlx4/icm.h 		iter->page_idx = 0;
iter              125 drivers/net/ethernet/mellanox/mlx4/icm.h static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
iter              127 drivers/net/ethernet/mellanox/mlx4/icm.h 	if (iter->chunk->coherent)
iter              128 drivers/net/ethernet/mellanox/mlx4/icm.h 		return iter->chunk->buf[iter->page_idx].dma_addr;
iter              130 drivers/net/ethernet/mellanox/mlx4/icm.h 		return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
iter              133 drivers/net/ethernet/mellanox/mlx4/icm.h static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
iter              135 drivers/net/ethernet/mellanox/mlx4/icm.h 	if (iter->chunk->coherent)
iter              136 drivers/net/ethernet/mellanox/mlx4/icm.h 		return iter->chunk->buf[iter->page_idx].size;
iter              138 drivers/net/ethernet/mellanox/mlx4/icm.h 		return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
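
Note: mlx4_icm_first()/mlx4_icm_last()/mlx4_icm_next() (icm.h:97-138) form a two-level iterator: an outer walk over a list of chunks and an inner page index within the current chunk, with iteration ending when the chunk pointer is set to NULL. A self-contained sketch of the same shape, with hypothetical names and a NULL-terminated chunk list instead of the driver's list_head-based one:

#include <stdio.h>
#include <stddef.h>

/* A "chunk" holds a small array of pages; chunks form a NULL-terminated
 * singly linked list for simplicity (the sketch assumes a non-empty list). */
struct chunk {
	struct chunk *next;
	int npages;
	int pages[4];
};

struct chunk_iter {
	struct chunk *chunk;	/* NULL means iteration finished */
	int page_idx;
};

static void chunk_first(struct chunk *head, struct chunk_iter *iter)
{
	iter->chunk = head;
	iter->page_idx = 0;
}

static int chunk_last(const struct chunk_iter *iter)
{
	return !iter->chunk;
}

static void chunk_next(struct chunk_iter *iter)
{
	/* Advance within the current chunk first, then hop to the next
	 * chunk, mirroring mlx4_icm_next(). */
	if (++iter->page_idx >= iter->chunk->npages) {
		iter->chunk = iter->chunk->next;
		iter->page_idx = 0;
	}
}

static int chunk_page(const struct chunk_iter *iter)
{
	return iter->chunk->pages[iter->page_idx];
}

int main(void)
{
	struct chunk c2 = { NULL, 2, { 30, 40 } };
	struct chunk c1 = { &c2, 3, { 1, 2, 3 } };
	struct chunk_iter iter;

	for (chunk_first(&c1, &iter); !chunk_last(&iter); chunk_next(&iter))
		printf("%d\n", chunk_page(&iter));
	return 0;
}

This is the same first/last/next protocol the fw.c caller above (fw.c:1532-1534) drives in its for loop.
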
iter              374 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	struct mlx5e_ethtool_rule *iter;
iter              377 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
iter              378 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 		if (iter->flow_spec.location > rule->flow_spec.location)
iter              380 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 		head = &iter->list;
iter              467 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	struct mlx5e_ethtool_rule *iter;
iter              469 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
iter              470 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 		if (iter->flow_spec.location == location)
iter              471 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 			return iter;
iter              773 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	struct mlx5e_ethtool_rule *iter;
iter              776 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
iter              777 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 		del_ethtool_rule(priv, iter);
iter              755 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct fs_node *iter = list_entry(start, struct fs_node, list);
iter              761 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	list_for_each_advance_continue(iter, &root->children, reverse) {
iter              762 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		if (iter->type == FS_TYPE_FLOW_TABLE) {
iter              763 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 			fs_get_obj(ft, iter);
iter              766 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
iter              811 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct mlx5_flow_table *iter;
iter              815 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	fs_for_each_ft(iter, prio) {
iter              817 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		err = root->cmds->modify_flow_table(root, iter, ft);
iter              820 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 				       iter->id);
iter              940 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct mlx5_flow_rule *iter;
iter              955 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
iter              956 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		err = _mlx5_modify_rule_destination(iter, &dest);
iter              993 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct mlx5_flow_table *iter;
iter              995 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	fs_for_each_ft(iter, prio) {
iter              996 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		if (iter->level > ft->level)
iter              998 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		prev = &iter->node.list;
iter             1561 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		struct match_list *iter, *match_tmp;
iter             1565 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		list_for_each_entry_safe(iter, match_tmp, &head->list,
iter             1567 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 			tree_put_node(&iter->g->node, ft_locked);
iter             1568 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 			list_del(&iter->list);
iter             1569 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 			kfree(iter);
iter             1620 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct match_list *iter;
iter             1623 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	list_for_each_entry(iter, match_head, list)
iter             1624 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		version += (u64)atomic_read(&iter->g->node.version);
iter             1672 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	struct match_list *iter;
iter             1687 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	list_for_each_entry(iter, match_head, list) {
iter             1690 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		g = iter->g;
iter             1722 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 	list_for_each_entry(iter, match_head, list) {
iter             1723 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		g = iter->g;
iter             2438 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		struct fs_node *iter;
iter             2442 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 		list_for_each_entry_safe(iter, temp, &node->children, list)
iter             2443 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 			clean_tree(iter);
iter                9 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c #define devcom_for_each_component(priv, comp, iter) \
iter               10 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 	for (iter = 0; \
iter               11 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 	     comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
iter               12 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 	     iter++)
iter               69 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 	struct mlx5_devcom_list *priv = NULL, *iter;
iter               79 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 	list_for_each_entry(iter, &devcom_list, list) {
iter               84 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 			if (iter->devs[i])
iter               85 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 				tmp_dev = iter->devs[i];
iter               97 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 		priv = iter;
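
Note: devcom_for_each_component() (devcom.c:9-12) folds the per-iteration assignment of comp into the loop condition with the comma operator, so comp is refreshed before the bound check on every pass and ends up pointing one past the array once the loop terminates. The trick in isolation, with hypothetical names:

#include <stdio.h>

#define NUM_COMPONENTS 3

struct component {
	int id;
};

struct priv {
	struct component components[NUM_COMPONENTS];
};

/* The comma expression evaluates the assignment first and the bound test
 * second; only the test controls the loop. After the loop, comp holds a
 * one-past-the-end pointer and must not be dereferenced. */
#define priv_for_each_component(priv, comp, i)				\
	for ((i) = 0;							\
	     (comp) = &(priv)->components[i], (i) < NUM_COMPONENTS;	\
	     (i)++)

int main(void)
{
	struct priv p = { { { 10 }, { 20 }, { 30 } } };
	struct component *comp;
	int i;

	priv_for_each_component(&p, comp, i)
		printf("%d\n", comp->id);	/* prints 10, 20, 30 */
	return 0;
}
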
iter             5551 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct list_head *iter;
iter             5556 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
iter             5918 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct list_head *iter;
iter             5920 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netdev_for_each_lower_dev(br_dev, dev, iter) {
iter             5932 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct list_head *iter;
iter             5934 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netdev_for_each_lower_dev(br_dev, dev, iter) {
iter             6134 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct list_head *iter;
iter             6137 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
iter             6222 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct list_head *iter;
iter             6225 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
iter              316 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 	struct list_head *iter;
iter              318 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 	netdev_for_each_lower_dev(br_dev, dev, iter) {
iter              683 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	struct rhashtable_iter iter;
iter              690 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
iter              691 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	rhashtable_walk_start(&iter);
iter              692 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	while ((obj = rhashtable_walk_next(&iter))) {
iter              700 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	rhashtable_walk_stop(&iter);
iter              701 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	rhashtable_walk_exit(&iter);
iter             5940 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct fib6_info *iter;
iter             5959 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
iter             5963 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		rt_arr[i + 1] = iter;
iter             5964 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		fib6_info_hold(iter);
iter             6783 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct list_head *iter;
iter             6786 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
iter              224 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct list_head *iter;
iter              226 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	netdev_for_each_lower_dev(lag_dev, dev, iter)
iter             2027 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c 	struct list_head *iter;
iter             2029 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c 	netdev_for_each_lower_dev(br_dev, dev, iter) {
iter             1739 drivers/net/ethernet/mscc/ocelot.c 		struct list_head *iter;
iter             1741 drivers/net/ethernet/mscc/ocelot.c 		netdev_for_each_lower_dev(dev, slave, iter) {
iter              110 drivers/net/ethernet/netronome/nfp/abm/cls.c 	struct nfp_abm_u32_match *iter;
iter              112 drivers/net/ethernet/netronome/nfp/abm/cls.c 	list_for_each_entry(iter, &alink->dscp_map, list)
iter              113 drivers/net/ethernet/netronome/nfp/abm/cls.c 		if ((prio & iter->mask) == iter->val)
iter              114 drivers/net/ethernet/netronome/nfp/abm/cls.c 			return iter->band;
iter              158 drivers/net/ethernet/netronome/nfp/abm/cls.c 	struct nfp_abm_u32_match *iter;
iter              160 drivers/net/ethernet/netronome/nfp/abm/cls.c 	list_for_each_entry(iter, &alink->dscp_map, list)
iter              161 drivers/net/ethernet/netronome/nfp/abm/cls.c 		if (iter->handle == knode->handle) {
iter              162 drivers/net/ethernet/netronome/nfp/abm/cls.c 			list_del(&iter->list);
iter              163 drivers/net/ethernet/netronome/nfp/abm/cls.c 			kfree(iter);
iter              174 drivers/net/ethernet/netronome/nfp/abm/cls.c 	struct nfp_abm_u32_match *match = NULL, *iter;
iter              189 drivers/net/ethernet/netronome/nfp/abm/cls.c 	list_for_each_entry(iter, &alink->dscp_map, list) {
iter              192 drivers/net/ethernet/netronome/nfp/abm/cls.c 		if (iter->handle == knode->handle) {
iter              193 drivers/net/ethernet/netronome/nfp/abm/cls.c 			match = iter;
iter              197 drivers/net/ethernet/netronome/nfp/abm/cls.c 		cmask = iter->mask & mask;
iter              198 drivers/net/ethernet/netronome/nfp/abm/cls.c 		if ((iter->val & cmask) == (val & cmask) &&
iter              199 drivers/net/ethernet/netronome/nfp/abm/cls.c 		    iter->band != knode->res->classid) {
iter              248 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	struct radix_tree_iter iter;
iter              260 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
iter              269 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
iter              288 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	struct radix_tree_iter iter;
iter              303 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
iter             1574 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	int i, iter, rc = 0;
iter             1593 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
iter             1641 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	if (iter == QEDE_SELFTEST_POLL_COUNT) {
iter              172 drivers/net/ethernet/sun/ldmvsw.c 	struct vnet *iter;
iter              198 drivers/net/ethernet/sun/ldmvsw.c 	list_for_each_entry(iter, &vnet_list, list) {
iter              199 drivers/net/ethernet/sun/ldmvsw.c 		if (iter->local_mac == *local_mac) {
iter              200 drivers/net/ethernet/sun/ldmvsw.c 			vp = iter;
iter              346 drivers/net/ethernet/sun/sunvnet.c 	struct vnet *iter, *vp;
iter              350 drivers/net/ethernet/sun/sunvnet.c 	list_for_each_entry(iter, &vnet_list, list) {
iter              351 drivers/net/ethernet/sun/sunvnet.c 		if (iter->local_mac == *local_mac) {
iter              352 drivers/net/ethernet/sun/sunvnet.c 			vp = iter;
iter              769 drivers/net/tap.c 			    struct iov_iter *iter)
iter              781 drivers/net/tap.c 		if (iov_iter_count(iter) < vnet_hdr_len)
iter              789 drivers/net/tap.c 		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
iter              793 drivers/net/tap.c 		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
iter              809 drivers/net/tap.c 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
iter              810 drivers/net/tap.c 		if (ret || !iov_iter_count(iter))
iter              813 drivers/net/tap.c 		ret = copy_to_iter(&veth, sizeof(veth), iter);
iter              814 drivers/net/tap.c 		if (ret != sizeof(veth) || !iov_iter_count(iter))
iter              818 drivers/net/tap.c 	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
iter             2045 drivers/net/tun.c 				struct iov_iter *iter)
iter             2056 drivers/net/tun.c 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
iter             2058 drivers/net/tun.c 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
iter             2061 drivers/net/tun.c 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
iter             2064 drivers/net/tun.c 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
iter             2080 drivers/net/tun.c 			    struct iov_iter *iter)
iter             2098 drivers/net/tun.c 		if (iov_iter_count(iter) < sizeof(pi))
iter             2102 drivers/net/tun.c 		if (iov_iter_count(iter) < total) {
iter             2107 drivers/net/tun.c 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
iter             2114 drivers/net/tun.c 		if (iov_iter_count(iter) < vnet_hdr_sz)
iter             2133 drivers/net/tun.c 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
iter             2136 drivers/net/tun.c 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
iter             2148 drivers/net/tun.c 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
iter             2149 drivers/net/tun.c 		if (ret || !iov_iter_count(iter))
iter             2152 drivers/net/tun.c 		ret = copy_to_iter(&veth, sizeof(veth), iter);
iter             2153 drivers/net/tun.c 		if (ret != sizeof(veth) || !iov_iter_count(iter))
iter             2157 drivers/net/tun.c 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
iter              178 drivers/net/usb/qmi_wwan.c 	struct list_head *iter;
iter              182 drivers/net/usb/qmi_wwan.c 	netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) {
iter              407 drivers/net/usb/qmi_wwan.c 	struct list_head *iter;
iter              412 drivers/net/usb/qmi_wwan.c 	netdev_for_each_upper_dev_rcu(dev, ldev, iter) {
iter             1502 drivers/net/usb/qmi_wwan.c 	struct list_head *iter;
iter             1516 drivers/net/usb/qmi_wwan.c 		netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
iter             1316 drivers/net/vrf.c 	struct list_head *iter;
iter             1318 drivers/net/vrf.c 	netdev_for_each_lower_dev(dev, port_dev, iter)
iter               71 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
iter              111 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
iter              303 drivers/net/wireless/ath/carl9170/debug.c 	struct carl9170_sta_tid *iter;
iter              309 drivers/net/wireless/ath/carl9170/debug.c 	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
iter              311 drivers/net/wireless/ath/carl9170/debug.c 		spin_lock_bh(&iter->lock);
iter              314 drivers/net/wireless/ath/carl9170/debug.c 		    cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
iter              315 drivers/net/wireless/ath/carl9170/debug.c 		    iter->max, iter->state, iter->counter);
iter              318 drivers/net/wireless/ath/carl9170/debug.c 		    CARL9170_BAW_BITS, iter->bitmap);
iter              327 drivers/net/wireless/ath/carl9170/debug.c 		offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
iter              330 drivers/net/wireless/ath/carl9170/debug.c 		offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
iter              335 drivers/net/wireless/ath/carl9170/debug.c 		    " currently queued:%d\n", skb_queue_len(&iter->queue));
iter              338 drivers/net/wireless/ath/carl9170/debug.c 		skb_queue_walk(&iter->queue, skb) {
iter              347 drivers/net/wireless/ath/carl9170/debug.c 		spin_unlock_bh(&iter->lock);
iter              439 drivers/net/wireless/ath/carl9170/debug.c 	struct carl9170_vif_info *iter;
iter              449 drivers/net/wireless/ath/carl9170/debug.c 	list_for_each_entry_rcu(iter, &ar->vif_list, list) {
iter              450 drivers/net/wireless/ath/carl9170/debug.c 		struct ieee80211_vif *vif = carl9170_get_vif(iter);
iter              453 drivers/net/wireless/ath/carl9170/debug.c 		    "Master" : " Slave"), iter->id, vif->type, vif->addr,
iter              454 drivers/net/wireless/ath/carl9170/debug.c 		    iter->enable_beacon ? "beaconing " : "");
iter               23 drivers/net/wireless/ath/carl9170/fw.c 	const struct carl9170fw_desc_head *iter;
iter               25 drivers/net/wireless/ath/carl9170/fw.c 	carl9170fw_for_each_hdr(iter, ar->fw.desc) {
iter               26 drivers/net/wireless/ath/carl9170/fw.c 		if (carl9170fw_desc_cmp(iter, descid, len,
iter               28 drivers/net/wireless/ath/carl9170/fw.c 			return (void *)iter;
iter               32 drivers/net/wireless/ath/carl9170/fw.c 	if (carl9170fw_desc_cmp(iter, descid, len,
iter               34 drivers/net/wireless/ath/carl9170/fw.c 		return (void *)iter;
iter             1065 drivers/net/wireless/ath/carl9170/main.c 		struct carl9170_vif_info *iter;
iter             1070 drivers/net/wireless/ath/carl9170/main.c 		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
iter             1071 drivers/net/wireless/ath/carl9170/main.c 			if (iter->active && iter->enable_beacon)
iter              605 drivers/net/wireless/ath/carl9170/tx.c 	struct carl9170_sta_tid *iter;
iter              612 drivers/net/wireless/ath/carl9170/tx.c 	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
iter              613 drivers/net/wireless/ath/carl9170/tx.c 		if (iter->state < CARL9170_TID_STATE_IDLE)
iter              616 drivers/net/wireless/ath/carl9170/tx.c 		spin_lock_bh(&iter->lock);
iter              617 drivers/net/wireless/ath/carl9170/tx.c 		skb = skb_peek(&iter->queue);
iter              627 drivers/net/wireless/ath/carl9170/tx.c 		sta = iter->sta;
iter              631 drivers/net/wireless/ath/carl9170/tx.c 		ieee80211_stop_tx_ba_session(sta, iter->tid);
iter              633 drivers/net/wireless/ath/carl9170/tx.c 		spin_unlock_bh(&iter->lock);
iter             1400 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *iter;
iter             1435 drivers/net/wireless/ath/carl9170/tx.c 	skb_queue_reverse_walk(&agg->queue, iter) {
iter             1436 drivers/net/wireless/ath/carl9170/tx.c 		qseq = carl9170_get_seq(iter);
iter             1439 drivers/net/wireless/ath/carl9170/tx.c 			__skb_queue_after(&agg->queue, iter, skb);
iter              630 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct scatterlist *iter;
iter              638 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	iter = table;
iter              639 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	for_each_sg(table, iter, sg_nents(table), i) {
iter              643 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			iter = table;
iter              644 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			for_each_sg(table, iter, sg_nents(table), i) {
iter              645 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 				new_page = sg_page(iter);
iter              654 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		sg_set_page(iter, new_page, alloc_size, 0);
iter             1180 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
iter             1193 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		iter->internal_txf = 0;
iter             1194 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		iter->fifo_size = 0;
iter             1195 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		iter->fifo = -1;
iter             1197 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			iter->lmac = 1;
iter             1199 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			iter->lmac = 0;
iter             1202 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	if (!iter->internal_txf)
iter             1203 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
iter             1204 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			iter->fifo_size =
iter             1205 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 				cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
iter             1206 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
iter             1210 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	iter->internal_txf = 1;
iter             1216 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) {
iter             1217 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		iter->fifo_size =
iter             1218 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			cfg->internal_txfifo_size[iter->fifo - txf_num];
iter             1219 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 		if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
iter             1231 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
iter             1246 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
iter             1248 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
iter             1250 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
iter             1281 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	for (i = 0; i < iter->fifo_size; i += sizeof(*data))
iter             1580 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
iter             1589 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 			size += iter->fifo_size;
iter              634 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	int iter;
iter              647 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	for (iter = 0; iter < 10; iter++) {
iter               86 drivers/net/wireless/marvell/libertas/firmware.c 	const struct lbs_fw_table *iter;
iter               89 drivers/net/wireless/marvell/libertas/firmware.c 		iter = priv->fw_table;
iter               91 drivers/net/wireless/marvell/libertas/firmware.c 		iter = ++priv->fw_iter;
iter               99 drivers/net/wireless/marvell/libertas/firmware.c 	if (!iter->helper) {
iter              105 drivers/net/wireless/marvell/libertas/firmware.c 	if (iter->model != priv->fw_model) {
iter              106 drivers/net/wireless/marvell/libertas/firmware.c 		iter++;
iter              110 drivers/net/wireless/marvell/libertas/firmware.c 	priv->fw_iter = iter;
iter              111 drivers/net/wireless/marvell/libertas/firmware.c 	do_load_firmware(priv, iter->helper, helper_firmware_cb);
iter              177 drivers/net/wireless/marvell/libertas/firmware.c 	const struct lbs_fw_table *iter;
iter              184 drivers/net/wireless/marvell/libertas/firmware.c 	iter = fw_table;
iter              185 drivers/net/wireless/marvell/libertas/firmware.c 	while (iter && iter->helper) {
iter              186 drivers/net/wireless/marvell/libertas/firmware.c 		if (iter->model != card_model)
iter              190 drivers/net/wireless/marvell/libertas/firmware.c 			ret = request_firmware(helper, iter->helper, dev);
iter              198 drivers/net/wireless/marvell/libertas/firmware.c 			if (iter->fwname == NULL)
iter              203 drivers/net/wireless/marvell/libertas/firmware.c 			ret = request_firmware(mainfw, iter->fwname, dev);
iter              217 drivers/net/wireless/marvell/libertas/firmware.c 		iter++;
iter              292 drivers/net/wireless/mediatek/mt76/dma.c 	struct sk_buff *iter;
iter              317 drivers/net/wireless/mediatek/mt76/dma.c 	skb_walk_frags(skb, iter) {
iter              321 drivers/net/wireless/mediatek/mt76/dma.c 		addr = dma_map_single(dev->dev, iter->data, iter->len,
iter              327 drivers/net/wireless/mediatek/mt76/dma.c 		tx_info.buf[n++].len = iter->len;
iter               28 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	struct sk_buff *iter, *last = skb;
iter               47 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	skb_walk_frags(skb, iter) {
iter               48 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 		last = iter;
iter               49 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 		if (!iter->next) {
iter              500 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		struct sk_buff *skb, *iter;
iter              510 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) {
iter              511 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			ieee80211hdr = (struct ieee80211_hdr *)iter->data;
iter              525 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 				skb = iter;
iter              611 drivers/net/wireless/realtek/rtw88/fw.c 	struct sk_buff *iter;
iter              623 drivers/net/wireless/realtek/rtw88/fw.c 		iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
iter              624 drivers/net/wireless/realtek/rtw88/fw.c 		if (!iter) {
iter              633 drivers/net/wireless/realtek/rtw88/fw.c 			rtw_fill_rsvd_page_desc(rtwdev, iter);
iter              635 drivers/net/wireless/realtek/rtw88/fw.c 		rsvd_pkt->skb = iter;
iter              651 drivers/net/wireless/realtek/rtw88/fw.c 			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
iter              654 drivers/net/wireless/realtek/rtw88/fw.c 			total_page += rtw_len_to_page(iter->len, page_size);
iter             2794 drivers/net/wireless/ti/wlcore/main.c 	struct wl12xx_vif *iter;
iter             2808 drivers/net/wireless/ti/wlcore/main.c 	wl12xx_for_each_wlvif(wl, iter) {
iter             2809 drivers/net/wireless/ti/wlcore/main.c 		if (iter != wlvif)
iter             2815 drivers/net/wireless/ti/wlcore/main.c 	WARN_ON(iter != wlvif);
iter              169 drivers/nvdimm/blk.c 	struct bvec_iter iter;
iter              182 drivers/nvdimm/blk.c 	bio_for_each_segment(bvec, bio, iter) {
iter              187 drivers/nvdimm/blk.c 				bvec.bv_offset, rw, iter.bi_sector);
iter              192 drivers/nvdimm/blk.c 					(unsigned long long) iter.bi_sector, len);
iter             1446 drivers/nvdimm/btt.c 	struct bvec_iter iter;
iter             1456 drivers/nvdimm/btt.c 	bio_for_each_segment(bvec, bio, iter) {
iter             1468 drivers/nvdimm/btt.c 				  bio_op(bio), iter.bi_sector);
iter             1474 drivers/nvdimm/btt.c 					(unsigned long long) iter.bi_sector, len);
iter              192 drivers/nvdimm/pmem.c 	struct bvec_iter iter;
iter              200 drivers/nvdimm/pmem.c 	bio_for_each_segment(bvec, bio, iter) {
iter              202 drivers/nvdimm/pmem.c 				bvec.bv_offset, bio_op(bio), iter.bi_sector);
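
The nvdimm hits above (blk, btt, pmem) all walk a bio with bio_for_each_segment(), receiving a struct bio_vec copy per segment while the struct bvec_iter tracks the current sector. A minimal sketch with a hypothetical do_one_segment() handler; the iteration macro and the types are the real block-layer API.

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Hypothetical per-segment handler; a real driver would copy to or
 * from its media here. */
static int do_one_segment(struct page *page, unsigned int len,
                          unsigned int off, sector_t sector)
{
        return 0;
}

static blk_status_t walk_bio(struct bio *bio)
{
        struct bio_vec bvec;    /* copy of the current segment */
        struct bvec_iter iter;  /* cursor: current sector, size, index */

        bio_for_each_segment(bvec, bio, iter) {
                if (do_one_segment(bvec.bv_page, bvec.bv_len,
                                   bvec.bv_offset, iter.bi_sector))
                        return BLK_STS_IOERR;
        }
        return BLK_STS_OK;
}
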
iter               42 drivers/nvme/host/tcp.c 	struct iov_iter		iter;
iter              179 drivers/nvme/host/tcp.c 	return req->iter.bvec->bv_page;
iter              184 drivers/nvme/host/tcp.c 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
iter              189 drivers/nvme/host/tcp.c 	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
iter              195 drivers/nvme/host/tcp.c 	return req->iter.iov_offset;
iter              233 drivers/nvme/host/tcp.c 	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
iter              234 drivers/nvme/host/tcp.c 	req->iter.iov_offset = offset;
iter              242 drivers/nvme/host/tcp.c 	iov_iter_advance(&req->iter, len);
iter              243 drivers/nvme/host/tcp.c 	if (!iov_iter_count(&req->iter) &&
iter              656 drivers/nvme/host/tcp.c 		if (!iov_iter_count(&req->iter)) {
iter              675 drivers/nvme/host/tcp.c 				iov_iter_count(&req->iter));
iter              679 drivers/nvme/host/tcp.c 				&req->iter, recv_len, queue->rcv_hash);
iter              682 drivers/nvme/host/tcp.c 					&req->iter, recv_len);
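
The nvme-tcp hits above keep a per-request iov_iter built over the request's bio_vec array with iov_iter_bvec() and consume it with iov_iter_advance()/iov_iter_count(). A minimal sketch of that consume loop; the iov_iter calls are the real <linux/uio.h> API, the chunked "reception" is only simulated.

#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/kernel.h>
#include <linux/fs.h>

/* Wrap a caller-built bio_vec array for reception (READ: data lands in
 * the buffers) and consume it in arbitrary chunks. */
static void consume_bvecs(struct bio_vec *vec, unsigned int nsegs,
                          size_t total, size_t chunk)
{
        struct iov_iter iter;

        iov_iter_bvec(&iter, READ, vec, nsegs, total);

        while (iov_iter_count(&iter)) {
                size_t n = min(chunk, iov_iter_count(&iter));

                /* copy_to_iter()/skb_copy_datagram_iter() would place
                 * received data here; we only move the cursor. */
                iov_iter_advance(&iter, n);
        }
}
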
iter             1188 drivers/nvme/target/core.c 	struct radix_tree_iter iter;
iter             1191 drivers/nvme/target/core.c 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
iter               94 drivers/nvme/target/io-cmd-file.c 	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
iter               95 drivers/nvme/target/io-cmd-file.c 	struct iov_iter iter;
iter              108 drivers/nvme/target/io-cmd-file.c 	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
iter              114 drivers/nvme/target/io-cmd-file.c 	return call_iter(iocb, &iter);
iter              276 drivers/nvmem/core.c 	struct nvmem_cell *iter, *cell = NULL;
iter              279 drivers/nvmem/core.c 	list_for_each_entry(iter, &nvmem->cells, node) {
iter              280 drivers/nvmem/core.c 		if (strcmp(cell_id, iter->name) == 0) {
iter              281 drivers/nvmem/core.c 			cell = iter;
iter              740 drivers/nvmem/core.c 	struct nvmem_cell *iter, *cell = NULL;
iter              743 drivers/nvmem/core.c 	list_for_each_entry(iter, &nvmem->cells, node) {
iter              744 drivers/nvmem/core.c 		if (np == iter->np) {
iter              745 drivers/nvmem/core.c 			cell = iter;
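
The drivers/nvmem/core.c hits above are the classic find-by-key list walk: iterate with list_for_each_entry() and remember the matching entry. A minimal sketch of the same idiom with a hypothetical struct item keyed by name; list_for_each_entry() itself is the real <linux/list.h> API.

#include <linux/list.h>
#include <linux/string.h>

struct item {
        struct list_head node;
        const char *name;       /* hypothetical lookup key */
};

static struct item *find_item(struct list_head *items, const char *id)
{
        struct item *iter, *found = NULL;

        list_for_each_entry(iter, items, node) {
                if (strcmp(id, iter->name) == 0) {
                        found = iter;
                        break;
                }
        }
        return found;
}
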
iter             1934 drivers/pci/controller/pci-hyperv.c 	struct hv_pci_dev *iter, *hpdev = NULL;
iter             1937 drivers/pci/controller/pci-hyperv.c 	list_for_each_entry(iter, &hbus->children, list_entry) {
iter             1938 drivers/pci/controller/pci-hyperv.c 		if (iter->desc.win_slot.slot == wslot) {
iter             1939 drivers/pci/controller/pci-hyperv.c 			hpdev = iter;
iter               62 drivers/pci/endpoint/pci-epc-core.c 	struct class_dev_iter iter;
iter               64 drivers/pci/endpoint/pci-epc-core.c 	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
iter               65 drivers/pci/endpoint/pci-epc-core.c 	while ((dev = class_dev_iter_next(&iter))) {
iter               75 drivers/pci/endpoint/pci-epc-core.c 		class_dev_iter_exit(&iter);
iter               81 drivers/pci/endpoint/pci-epc-core.c 	class_dev_iter_exit(&iter);
iter              623 drivers/phy/phy-core.c 	struct class_dev_iter iter;
iter              625 drivers/phy/phy-core.c 	class_dev_iter_init(&iter, phy_class, NULL, NULL);
iter              626 drivers/phy/phy-core.c 	while ((dev = class_dev_iter_next(&iter))) {
iter              631 drivers/phy/phy-core.c 		class_dev_iter_exit(&iter);
iter              635 drivers/phy/phy-core.c 	class_dev_iter_exit(&iter);
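
The pci-epc-core and phy-core hits above scan every device registered in a class with the class_dev_iter helpers. A minimal sketch that looks a device up by name, assuming a demo_class created elsewhere; class_dev_iter_init()/next()/exit(), dev_name() and get_device() are the real driver-core API.

#include <linux/device.h>
#include <linux/string.h>

/* Assumed to have been created elsewhere with class_create(). */
extern struct class *demo_class;

static struct device *demo_class_find_device(const char *name)
{
        struct class_dev_iter iter;
        struct device *dev;

        class_dev_iter_init(&iter, demo_class, NULL, NULL);
        while ((dev = class_dev_iter_next(&iter))) {
                if (strcmp(dev_name(dev), name))
                        continue;
                get_device(dev);        /* keep a reference for the caller */
                break;
        }
        class_dev_iter_exit(&iter);

        return dev;                     /* NULL if nothing matched */
}
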
iter              692 drivers/pinctrl/core.c 	struct radix_tree_iter iter;
iter              695 drivers/pinctrl/core.c 	radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
iter              696 drivers/pinctrl/core.c 		radix_tree_delete(&pctldev->pin_group_tree, iter.index);
iter              842 drivers/pinctrl/pinmux.c 	struct radix_tree_iter iter;
iter              845 drivers/pinctrl/pinmux.c 	radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
iter              846 drivers/pinctrl/pinmux.c 		radix_tree_delete(&pctldev->pin_function_tree, iter.index);
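
The pinctrl hits above drain a radix tree by iterating its populated slots and deleting each index, the teardown pattern for the group/function trees. A minimal sketch; radix_tree_for_each_slot(), iter.index and radix_tree_delete() are the real <linux/radix-tree.h> API.

#include <linux/radix-tree.h>

/* Remove every entry from @root, mirroring the pinctrl teardown above. */
static void drain_radix_tree(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, root, &iter, 0)
                radix_tree_delete(root, iter.index);
}
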
iter              428 drivers/platform/x86/intel_pmc_core.c 	int index, iter;
iter              430 drivers/platform/x86/intel_pmc_core.c 	iter = pmcdev->map->ppfear0_offset;
iter              433 drivers/platform/x86/intel_pmc_core.c 	     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
iter              434 drivers/platform/x86/intel_pmc_core.c 		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
iter              506 drivers/s390/block/dasd_diag.c 	struct req_iterator iter;
iter              527 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
iter              548 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
iter             3162 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             3209 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             3844 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             3866 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             3941 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             4034 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             4108 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             4364 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             4401 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             4451 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
iter             4484 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
iter             4614 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             4720 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter             4763 drivers/s390/block/dasd_eckd.c 	struct req_iterator iter;
iter             4781 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
iter              447 drivers/s390/block/dasd_fba.c 	struct req_iterator iter;
iter              469 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
iter              511 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
iter              579 drivers/s390/block/dasd_fba.c 	struct req_iterator iter;
iter              593 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
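
The s390 dasd hits above walk a struct request segment by segment with rq_for_each_segment() and a struct req_iterator while building channel programs. A minimal sketch that only counts payload bytes; the macro and both types are the real <linux/blkdev.h> API.

#include <linux/blkdev.h>

/* Count the payload bytes of a request by visiting each bio_vec. */
static unsigned int count_request_bytes(struct request *req)
{
        struct req_iterator iter;
        struct bio_vec bv;
        unsigned int bytes = 0;

        rq_for_each_segment(bv, req, iter)
                bytes += bv.bv_len;

        return bytes;
}
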
iter              859 drivers/s390/block/dcssblk.c 	struct bvec_iter iter;
iter              895 drivers/s390/block/dcssblk.c 	bio_for_each_segment(bvec, bio, iter) {
iter               53 drivers/s390/block/scm_blk.c 	struct list_head *iter, *safe;
iter               57 drivers/s390/block/scm_blk.c 	list_for_each_safe(iter, safe, &inactive_requests) {
iter               58 drivers/s390/block/scm_blk.c 		scmrq = list_entry(iter, struct scm_request, list);
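
The scm_blk hits above free a list while walking it, which needs the _safe variant so the cursor survives each deletion. A minimal sketch with a hypothetical struct demo_req node; list_for_each_safe(), list_entry(), list_del() and kfree() are real.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_req {                       /* hypothetical list node */
        struct list_head list;
};

static void free_all(struct list_head *head)
{
        struct list_head *iter, *safe;
        struct demo_req *req;

        /* @safe caches iter->next so the entry can be unlinked and freed. */
        list_for_each_safe(iter, safe, head) {
                req = list_entry(iter, struct demo_req, list);
                list_del(iter);
                kfree(req);
        }
}
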
iter              186 drivers/s390/block/scm_blk.c 	struct req_iterator iter;
iter              201 drivers/s390/block/scm_blk.c 	rq_for_each_segment(bv, req, iter) {
iter              189 drivers/s390/block/xpram.c 	struct bvec_iter iter;
iter              206 drivers/s390/block/xpram.c 	bio_for_each_segment(bvec, bio, iter) {
iter              287 drivers/s390/cio/blacklist.c 	struct ccwdev_iter *iter = s->private;
iter              291 drivers/s390/cio/blacklist.c 	memset(iter, 0, sizeof(*iter));
iter              292 drivers/s390/cio/blacklist.c 	iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
iter              293 drivers/s390/cio/blacklist.c 	iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
iter              294 drivers/s390/cio/blacklist.c 	return iter;
iter              305 drivers/s390/cio/blacklist.c 	struct ccwdev_iter *iter;
iter              311 drivers/s390/cio/blacklist.c 	iter = it;
iter              312 drivers/s390/cio/blacklist.c 	if (iter->devno == __MAX_SUBCHANNEL) {
iter              313 drivers/s390/cio/blacklist.c 		iter->devno = 0;
iter              314 drivers/s390/cio/blacklist.c 		iter->ssid++;
iter              315 drivers/s390/cio/blacklist.c 		if (iter->ssid > __MAX_SSID)
iter              318 drivers/s390/cio/blacklist.c 		iter->devno++;
iter              319 drivers/s390/cio/blacklist.c 	return iter;
iter              325 drivers/s390/cio/blacklist.c 	struct ccwdev_iter *iter;
iter              327 drivers/s390/cio/blacklist.c 	iter = it;
iter              328 drivers/s390/cio/blacklist.c 	if (!is_blacklisted(iter->ssid, iter->devno))
iter              331 drivers/s390/cio/blacklist.c 	if (!iter->in_range) {
iter              333 drivers/s390/cio/blacklist.c 		if ((iter->devno == __MAX_SUBCHANNEL) ||
iter              334 drivers/s390/cio/blacklist.c 		    !is_blacklisted(iter->ssid, iter->devno + 1)) {
iter              336 drivers/s390/cio/blacklist.c 			seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
iter              339 drivers/s390/cio/blacklist.c 		iter->in_range = 1;
iter              340 drivers/s390/cio/blacklist.c 		seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
iter              343 drivers/s390/cio/blacklist.c 	if ((iter->devno == __MAX_SUBCHANNEL) ||
iter              344 drivers/s390/cio/blacklist.c 	    !is_blacklisted(iter->ssid, iter->devno + 1)) {
iter              346 drivers/s390/cio/blacklist.c 		iter->in_range = 0;
iter              347 drivers/s390/cio/blacklist.c 		seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
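
The cio blacklist hits above implement a seq_file iterator: ->start positions the cursor from *pos, ->next advances it, ->show prints one record. A minimal sketch over a fixed array rather than subchannel IDs; struct seq_operations and seq_printf() are the real seq_file API, demo_values[] is hypothetical.

#include <linux/seq_file.h>
#include <linux/kernel.h>

static const int demo_values[] = { 1, 2, 3, 5, 8 };

static void *demo_seq_start(struct seq_file *s, loff_t *pos)
{
        return *pos < (loff_t)ARRAY_SIZE(demo_values) ?
                (void *)&demo_values[*pos] : NULL;
}

static void *demo_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        ++*pos;
        return demo_seq_start(s, pos);
}

static void demo_seq_stop(struct seq_file *s, void *v)
{
}

static int demo_seq_show(struct seq_file *s, void *v)
{
        seq_printf(s, "%d\n", *(const int *)v);
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start  = demo_seq_start,
        .next   = demo_seq_next,
        .stop   = demo_seq_stop,
        .show   = demo_seq_show,
};
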
iter              491 drivers/s390/cio/vfio_ccw_cp.c 	struct ccwchain *iter;
iter              494 drivers/s390/cio/vfio_ccw_cp.c 	list_for_each_entry(iter, &cp->ccwchain_list, next) {
iter              495 drivers/s390/cio/vfio_ccw_cp.c 		ccw_head = iter->ch_iova;
iter              496 drivers/s390/cio/vfio_ccw_cp.c 		if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
iter              497 drivers/s390/cio/vfio_ccw_cp.c 			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
iter              157 drivers/scsi/qedf/qedf_dbg.c qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
iter              161 drivers/scsi/qedf/qedf_dbg.c 	for (; iter->name; iter++) {
iter              163 drivers/scsi/qedf/qedf_dbg.c 					    iter->attr);
iter              166 drivers/scsi/qedf/qedf_dbg.c 			       iter->name, ret);
iter              172 drivers/scsi/qedf/qedf_dbg.c qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
iter              174 drivers/scsi/qedf/qedf_dbg.c 	for (; iter->name; iter++)
iter              175 drivers/scsi/qedf/qedf_dbg.c 		sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
iter              112 drivers/scsi/qedf/qedf_dbg.h 				   struct sysfs_bin_attrs *iter);
iter              114 drivers/scsi/qedf/qedf_dbg.h 				    struct sysfs_bin_attrs *iter);
iter              108 drivers/scsi/qedi/qedi_dbg.c qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
iter              112 drivers/scsi/qedi/qedi_dbg.c 	for (; iter->name; iter++) {
iter              114 drivers/scsi/qedi/qedi_dbg.c 					    iter->attr);
iter              117 drivers/scsi/qedi/qedi_dbg.c 			       iter->name, ret);
iter              123 drivers/scsi/qedi/qedi_dbg.c qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
iter              125 drivers/scsi/qedi/qedi_dbg.c 	for (; iter->name; iter++)
iter              126 drivers/scsi/qedi/qedi_dbg.c 		sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
iter               99 drivers/scsi/qedi/qedi_dbg.h 			   struct sysfs_bin_attrs *iter);
iter              101 drivers/scsi/qedi/qedi_dbg.h 			    struct sysfs_bin_attrs *iter);
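
The qedf/qedi hits above register and remove a NULL-terminated table of binary sysfs attributes by walking it until the name sentinel. A minimal sketch with a hypothetical struct named_bin_attr table entry; sysfs_create_bin_file()/sysfs_remove_bin_file() and struct bin_attribute are the real sysfs API.

#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/printk.h>

struct named_bin_attr {                 /* hypothetical table entry */
        const char *name;
        struct bin_attribute *attr;
};

static int create_bin_attrs(struct kobject *kobj, struct named_bin_attr *iter)
{
        int ret;

        for (; iter->name; iter++) {
                ret = sysfs_create_bin_file(kobj, iter->attr);
                if (ret)
                        pr_err("Unable to create %s attr, err %d\n",
                               iter->name, ret);
        }
        return 0;
}

static void remove_bin_attrs(struct kobject *kobj, struct named_bin_attr *iter)
{
        for (; iter->name; iter++)
                sysfs_remove_bin_file(kobj, iter->attr);
}
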
iter              208 drivers/scsi/qla2xxx/qla_attr.c 		uint32_t *iter;
iter              211 drivers/scsi/qla2xxx/qla_attr.c 		iter = (uint32_t *)buf;
iter              213 drivers/scsi/qla2xxx/qla_attr.c 		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
iter              214 drivers/scsi/qla2xxx/qla_attr.c 			chksum += le32_to_cpu(*iter);
iter              216 drivers/scsi/qla2xxx/qla_attr.c 		*iter = cpu_to_le32(chksum);
iter              218 drivers/scsi/qla2xxx/qla_attr.c 		uint8_t *iter;
iter              221 drivers/scsi/qla2xxx/qla_attr.c 		iter = (uint8_t *)buf;
iter              224 drivers/scsi/qla2xxx/qla_attr.c 			chksum += *iter++;
iter              226 drivers/scsi/qla2xxx/qla_attr.c 		*iter = chksum;
iter              985 drivers/scsi/qla2xxx/qla_attr.c 	struct sysfs_entry *iter;
iter              988 drivers/scsi/qla2xxx/qla_attr.c 	for (iter = bin_file_entries; iter->name; iter++) {
iter              989 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
iter              991 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
iter              993 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
iter              997 drivers/scsi/qla2xxx/qla_attr.c 		    iter->attr);
iter             1001 drivers/scsi/qla2xxx/qla_attr.c 			    iter->name, ret);
iter             1005 drivers/scsi/qla2xxx/qla_attr.c 			    iter->name);
iter             1013 drivers/scsi/qla2xxx/qla_attr.c 	struct sysfs_entry *iter;
iter             1016 drivers/scsi/qla2xxx/qla_attr.c 	for (iter = bin_file_entries; iter->name; iter++) {
iter             1017 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type && !IS_FWI2_CAPABLE(ha))
iter             1019 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type == 2 && !IS_QLA25XX(ha))
iter             1021 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
iter             1023 drivers/scsi/qla2xxx/qla_attr.c 		if (iter->type == 0x27 &&
iter             1028 drivers/scsi/qla2xxx/qla_attr.c 		    iter->attr);
iter               74 drivers/scsi/qla2xxx/qla_inline.h        uint32_t iter = bsize >> 2;
iter               76 drivers/scsi/qla2xxx/qla_inline.h        for (; iter ; iter--)
iter               87 drivers/scsi/qla2xxx/qla_inline.h 	uint32_t iter = bsize >> 2;
iter               89 drivers/scsi/qla2xxx/qla_inline.h 	for ( ; iter--; isrc++)
iter               50 drivers/scsi/qla2xxx/qla_isr.c 	unsigned long	iter;
iter               69 drivers/scsi/qla2xxx/qla_isr.c 	for (iter = 50; iter--; ) {
iter              168 drivers/scsi/qla2xxx/qla_isr.c 	unsigned long	iter;
iter              189 drivers/scsi/qla2xxx/qla_isr.c 	for (iter = 50; iter--; ) {
iter             1895 drivers/scsi/qla2xxx/qla_isr.c 		uint16_t iter;
iter             1912 drivers/scsi/qla2xxx/qla_isr.c 		iter = iocb->u.nvme.rsp_pyld_len >> 2;
iter             1913 drivers/scsi/qla2xxx/qla_isr.c 		for (; iter; iter--)
iter             3177 drivers/scsi/qla2xxx/qla_isr.c 	unsigned long	iter;
iter             3201 drivers/scsi/qla2xxx/qla_isr.c 	for (iter = 50; iter--; ) {
iter             3004 drivers/scsi/qla2xxx/qla_mbx.c 	uint32_t *iter = (void *)stats;
iter             3005 drivers/scsi/qla2xxx/qla_mbx.c 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
iter             3045 drivers/scsi/qla2xxx/qla_mbx.c 			for ( ; dwords--; iter++)
iter             3046 drivers/scsi/qla2xxx/qla_mbx.c 				le32_to_cpus(iter);
iter             3063 drivers/scsi/qla2xxx/qla_mbx.c 	uint32_t *iter, dwords;
iter             3090 drivers/scsi/qla2xxx/qla_mbx.c 			iter = &stats->link_fail_cnt;
iter             3091 drivers/scsi/qla2xxx/qla_mbx.c 			for ( ; dwords--; iter++)
iter             3092 drivers/scsi/qla2xxx/qla_mbx.c 				le32_to_cpus(iter);
iter             6516 drivers/scsi/qla2xxx/qla_mbx.c 	uint16_t iter, addr, offset;
iter             6527 drivers/scsi/qla2xxx/qla_mbx.c 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
iter             6528 drivers/scsi/qla2xxx/qla_mbx.c 		if (iter == 4) {
iter             2910 drivers/scsi/qla2xxx/qla_mr.c 	unsigned long	iter;
iter             2934 drivers/scsi/qla2xxx/qla_mr.c 	for (iter = 50; iter--; clr_intr = 0) {
iter             2037 drivers/scsi/qla2xxx/qla_nx.c 	unsigned long	iter;
iter             2070 drivers/scsi/qla2xxx/qla_nx.c 	for (iter = 1; iter--; ) {
iter             3896 drivers/scsi/qla2xxx/qla_nx2.c 	unsigned long	iter;
iter             3947 drivers/scsi/qla2xxx/qla_nx2.c 	for (iter = 1; iter--; ) {
iter             3088 drivers/scsi/qla2xxx/qla_sup.c 	uint32_t istart, iend, iter, vend;
iter             3101 drivers/scsi/qla2xxx/qla_sup.c 		iter = istart;
iter             3102 drivers/scsi/qla2xxx/qla_sup.c 		while ((iter < iend) && !do_next) {
iter             3103 drivers/scsi/qla2xxx/qla_sup.c 			iter++;
iter             3104 drivers/scsi/qla2xxx/qla_sup.c 			if (qla2x00_read_flash_byte(ha, iter) == '/') {
iter             3105 drivers/scsi/qla2xxx/qla_sup.c 				if (qla2x00_read_flash_byte(ha, iter + 2) ==
iter             3109 drivers/scsi/qla2xxx/qla_sup.c 				    iter + 3) == '/')
iter             3118 drivers/scsi/qla2xxx/qla_sup.c 		while ((iter > istart) && !do_next) {
iter             3119 drivers/scsi/qla2xxx/qla_sup.c 			iter--;
iter             3120 drivers/scsi/qla2xxx/qla_sup.c 			if (qla2x00_read_flash_byte(ha, iter) == ' ')
iter             3130 drivers/scsi/qla2xxx/qla_sup.c 		vend = iter - 1;
iter             3132 drivers/scsi/qla2xxx/qla_sup.c 		while ((iter > istart) && !do_next) {
iter             3133 drivers/scsi/qla2xxx/qla_sup.c 			iter--;
iter             3134 drivers/scsi/qla2xxx/qla_sup.c 			rbyte = qla2x00_read_flash_byte(ha, iter);
iter             3142 drivers/scsi/qla2xxx/qla_sup.c 		iter++;
iter             3143 drivers/scsi/qla2xxx/qla_sup.c 		if ((vend - iter) &&
iter             3144 drivers/scsi/qla2xxx/qla_sup.c 		    ((vend - iter) < sizeof(ha->fcode_revision))) {
iter             3146 drivers/scsi/qla2xxx/qla_sup.c 			while (iter <= vend) {
iter             3147 drivers/scsi/qla2xxx/qla_sup.c 				*vbyte++ = qla2x00_read_flash_byte(ha, iter);
iter             3148 drivers/scsi/qla2xxx/qla_sup.c 				iter++;
iter              129 drivers/scsi/qla4xxx/ql4_attr.c 	struct sysfs_entry *iter;
iter              132 drivers/scsi/qla4xxx/ql4_attr.c 	for (iter = bin_file_entries; iter->name; iter++) {
iter              134 drivers/scsi/qla4xxx/ql4_attr.c 					    iter->attr);
iter              138 drivers/scsi/qla4xxx/ql4_attr.c 				   iter->name, ret);
iter              145 drivers/scsi/qla4xxx/ql4_attr.c 	struct sysfs_entry *iter;
iter              147 drivers/scsi/qla4xxx/ql4_attr.c 	for (iter = bin_file_entries; iter->name; iter++)
iter              149 drivers/scsi/qla4xxx/ql4_attr.c 				      iter->attr);
iter              389 drivers/soc/qcom/qcom-geni-se.c 	int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
iter              392 drivers/soc/qcom/qcom-geni-se.c 	if (iter <= 0 || iter > NUM_PACKING_VECTORS)
iter              395 drivers/soc/qcom/qcom-geni-se.c 	for (i = 0; i < iter; i++) {
iter              409 drivers/soc/qcom/qcom-geni-se.c 	cfg[iter - 1] |= PACKING_STOP_BIT;
iter              287 drivers/staging/android/ashmem.c static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
iter              310 drivers/staging/android/ashmem.c 	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
iter             3233 drivers/staging/exfat/exfat_super.c static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             3240 drivers/staging/exfat/exfat_super.c 	rw = iov_iter_rw(iter);
iter             3243 drivers/staging/exfat/exfat_super.c 		if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter))
iter             3246 drivers/staging/exfat/exfat_super.c 	ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
iter             3249 drivers/staging/exfat/exfat_super.c 		exfat_write_failed(mapping, iov_iter_count(iter));
iter              248 drivers/staging/qlge/qlge_ethtool.c 	u64 *iter = &qdev->nic_stats.tx_pkts;
iter              266 drivers/staging/qlge/qlge_ethtool.c 			*iter = data;
iter              267 drivers/staging/qlge/qlge_ethtool.c 		iter++;
iter              280 drivers/staging/qlge/qlge_ethtool.c 			*iter = data;
iter              281 drivers/staging/qlge/qlge_ethtool.c 		iter++;
iter              285 drivers/staging/qlge/qlge_ethtool.c 	iter += QLGE_RCV_MAC_ERR_STATS;
iter              297 drivers/staging/qlge/qlge_ethtool.c 			*iter = data;
iter              298 drivers/staging/qlge/qlge_ethtool.c 		iter++;
iter              311 drivers/staging/qlge/qlge_ethtool.c 			*iter = data;
iter              312 drivers/staging/qlge/qlge_ethtool.c 		iter++;
iter              323 drivers/staging/qlge/qlge_ethtool.c 		*iter = data;
iter              153 drivers/staging/vc04_services/interface/vchi/vchi.h 				   struct vchi_msg_iter *iter,
iter              176 drivers/staging/vc04_services/interface/vchi/vchi.h extern int32_t vchi_msg_iter_has_next(const struct vchi_msg_iter *iter);
iter              179 drivers/staging/vc04_services/interface/vchi/vchi.h extern int32_t vchi_msg_iter_next(struct vchi_msg_iter *iter,
iter              185 drivers/staging/vc04_services/interface/vchi/vchi.h extern int32_t vchi_msg_iter_remove(struct vchi_msg_iter *iter);
iter              189 drivers/staging/vc04_services/interface/vchi/vchi.h extern int32_t vchi_msg_iter_hold(struct vchi_msg_iter *iter,
iter              193 drivers/staging/vc04_services/interface/vchi/vchi.h extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
iter             1250 drivers/target/sbp/sbp_target.c 	struct sg_mapping_iter iter;
iter             1289 drivers/target/sbp/sbp_target.c 	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
iter             1302 drivers/target/sbp/sbp_target.c 		sg_miter_next(&iter);
iter             1304 drivers/target/sbp/sbp_target.c 		tfr_length = min3(length, max_payload, (int)iter.length);
iter             1310 drivers/target/sbp/sbp_target.c 				offset, iter.addr, tfr_length);
iter             1317 drivers/target/sbp/sbp_target.c 		iter.consumed = tfr_length;
iter             1320 drivers/target/sbp/sbp_target.c 	sg_miter_stop(&iter);
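
The sbp_target hits above use the scatterlist mapping iterator to get a kernel mapping of each chunk in turn and record how much of it was consumed. A minimal sketch that zero-fills a scatterlist; sg_miter_start()/next()/stop() and SG_MITER_TO_SG are the real lib/scatterlist API.

#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/kernel.h>

/* Zero @length bytes spread across a scatterlist, one mapped chunk at
 * a time.  SG_MITER_TO_SG means we write into the pages, so they are
 * flushed back when unmapped. */
static void demo_sg_zero(struct scatterlist *sgl, unsigned int nents,
                         size_t length)
{
        struct sg_mapping_iter iter;

        sg_miter_start(&iter, sgl, nents, SG_MITER_TO_SG);
        while (length && sg_miter_next(&iter)) {
                size_t n = min(length, iter.length);

                memset(iter.addr, 0, n);
                iter.consumed = n;      /* tell the iterator how much we used */
                length -= n;
        }
        sg_miter_stop(&iter);
}
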
iter              865 drivers/target/target_core_device.c 	struct devices_idr_iter *iter = data;
iter              869 drivers/target/target_core_device.c 	config_item_put(iter->prev_item);
iter              870 drivers/target/target_core_device.c 	iter->prev_item = NULL;
iter              881 drivers/target/target_core_device.c 	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
iter              882 drivers/target/target_core_device.c 	if (!iter->prev_item)
iter              886 drivers/target/target_core_device.c 	ret = iter->fn(dev, iter->data);
iter              903 drivers/target/target_core_device.c 	struct devices_idr_iter iter = { .fn = fn, .data = data };
iter              907 drivers/target/target_core_device.c 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
iter              909 drivers/target/target_core_device.c 	config_item_put(iter.prev_item);
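
The target_core_device hits above walk an IDR with idr_for_each(), passing a small iterator struct through the callback's data pointer. A minimal sketch that just counts entries; idr_for_each() and struct idr are the real API, struct count_iter is hypothetical per-walk state.

#include <linux/idr.h>

struct count_iter {                     /* hypothetical per-walk state */
        int count;
};

static int count_one(int id, void *p, void *data)
{
        struct count_iter *iter = data;

        iter->count++;
        return 0;                       /* non-zero would stop the walk */
}

static int idr_count_entries(struct idr *idr)
{
        struct count_iter iter = { .count = 0 };

        idr_for_each(idr, count_one, &iter);
        return iter.count;
}
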
iter              269 drivers/target/target_core_file.c 	struct iov_iter iter = {};
iter              293 drivers/target/target_core_file.c 	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
iter              306 drivers/target/target_core_file.c 		ret = call_write_iter(file, &aio_cmd->iocb, &iter);
iter              308 drivers/target/target_core_file.c 		ret = call_read_iter(file, &aio_cmd->iocb, &iter);
iter              323 drivers/target/target_core_file.c 	struct iov_iter iter;
iter              343 drivers/target/target_core_file.c 	iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
iter              345 drivers/target/target_core_file.c 		ret = vfs_iter_write(fd, &iter, &pos, 0);
iter              347 drivers/target/target_core_file.c 		ret = vfs_iter_read(fd, &iter, &pos, 0);
iter              381 drivers/target/target_core_file.c 					ret += iov_iter_zero(data_length - ret, &iter);
iter              443 drivers/target/target_core_file.c 	struct iov_iter iter;
iter              480 drivers/target/target_core_file.c 	iov_iter_bvec(&iter, READ, bvec, nolb, len);
iter              481 drivers/target/target_core_file.c 	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
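
The target_core_file hits above point an iov_iter at a bio_vec array and hand it to vfs_iter_read()/vfs_iter_write(). A minimal sketch of that dispatch; iov_iter_bvec(), vfs_iter_read() and vfs_iter_write() are the real APIs, and the direction flag passed to iov_iter_bvec() matches the data direction.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bvec.h>

/* Issue one read or write of @len bytes at @pos backed by a
 * caller-built bio_vec array. */
static ssize_t bvec_rw(struct file *file, struct bio_vec *bvec,
                       unsigned int nr_segs, size_t len, loff_t pos,
                       bool is_write)
{
        struct iov_iter iter;

        iov_iter_bvec(&iter, is_write ? WRITE : READ, bvec, nr_segs, len);

        return is_write ? vfs_iter_write(file, &iter, &pos, 0)
                        : vfs_iter_read(file, &iter, &pos, 0);
}
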
iter             1481 drivers/usb/core/devio.c 	struct usb_memory *usbm = NULL, *iter;
iter             1486 drivers/usb/core/devio.c 	list_for_each_entry(iter, &ps->memory_list, memlist) {
iter             1487 drivers/usb/core/devio.c 		if (uurb_start >= iter->vm_start &&
iter             1488 drivers/usb/core/devio.c 				uurb_start < iter->vm_start + iter->size) {
iter             1489 drivers/usb/core/devio.c 			if (uurb->buffer_length > iter->vm_start + iter->size -
iter             1493 drivers/usb/core/devio.c 				usbm = iter;
iter              716 drivers/usb/gadget/function/f_fs.c static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
iter              718 drivers/usb/gadget/function/f_fs.c 	ssize_t ret = copy_to_iter(data, data_len, iter);
iter              722 drivers/usb/gadget/function/f_fs.c 	if (unlikely(iov_iter_count(iter)))
iter              874 drivers/usb/gadget/function/f_fs.c 					  struct iov_iter *iter)
iter              886 drivers/usb/gadget/function/f_fs.c 	ret = copy_to_iter(buf->data, buf->length, iter);
iter              892 drivers/usb/gadget/function/f_fs.c 	if (unlikely(iov_iter_count(iter))) {
iter              908 drivers/usb/gadget/function/f_fs.c 				      struct iov_iter *iter)
iter              912 drivers/usb/gadget/function/f_fs.c 	ssize_t ret = copy_to_iter(data, data_len, iter);
iter              916 drivers/usb/gadget/function/f_fs.c 	if (unlikely(iov_iter_count(iter)))
iter               34 drivers/vfio/platform/vfio_platform_common.c 	struct vfio_platform_reset_node *iter;
iter               38 drivers/vfio/platform/vfio_platform_common.c 	list_for_each_entry(iter, &reset_list, link) {
iter               39 drivers/vfio/platform/vfio_platform_common.c 		if (!strcmp(iter->compat, compat) &&
iter               40 drivers/vfio/platform/vfio_platform_common.c 			try_module_get(iter->owner)) {
iter               41 drivers/vfio/platform/vfio_platform_common.c 			*module = iter->owner;
iter               42 drivers/vfio/platform/vfio_platform_common.c 			reset_fn = iter->of_reset;
iter              733 drivers/vfio/platform/vfio_platform_common.c 	struct vfio_platform_reset_node *iter, *temp;
iter              736 drivers/vfio/platform/vfio_platform_common.c 	list_for_each_entry_safe(iter, temp, &reset_list, link) {
iter              737 drivers/vfio/platform/vfio_platform_common.c 		if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
iter              738 drivers/vfio/platform/vfio_platform_common.c 			list_del(&iter->link);
iter              594 drivers/vhost/net.c static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
iter              600 drivers/vhost/net.c 	iov_iter_init(iter, WRITE, vq->iov, out, len);
iter              601 drivers/vhost/net.c 	iov_iter_advance(iter, hdr_size);
iter              603 drivers/vhost/net.c 	return iov_iter_count(iter);
iter              623 drivers/vhost/scsi.c 		      struct iov_iter *iter,
iter              633 drivers/vhost/scsi.c 	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
iter              639 drivers/vhost/scsi.c 	iov_iter_advance(iter, bytes);
iter              651 drivers/vhost/scsi.c vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
iter              655 drivers/vhost/scsi.c 	if (!iter || !iter->iov) {
iter              661 drivers/vhost/scsi.c 	sgl_count = iov_iter_npages(iter, 0xffff);
iter              672 drivers/vhost/scsi.c 		      struct iov_iter *iter,
iter              678 drivers/vhost/scsi.c 	while (iov_iter_count(iter)) {
iter              679 drivers/vhost/scsi.c 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
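
The vhost-scsi hits above size a scatterlist with iov_iter_npages() and then pin the backing user pages with iov_iter_get_pages(), advancing the iterator by however many bytes each batch covered. The sketch below shows only the pin/advance loop, releasing the pages immediately; iov_iter_get_pages(), iov_iter_advance() and put_page() are real, the batch size is arbitrary.

#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/errno.h>

/* Pin all pages backing @iter, batch by batch, then drop them again. */
static int pin_and_release(struct iov_iter *iter)
{
        struct page *pages[16];

        while (iov_iter_count(iter)) {
                size_t start;
                ssize_t bytes;
                int i, n;

                bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
                                           ARRAY_SIZE(pages), &start);
                if (bytes <= 0)
                        return bytes ? bytes : -EFAULT;

                /* @start is the offset into the first pinned page, so the
                 * batch may straddle partial first/last pages. */
                n = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
                for (i = 0; i < n; i++)
                        put_page(pages[i]);

                iov_iter_advance(iter, bytes);
        }
        return 0;
}
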
iter              244 drivers/video/fbdev/riva/riva_hw.c     int iter = 0;
iter              330 drivers/video/fbdev/riva/riva_hw.c         iter++;
iter              376 drivers/video/fbdev/riva/riva_hw.c         if (iter>100)
iter              977 drivers/xen/pvcalls-back.c 	struct radix_tree_iter iter;
iter              989 drivers/xen/pvcalls-back.c 	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
iter              995 drivers/xen/pvcalls-back.c 				slot = radix_tree_iter_retry(&iter);
iter              235 fs/9p/vfs_addr.c v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              241 fs/9p/vfs_addr.c 	if (iov_iter_rw(iter) == WRITE) {
iter              242 fs/9p/vfs_addr.c 		n = p9_client_write(file->private_data, pos, iter, &err);
iter              250 fs/9p/vfs_addr.c 		n = p9_client_read(file->private_data, pos, iter, &err);
iter              393 fs/affs/file.c affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              398 fs/affs/file.c 	size_t count = iov_iter_count(iter);
iter              402 fs/affs/file.c 	if (iov_iter_rw(iter) == WRITE) {
iter              409 fs/affs/file.c 	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
iter              410 fs/affs/file.c 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
iter              356 fs/afs/cmservice.c 		call->_iter = &call->iter;
iter              357 fs/afs/cmservice.c 		iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
iter              363 fs/afs/cmservice.c 		       iov_iter_count(&call->iter), call->count2 * 3 * 4);
iter              331 fs/afs/fsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
iter              371 fs/afs/fsclient.c 		iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
iter              378 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->remain);
iter              404 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
iter             1860 fs/afs/fsclient.c 	_enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
iter              118 fs/afs/internal.h 	struct iov_iter		iter;		/* Buffer iterator */
iter             1139 fs/afs/internal.h 	iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
iter             1154 fs/afs/internal.h 	iov_iter_discard(&call->iter, READ, size);
iter              154 fs/afs/rxrpc.c 	call->_iter = &call->iter;
iter              518 fs/afs/rxrpc.c 			iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
iter              520 fs/afs/rxrpc.c 						     call->rxcall, &call->iter,
iter              523 fs/afs/rxrpc.c 			trace_afs_receive_data(call, &call->iter, false, ret);
iter              924 fs/afs/rxrpc.c 	struct iov_iter *iter = call->_iter;
iter              929 fs/afs/rxrpc.c 	_enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
iter              931 fs/afs/rxrpc.c 	ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
iter              448 fs/afs/yfsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
iter              483 fs/afs/yfsclient.c 		iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
iter              490 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->remain);
iter              516 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
iter             1483 fs/aio.c       		struct iov_iter *iter)
iter             1489 fs/aio.c       		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
iter             1496 fs/aio.c       				iter);
iter             1498 fs/aio.c       	return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
iter             1525 fs/aio.c       	struct iov_iter iter;
iter             1539 fs/aio.c       	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
iter             1542 fs/aio.c       	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
iter             1544 fs/aio.c       		aio_rw_done(req, call_read_iter(file, req, &iter));
iter             1553 fs/aio.c       	struct iov_iter iter;
iter             1567 fs/aio.c       	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
iter             1570 fs/aio.c       	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
iter             1584 fs/aio.c       		aio_rw_done(req, call_write_iter(file, req, &iter));
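
The fs/aio.c hits above turn a userspace buffer description into an iov_iter: import_single_range() for a plain buffer, import_iovec() for a vectored request. A minimal sketch of the same dispatch, hedged as a guess at how a caller might wrap it; import_single_range(), import_iovec() and UIO_FASTIOV are the real <linux/uio.h> API, and for the vectored case @len is the number of iovec entries as in the aio code.

#include <linux/uio.h>
#include <linux/fs.h>

/* Build an iov_iter from user memory.  @iovec must point at a
 * UIO_FASTIOV-sized on-stack array; import_iovec() may replace it with
 * a kmalloc'ed copy that the caller must kfree(). */
static ssize_t setup_user_iter(int rw, void __user *buf, size_t len,
                               bool vectored, struct iovec **iovec,
                               struct iov_iter *iter)
{
        if (!vectored) {
                ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
                *iovec = NULL;          /* nothing for the caller to free */
                return ret;
        }
        return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
}
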
iter              203 fs/block_dev.c __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
iter              215 fs/block_dev.c 	if ((pos | iov_iter_alignment(iter)) &
iter              236 fs/block_dev.c 	ret = bio_iov_iter_get_pages(&bio, iter);
iter              241 fs/block_dev.c 	if (iov_iter_rw(iter) == READ) {
iter              243 fs/block_dev.c 		if (iter_is_iovec(iter))
iter              339 fs/block_dev.c __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
iter              348 fs/block_dev.c 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
iter              353 fs/block_dev.c 	if ((pos | iov_iter_alignment(iter)) &
iter              370 fs/block_dev.c 	dio->should_dirty = is_read && iter_is_iovec(iter);
iter              387 fs/block_dev.c 		ret = bio_iov_iter_get_pages(bio, iter);
iter              406 fs/block_dev.c 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
iter              467 fs/block_dev.c blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              471 fs/block_dev.c 	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
iter              475 fs/block_dev.c 		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
iter              477 fs/block_dev.c 	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
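
The fs/block_dev.c hits above check alignment with iov_iter_alignment() and then fill bios straight from the iov_iter with bio_iov_iter_get_pages(). A minimal sketch of filling and submitting one synchronous read bio, assuming the iterator fits in a single bio and assuming the two-argument bio_release_pages() of this kernel series; bio_alloc(), bio_set_dev(), bio_iov_iter_get_pages() and submit_bio_wait() are the real block API.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

static int read_one_bio(struct block_device *bdev, struct iov_iter *iter,
                        loff_t pos)
{
        struct bio *bio;
        int ret;

        /* Direct I/O needs logical-block-aligned position and buffers. */
        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = pos >> 9;
        bio->bi_opf = REQ_OP_READ;

        /* Pins the pages behind the iterator and adds them as segments. */
        ret = bio_iov_iter_get_pages(bio, iter);
        if (ret) {
                bio_put(bio);
                return ret;
        }

        ret = submit_bio_wait(bio);
        /* Drop the pinned pages; dirty them only for user-backed iters. */
        bio_release_pages(bio, iter_is_iovec(iter));
        bio_put(bio);
        return ret;
}
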
iter             2808 fs/btrfs/check-integrity.c 		struct bvec_iter iter;
iter             2828 fs/btrfs/check-integrity.c 		bio_for_each_segment(bvec, bio, iter) {
iter             2843 fs/btrfs/check-integrity.c 		bio_for_each_segment(bvec, bio, iter)
iter             2887 fs/btrfs/extent_io.c 	btrfs_bio->iter = bio->bi_iter;
iter             2914 fs/btrfs/extent_io.c 	btrfs_bio->iter = bio->bi_iter;
iter              156 fs/btrfs/file-item.c 	struct bvec_iter iter;
iter              211 fs/btrfs/file-item.c 	bio_for_each_segment(bvec, bio, iter) {
iter              441 fs/btrfs/file-item.c 	struct bvec_iter iter;
iter              473 fs/btrfs/file-item.c 	bio_for_each_segment(bvec, bio, iter) {
iter             8102 fs/btrfs/inode.c 	struct bvec_iter iter;
iter             8116 fs/btrfs/inode.c 	io_bio->bio.bi_iter = io_bio->iter;
iter             8118 fs/btrfs/inode.c 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
iter             8207 fs/btrfs/inode.c 	struct bvec_iter iter;
iter             8225 fs/btrfs/inode.c 	io_bio->bio.bi_iter = io_bio->iter;
iter             8227 fs/btrfs/inode.c 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
iter             8702 fs/btrfs/inode.c 			       const struct iov_iter *iter, loff_t offset)
iter             8712 fs/btrfs/inode.c 	if (iov_iter_alignment(iter) & blocksize_mask)
iter             8716 fs/btrfs/inode.c 	if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
iter             8723 fs/btrfs/inode.c 	for (seg = 0; seg < iter->nr_segs; seg++) {
iter             8724 fs/btrfs/inode.c 		for (i = seg + 1; i < iter->nr_segs; i++) {
iter             8725 fs/btrfs/inode.c 			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
iter             8734 fs/btrfs/inode.c static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             8748 fs/btrfs/inode.c 	if (check_direct_IO(fs_info, iter, offset))
iter             8759 fs/btrfs/inode.c 	count = iov_iter_count(iter);
iter             8765 fs/btrfs/inode.c 	if (iov_iter_rw(iter) == WRITE) {
iter             8804 fs/btrfs/inode.c 				   iter, btrfs_get_blocks_direct, NULL,
iter             8806 fs/btrfs/inode.c 	if (iov_iter_rw(iter) == WRITE) {
iter             3762 fs/btrfs/qgroup.c 	struct ulist_iterator iter;
iter             3771 fs/btrfs/qgroup.c 		ULIST_ITER_INIT(&iter);
iter             3772 fs/btrfs/qgroup.c 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
iter             1168 fs/btrfs/raid56.c 		struct bvec_iter iter;
iter             1176 fs/btrfs/raid56.c 			bio->bi_iter = btrfs_io_bio(bio)->iter;
iter             1178 fs/btrfs/raid56.c 		bio_for_each_segment(bvec, bio, iter) {
iter              150 fs/btrfs/tests/btrfs-tests.c 	struct radix_tree_iter iter;
iter              163 fs/btrfs/tests/btrfs-tests.c 	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
iter              172 fs/btrfs/tests/btrfs-tests.c 				slot = radix_tree_iter_retry(&iter);
iter              175 fs/btrfs/tests/btrfs-tests.c 		slot = radix_tree_iter_resume(slot, &iter);
iter              299 fs/btrfs/volumes.h 	struct bvec_iter iter;
iter             1389 fs/ceph/addr.c static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
iter               81 fs/ceph/file.c static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
iter               87 fs/ceph/file.c 	if (maxsize > iov_iter_count(iter))
iter               88 fs/ceph/file.c 		maxsize = iov_iter_count(iter);
iter               96 fs/ceph/file.c 		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
iter              101 fs/ceph/file.c 		iov_iter_advance(iter, bytes);
iter              128 fs/ceph/file.c static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
iter              132 fs/ceph/file.c 	size_t orig_count = iov_iter_count(iter);
iter              136 fs/ceph/file.c 	iov_iter_truncate(iter, maxsize);
iter              137 fs/ceph/file.c 	npages = iov_iter_npages(iter, INT_MAX);
iter              138 fs/ceph/file.c 	iov_iter_reexpand(iter, orig_count);
iter              148 fs/ceph/file.c 	bytes = __iter_get_bvecs(iter, maxsize, bv);
iter              804 fs/ceph/file.c 	     inode, rc, osd_data->bvec_pos.iter.bi_size);
iter              822 fs/ceph/file.c 		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
iter              824 fs/ceph/file.c 			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
iter              842 fs/ceph/file.c 				      osd_data->bvec_pos.iter.bi_size);
iter              926 fs/ceph/file.c ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
iter              942 fs/ceph/file.c 	size_t count = iov_iter_count(iter);
iter              944 fs/ceph/file.c 	bool write = iov_iter_rw(iter) == WRITE;
iter              945 fs/ceph/file.c 	bool should_dirty = !write && iter_is_iovec(iter);
iter              966 fs/ceph/file.c 	while (iov_iter_count(iter) > 0) {
iter              967 fs/ceph/file.c 		u64 size = iov_iter_count(iter);
iter              990 fs/ceph/file.c 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
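
The ceph hits above temporarily clamp an iov_iter with iov_iter_truncate() so a helper only sees a prefix of it, then restore the original length with iov_iter_reexpand(). A minimal sketch of the same trick; all three iov_iter helpers are the real <linux/uio.h> API.

#include <linux/uio.h>
#include <linux/kernel.h>

/* Count how many pages the first @maxsize bytes of @iter span, without
 * permanently shrinking the iterator. */
static int npages_of_prefix(struct iov_iter *iter, size_t maxsize)
{
        size_t orig_count = iov_iter_count(iter);
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        return npages;
}
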
iter              886 fs/cifs/cifsfs.c cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
iter              892 fs/cifs/cifsfs.c 		return cifs_user_readv(iocb, iter);
iter              898 fs/cifs/cifsfs.c 	return generic_file_read_iter(iocb, iter);
iter             1287 fs/cifs/cifsglob.h 	struct iov_iter		iter;
iter             1325 fs/cifs/cifsglob.h 				struct iov_iter *iter);
iter              579 fs/cifs/cifsproto.h int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
iter             3025 fs/cifs/file.c 				struct iov_iter tmp_from = ctx->iter;
iter             3114 fs/cifs/file.c 		ctx->iter = *from;
iter             3369 fs/cifs/file.c cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
iter             3379 fs/cifs/file.c 		if (unlikely(iov_iter_is_pipe(iter))) {
iter             3382 fs/cifs/file.c 			written = copy_to_iter(addr, copy, iter);
iter             3385 fs/cifs/file.c 			written = copy_page_to_iter(page, 0, copy, iter);
iter             3387 fs/cifs/file.c 		if (written < copy && iov_iter_count(iter) > 0)
iter             3409 fs/cifs/file.c 		    struct cifs_readdata *rdata, struct iov_iter *iter,
iter             3446 fs/cifs/file.c 		if (iter)
iter             3448 fs/cifs/file.c 					page, page_offset, n, iter);
iter             3476 fs/cifs/file.c 			      struct iov_iter *iter)
iter             3478 fs/cifs/file.c 	return uncached_fill_pages(server, rdata, iter, iter->count);
iter             3558 fs/cifs/file.c 	struct iov_iter direct_iov = ctx->iter;
iter             3690 fs/cifs/file.c 	struct iov_iter *to = &ctx->iter;
iter             3842 fs/cifs/file.c 		ctx->iter = *to;
iter             4126 fs/cifs/file.c 		     struct cifs_readdata *rdata, struct iov_iter *iter,
iter             4190 fs/cifs/file.c 		if (iter)
iter             4192 fs/cifs/file.c 					page, page_offset, n, iter);
iter             4220 fs/cifs/file.c 			       struct iov_iter *iter)
iter             4222 fs/cifs/file.c 	return readpages_fill_pages(server, rdata, iter, iter->count);
iter             4735 fs/cifs/file.c cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
iter              839 fs/cifs/misc.c setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
iter              846 fs/cifs/misc.c 	size_t count = iov_iter_count(iter);
iter              849 fs/cifs/misc.c 	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
iter              853 fs/cifs/misc.c 	if (iov_iter_is_kvec(iter)) {
iter              854 fs/cifs/misc.c 		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
iter              856 fs/cifs/misc.c 		iov_iter_advance(iter, count);
iter              885 fs/cifs/misc.c 		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
iter              897 fs/cifs/misc.c 		iov_iter_advance(iter, rc);
iter              924 fs/cifs/misc.c 	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
iter             3961 fs/cifs/smb2ops.c 	struct iov_iter iter;
iter             4056 fs/cifs/smb2ops.c 		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
iter             4062 fs/cifs/smb2ops.c 		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
iter             4071 fs/cifs/smb2ops.c 	length = rdata->copy_into_pages(server, rdata, &iter);
iter             1098 fs/dax.c       	struct iov_iter *iter = data;
iter             1104 fs/dax.c       	if (iov_iter_rw(iter) == READ) {
iter             1110 fs/dax.c       			return iov_iter_zero(min(length, end - pos), iter);
iter             1163 fs/dax.c       		if (iov_iter_rw(iter) == WRITE)
iter             1165 fs/dax.c       					map_len, iter);
iter             1168 fs/dax.c       					map_len, iter);
iter             1195 fs/dax.c       dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
iter             1203 fs/dax.c       	if (iov_iter_rw(iter) == WRITE) {
iter             1213 fs/dax.c       	while (iov_iter_count(iter)) {
iter             1214 fs/dax.c       		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter             1215 fs/dax.c       				iter, dax_iomap_actor);
iter              105 fs/direct-io.c 	struct iov_iter *iter;
iter              171 fs/direct-io.c 	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
iter              193 fs/direct-io.c 		iov_iter_advance(sdio->iter, ret);
iter             1165 fs/direct-io.c 		      struct block_device *bdev, struct iov_iter *iter,
iter             1173 fs/direct-io.c 	const size_t count = iov_iter_count(iter);
iter             1180 fs/direct-io.c 	unsigned long align = offset | iov_iter_alignment(iter);
iter             1196 fs/direct-io.c 	if (iov_iter_rw(iter) == READ && !count)
iter             1212 fs/direct-io.c 		if (iov_iter_rw(iter) == READ) {
iter             1231 fs/direct-io.c 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
iter             1247 fs/direct-io.c 	else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
iter             1253 fs/direct-io.c 	if (iov_iter_rw(iter) == WRITE) {
iter             1268 fs/direct-io.c 	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
iter             1311 fs/direct-io.c 	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
iter             1312 fs/direct-io.c 	sdio.iter = iter;
iter             1322 fs/direct-io.c 	sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
iter             1368 fs/direct-io.c 	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
iter             1380 fs/direct-io.c 	    (iov_iter_rw(iter) == READ || dio->result == count))
iter             1395 fs/direct-io.c 			     struct block_device *bdev, struct iov_iter *iter,
iter             1412 fs/direct-io.c 	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
iter              939 fs/ext2/inode.c ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              944 fs/ext2/inode.c 	size_t count = iov_iter_count(iter);
iter              948 fs/ext2/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
iter              949 fs/ext2/inode.c 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
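
The ext2 hits above (and the affs/fat/hfs/jfs entries elsewhere in this listing) follow the same ->direct_IO shape: delegate to blockdev_direct_IO() with the filesystem's get_block callback and, on a failed or short write, clean up blocks instantiated past the old EOF. A minimal sketch with hypothetical demo_get_block()/demo_write_failed(); blockdev_direct_IO(), iov_iter_rw(), iov_iter_count() and truncate_pagecache() are real.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical block-mapping callback a real filesystem would provide. */
static int demo_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create)
{
        return -EIO;
}

static void demo_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        /* Drop pagecache (and, in a real fs, blocks) past the old EOF. */
        if (to > inode->i_size)
                truncate_pagecache(inode, inode->i_size);
}

static ssize_t demo_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        loff_t offset = iocb->ki_pos;
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, demo_get_block);
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                demo_write_failed(mapping, offset + count);
        return ret;
}
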
iter             3676 fs/ext4/inode.c static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
iter             3683 fs/ext4/inode.c 	size_t count = iov_iter_count(iter);
iter             3757 fs/ext4/inode.c 	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
iter             3833 fs/ext4/inode.c static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
iter             3837 fs/ext4/inode.c 	size_t count = iov_iter_count(iter);
iter             3857 fs/ext4/inode.c 				   iter, ext4_dio_get_block, NULL, NULL, 0);
iter             3863 fs/ext4/inode.c static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             3867 fs/ext4/inode.c 	size_t count = iov_iter_count(iter);
iter             3888 fs/ext4/inode.c 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
iter             3889 fs/ext4/inode.c 	if (iov_iter_rw(iter) == READ)
iter             3890 fs/ext4/inode.c 		ret = ext4_direct_IO_read(iocb, iter);
iter             3892 fs/ext4/inode.c 		ret = ext4_direct_IO_write(iocb, iter);
iter             3893 fs/ext4/inode.c 	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
iter             2747 fs/f2fs/data.c static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
iter             2753 fs/f2fs/data.c 	unsigned long align = offset | iov_iter_alignment(iter);
iter             2811 fs/f2fs/data.c static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             2817 fs/f2fs/data.c 	size_t count = iov_iter_count(iter);
iter             2819 fs/f2fs/data.c 	int rw = iov_iter_rw(iter);
iter             2825 fs/f2fs/data.c 	err = check_direct_IO(inode, iter, offset);
iter             2829 fs/f2fs/data.c 	if (f2fs_force_buffered_io(inode, iocb, iter))
iter             2832 fs/f2fs/data.c 	do_opu = allow_outplace_dio(inode, iocb, iter);
iter             2858 fs/f2fs/data.c 			iter, rw == WRITE ? get_data_block_dio_write :
iter             3694 fs/f2fs/f2fs.h 				struct kiocb *iocb, struct iov_iter *iter)
iter             3699 fs/f2fs/f2fs.h 	unsigned long align = offset | iov_iter_alignment(iter);
iter             3705 fs/f2fs/f2fs.h 				struct kiocb *iocb, struct iov_iter *iter)
iter             3708 fs/f2fs/f2fs.h 	int rw = iov_iter_rw(iter);
iter             3711 fs/f2fs/f2fs.h 				!block_unaligned_IO(inode, iocb, iter));
iter             3715 fs/f2fs/f2fs.h 				struct kiocb *iocb, struct iov_iter *iter)
iter             3718 fs/f2fs/f2fs.h 	int rw = iov_iter_rw(iter);
iter             3731 fs/f2fs/f2fs.h 		if (block_unaligned_IO(inode, iocb, iter))
iter              135 fs/f2fs/trace.c 	struct radix_tree_iter iter;
iter              142 fs/f2fs/trace.c 	radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
iter              143 fs/f2fs/trace.c 		results[ret] = iter.index;
iter              260 fs/fat/inode.c static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              265 fs/fat/inode.c 	size_t count = iov_iter_count(iter);
iter              269 fs/fat/inode.c 	if (iov_iter_rw(iter) == WRITE) {
iter              288 fs/fat/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block);
iter              289 fs/fat/inode.c 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
iter              640 fs/fuse/dev.c  	struct iov_iter *iter;
iter              652 fs/fuse/dev.c  			   struct iov_iter *iter)
iter              656 fs/fuse/dev.c  	cs->iter = iter;
iter              728 fs/fuse/dev.c  		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
iter              735 fs/fuse/dev.c  		iov_iter_advance(cs->iter, err);
iter             1421 fs/fuse/file.c ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
iter             1432 fs/fuse/file.c 	size_t count = iov_iter_count(iter);
iter             1440 fs/fuse/file.c 	max_pages = iov_iter_npages(iter, fc->max_pages);
iter             1454 fs/fuse/file.c 	io->should_dirty = !write && iter_is_iovec(iter);
iter             1460 fs/fuse/file.c 		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
iter             1480 fs/fuse/file.c 			iov_iter_revert(iter, nbytes);
iter             1490 fs/fuse/file.c 			iov_iter_revert(iter, nbytes - nres);
iter             1494 fs/fuse/file.c 			max_pages = iov_iter_npages(iter, fc->max_pages);
iter             1510 fs/fuse/file.c 				  struct iov_iter *iter,
iter             1516 fs/fuse/file.c 	res = fuse_direct_io(io, iter, ppos, 0);
iter             1523 fs/fuse/file.c static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
iter             3062 fs/fuse/file.c fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             3072 fs/fuse/file.c 	size_t count = iov_iter_count(iter);
iter             3080 fs/fuse/file.c 	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
iter             3084 fs/fuse/file.c 	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
iter             3087 fs/fuse/file.c 		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
iter             3088 fs/fuse/file.c 		count = iov_iter_count(iter);
iter             3100 fs/fuse/file.c 	io->write = (iov_iter_rw(iter) == WRITE);
iter             3114 fs/fuse/file.c 	if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
iter             3126 fs/fuse/file.c 	if (iov_iter_rw(iter) == WRITE) {
iter             3127 fs/fuse/file.c 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
iter             3130 fs/fuse/file.c 		ret = __fuse_direct_read(io, iter, &pos);
iter             3148 fs/fuse/file.c 	if (iov_iter_rw(iter) == WRITE) {
iter             1044 fs/fuse/fuse_i.h ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
iter             1648 fs/gfs2/glock.c 	struct rhashtable_iter iter;
iter             1650 fs/gfs2/glock.c 	rhashtable_walk_enter(&gl_hash_table, &iter);
iter             1653 fs/gfs2/glock.c 		rhashtable_walk_start(&iter);
iter             1655 fs/gfs2/glock.c 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
iter             1660 fs/gfs2/glock.c 		rhashtable_walk_stop(&iter);
iter             1663 fs/gfs2/glock.c 	rhashtable_walk_exit(&iter);
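
The gfs2 hits above iterate every object in a resizable hash table; rhashtable_walk_next() can return ERR_PTR(-EAGAIN) when a resize interferes, so the walk restarts its start/stop section. A minimal sketch that counts entries of a hypothetical struct demo_obj; the rhashtable_walk_* calls, IS_ERR() and cond_resched() are real, and entries seen before a restart may be visited again.

#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/sched.h>

struct demo_obj {                       /* hypothetical hashed object */
        struct rhash_head node;
        u32 key;
};

static unsigned long count_objects(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct demo_obj *obj;
        unsigned long n = 0;

        rhashtable_walk_enter(ht, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
                        n++;

                rhashtable_walk_stop(&iter);
                /* -EAGAIN means a resize ran; redo this walk section. */
        } while (cond_resched(), obj == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);

        return n;
}
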
iter              129 fs/hfs/inode.c static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              134 fs/hfs/inode.c 	size_t count = iov_iter_count(iter);
iter              137 fs/hfs/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block);
iter              143 fs/hfs/inode.c 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
iter              126 fs/hfsplus/inode.c static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              131 fs/hfsplus/inode.c 	size_t count = iov_iter_count(iter);
iter              134 fs/hfsplus/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);
iter              140 fs/hfsplus/inode.c 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
iter             1167 fs/io_uring.c  			   struct iov_iter *iter)
iter             1199 fs/io_uring.c  	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
iter             1221 fs/io_uring.c  			iov_iter_advance(iter, offset);
iter             1229 fs/io_uring.c  			iter->bvec = bvec + seg_skip;
iter             1230 fs/io_uring.c  			iter->nr_segs -= seg_skip;
iter             1231 fs/io_uring.c  			iter->count -= bvec->bv_len + offset;
iter             1232 fs/io_uring.c  			iter->iov_offset = offset & ~PAGE_MASK;
iter             1241 fs/io_uring.c  			       struct iov_iter *iter)
iter             1259 fs/io_uring.c  		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
iter             1270 fs/io_uring.c  						iovec, iter);
iter             1273 fs/io_uring.c  	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
iter             1340 fs/io_uring.c  			   struct iov_iter *iter)
iter             1354 fs/io_uring.c  	while (iov_iter_count(iter)) {
iter             1358 fs/io_uring.c  		if (!iov_iter_is_bvec(iter)) {
iter             1359 fs/io_uring.c  			iovec = iov_iter_iovec(iter);
iter             1362 fs/io_uring.c  			iovec.iov_base = kmap(iter->bvec->bv_page)
iter             1363 fs/io_uring.c  						+ iter->iov_offset;
iter             1364 fs/io_uring.c  			iovec.iov_len = min(iter->count,
iter             1365 fs/io_uring.c  					iter->bvec->bv_len - iter->iov_offset);
iter             1376 fs/io_uring.c  		if (iov_iter_is_bvec(iter))
iter             1377 fs/io_uring.c  			kunmap(iter->bvec->bv_page);
iter             1387 fs/io_uring.c  		iov_iter_advance(iter, nr);
iter             1398 fs/io_uring.c  	struct iov_iter iter;
iter             1411 fs/io_uring.c  	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
iter             1419 fs/io_uring.c  	iov_count = iov_iter_count(&iter);
iter             1425 fs/io_uring.c  			ret2 = call_read_iter(file, kiocb, &iter);
iter             1427 fs/io_uring.c  			ret2 = loop_rw_iter(READ, file, kiocb, &iter);
iter             1463 fs/io_uring.c  	struct iov_iter iter;
iter             1476 fs/io_uring.c  	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
iter             1483 fs/io_uring.c  	iov_count = iov_iter_count(&iter);
iter             1516 fs/io_uring.c  			ret2 = call_write_iter(file, kiocb, &iter);
iter             1518 fs/io_uring.c  			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
iter              819 fs/iomap/buffered-io.c iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
iter              825 fs/iomap/buffered-io.c 	while (iov_iter_count(iter)) {
iter              826 fs/iomap/buffered-io.c 		ret = iomap_apply(inode, pos, iov_iter_count(iter),
iter              827 fs/iomap/buffered-io.c 				IOMAP_WRITE, ops, iter, iomap_write_actor);
iter               38 fs/iomap/direct-io.c 			struct iov_iter		*iter;
iter              203 fs/iomap/direct-io.c 	unsigned int align = iov_iter_alignment(dio->submit.iter);
iter              204 fs/iomap/direct-io.c 	struct iov_iter iter;
iter              242 fs/iomap/direct-io.c 	iter = *dio->submit.iter;
iter              243 fs/iomap/direct-io.c 	iov_iter_truncate(&iter, length);
iter              245 fs/iomap/direct-io.c 	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
iter              259 fs/iomap/direct-io.c 			iov_iter_revert(dio->submit.iter, copied);
iter              271 fs/iomap/direct-io.c 		ret = bio_iov_iter_get_pages(bio, &iter);
iter              297 fs/iomap/direct-io.c 		iov_iter_advance(dio->submit.iter, n);
iter              303 fs/iomap/direct-io.c 		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
iter              329 fs/iomap/direct-io.c 	length = iov_iter_zero(length, dio->submit.iter);
iter              338 fs/iomap/direct-io.c 	struct iov_iter *iter = dio->submit.iter;
iter              348 fs/iomap/direct-io.c 		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
iter              355 fs/iomap/direct-io.c 		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
iter              396 fs/iomap/direct-io.c iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iter              401 fs/iomap/direct-io.c 	size_t count = iov_iter_count(iter);
iter              426 fs/iomap/direct-io.c 	dio->submit.iter = iter;
iter              431 fs/iomap/direct-io.c 	if (iov_iter_rw(iter) == READ) {
iter              435 fs/iomap/direct-io.c 		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
iter              479 fs/iomap/direct-io.c 	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
iter              502 fs/iomap/direct-io.c 		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
iter              508 fs/iomap/direct-io.c 			iov_iter_revert(iter, pos - dio->i_size);
iter              511 fs/iomap/direct-io.c 	} while ((count = iov_iter_count(iter)) > 0);
iter              334 fs/jfs/inode.c static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              339 fs/jfs/inode.c 	size_t count = iov_iter_count(iter);
iter              342 fs/jfs/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
iter              348 fs/jfs/inode.c 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
iter             1115 fs/libfs.c     ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             2902 fs/locks.c     	struct locks_iterator *iter = f->private;
iter             2911 fs/locks.c     	lock_get_status(f, fl, iter->li_pos, "");
iter             2914 fs/locks.c     		lock_get_status(f, bfl, iter->li_pos, " ->");
iter             2960 fs/locks.c     	struct locks_iterator *iter = f->private;
iter             2962 fs/locks.c     	iter->li_pos = *pos + 1;
iter             2965 fs/locks.c     	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
iter             2970 fs/locks.c     	struct locks_iterator *iter = f->private;
iter             2972 fs/locks.c     	++iter->li_pos;
iter             2973 fs/locks.c     	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
iter              264 fs/nfs/direct.c ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              272 fs/nfs/direct.c 	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
iter              274 fs/nfs/direct.c 	if (iov_iter_rw(iter) == READ)
iter              275 fs/nfs/direct.c 		return nfs_file_direct_read(iocb, iter);
iter              276 fs/nfs/direct.c 	return nfs_file_direct_write(iocb, iter);
iter              448 fs/nfs/direct.c 					      struct iov_iter *iter,
iter              463 fs/nfs/direct.c 	while (iov_iter_count(iter)) {
iter              469 fs/nfs/direct.c 		result = iov_iter_get_pages_alloc(iter, &pagevec, 
iter              475 fs/nfs/direct.c 		iov_iter_advance(iter, bytes);
iter              542 fs/nfs/direct.c ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
iter              550 fs/nfs/direct.c 	size_t count = iov_iter_count(iter);
iter              581 fs/nfs/direct.c 	if (iter_is_iovec(iter))
iter              587 fs/nfs/direct.c 	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
iter              597 fs/nfs/direct.c 		iov_iter_revert(iter, requested);
iter              853 fs/nfs/direct.c 					       struct iov_iter *iter,
iter              868 fs/nfs/direct.c 	NFS_I(inode)->write_io += iov_iter_count(iter);
iter              869 fs/nfs/direct.c 	while (iov_iter_count(iter)) {
iter              875 fs/nfs/direct.c 		result = iov_iter_get_pages_alloc(iter, &pagevec, 
iter              881 fs/nfs/direct.c 		iov_iter_advance(iter, bytes);
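The NFS read and write scheduling loops above share one shape: pin the pages behind the iterator, submit I/O against them, then advance. A minimal sketch of that shape, assuming a byte cap max_bytes and a hypothetical submit_pages() standing in for the real request setup:

	struct page **pages;
	size_t pgbase;
	ssize_t bytes;

	while (iov_iter_count(iter)) {
		/* pin up to max_bytes worth of the pages backing the iterator */
		bytes = iov_iter_get_pages_alloc(iter, &pages, max_bytes, &pgbase);
		if (bytes < 0)
			break;
		submit_pages(pages, pgbase, bytes);	/* hypothetical request setup */
		iov_iter_advance(iter, bytes);
		kvfree(pages);	/* page array allocated by iov_iter_get_pages_alloc() */
	}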
iter              956 fs/nfs/direct.c ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
iter              968 fs/nfs/direct.c 		file, iov_iter_count(iter), (long long) iocb->ki_pos);
iter              970 fs/nfs/direct.c 	result = generic_write_checks(iocb, iter);
iter              977 fs/nfs/direct.c 	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
iter             1002 fs/nfs/direct.c 	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
iter             1019 fs/nfs/direct.c 		iov_iter_revert(iter, requested);
iter              902 fs/nfsd/vfs.c  	struct iov_iter iter;
iter              907 fs/nfsd/vfs.c  	iov_iter_kvec(&iter, READ, vec, vlen, *count);
iter              908 fs/nfsd/vfs.c  	host_err = vfs_iter_read(file, &iter, &ppos, 0);
iter              955 fs/nfsd/vfs.c  	struct iov_iter		iter;
iter              983 fs/nfsd/vfs.c  	iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
iter              984 fs/nfsd/vfs.c  	host_err = vfs_iter_write(file, &iter, &pos, flags);
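nfsd wraps its kernel-space kvec array in an iov_iter before calling into the VFS. A minimal sketch of the READ side, assuming vec/vlen describe kernel buffers totalling count bytes:

	struct iov_iter iter;
	loff_t pos = 0;
	ssize_t ret;

	iov_iter_kvec(&iter, READ, vec, vlen, count);
	ret = vfs_iter_read(file, &iter, &pos, 0);
	/* ret: bytes read into the kvecs, or a negative errno */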
iter              295 fs/nilfs2/inode.c nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              299 fs/nilfs2/inode.c 	if (iov_iter_rw(iter) == WRITE)
iter              303 fs/nilfs2/inode.c 	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
iter             2447 fs/ocfs2/aops.c static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             2462 fs/ocfs2/aops.c 	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
iter             2466 fs/ocfs2/aops.c 	if (iov_iter_rw(iter) == READ)
iter             2472 fs/ocfs2/aops.c 				    iter, get_block,
iter             1105 fs/ocfs2/dlm/dlmcommon.h 				      struct dlm_node_iter *iter)
iter             1107 fs/ocfs2/dlm/dlmcommon.h 	memcpy(iter->node_map, map, sizeof(iter->node_map));
iter             1108 fs/ocfs2/dlm/dlmcommon.h 	iter->curnode = -1;
iter             1111 fs/ocfs2/dlm/dlmcommon.h static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
iter             1114 fs/ocfs2/dlm/dlmcommon.h 	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
iter             1116 fs/ocfs2/dlm/dlmcommon.h 		iter->curnode = O2NM_MAX_NODES;
iter             1119 fs/ocfs2/dlm/dlmcommon.h 	iter->curnode = bit;
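These two inlines are the whole dlm_node_iter protocol; the dlmmaster.c and dlmrecovery.c call sites below all follow the same loop. Sketch, assuming map is an O2NM_MAX_NODES bitmap:

	struct dlm_node_iter iter;
	int node;

	dlm_node_iter_init(map, &iter);
	while ((node = dlm_node_iter_next(&iter)) >= 0) {
		/* node is the next bit set in the snapshot taken at init time */
	}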
iter              409 fs/ocfs2/dlm/dlmdomain.c 	struct hlist_node *iter;
iter              421 fs/ocfs2/dlm/dlmdomain.c 		iter = bucket->first;
iter              422 fs/ocfs2/dlm/dlmdomain.c 		while (iter) {
iter              424 fs/ocfs2/dlm/dlmdomain.c 			res = hlist_entry(iter, struct dlm_lock_resource,
iter              435 fs/ocfs2/dlm/dlmdomain.c 				iter = res->hash_node.next;
iter              713 fs/ocfs2/dlm/dlmmaster.c 	struct dlm_node_iter iter;
iter              936 fs/ocfs2/dlm/dlmmaster.c 	dlm_node_iter_init(mle->vote_map, &iter);
iter              937 fs/ocfs2/dlm/dlmmaster.c 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
iter             1164 fs/ocfs2/dlm/dlmmaster.c static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
iter             1171 fs/ocfs2/dlm/dlmmaster.c 	iter->curnode = -1;
iter             1172 fs/ocfs2/dlm/dlmmaster.c 	iter->orig_bm = orig_bm;
iter             1173 fs/ocfs2/dlm/dlmmaster.c 	iter->cur_bm = cur_bm;
iter             1176 fs/ocfs2/dlm/dlmmaster.c        		p1 = *(iter->orig_bm + i);
iter             1177 fs/ocfs2/dlm/dlmmaster.c 	       	p2 = *(iter->cur_bm + i);
iter             1178 fs/ocfs2/dlm/dlmmaster.c 		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
iter             1182 fs/ocfs2/dlm/dlmmaster.c static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
iter             1187 fs/ocfs2/dlm/dlmmaster.c 	if (iter->curnode >= O2NM_MAX_NODES)
iter             1190 fs/ocfs2/dlm/dlmmaster.c 	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
iter             1191 fs/ocfs2/dlm/dlmmaster.c 			    iter->curnode+1);
iter             1193 fs/ocfs2/dlm/dlmmaster.c 		iter->curnode = O2NM_MAX_NODES;
iter             1198 fs/ocfs2/dlm/dlmmaster.c 	if (test_bit(bit, iter->orig_bm))
iter             1203 fs/ocfs2/dlm/dlmmaster.c 	iter->curnode = bit;
iter             1658 fs/ocfs2/dlm/dlmmaster.c 	struct dlm_node_iter iter;
iter             1674 fs/ocfs2/dlm/dlmmaster.c 	dlm_node_iter_init(nodemap, &iter);
iter             1675 fs/ocfs2/dlm/dlmmaster.c 	while ((to = dlm_node_iter_next(&iter)) >= 0) {
iter             2170 fs/ocfs2/dlm/dlmmaster.c 	struct dlm_node_iter iter;
iter             2176 fs/ocfs2/dlm/dlmmaster.c 	dlm_node_iter_init(dlm->domain_map, &iter);
iter             2179 fs/ocfs2/dlm/dlmmaster.c 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
iter             3030 fs/ocfs2/dlm/dlmmaster.c 				  struct dlm_node_iter *iter)
iter             3045 fs/ocfs2/dlm/dlmmaster.c 	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
iter             3055 fs/ocfs2/dlm/dlmmaster.c 			clear_bit(nodenum, iter->node_map);
iter             3070 fs/ocfs2/dlm/dlmmaster.c 			clear_bit(nodenum, iter->node_map);
iter             3419 fs/ocfs2/dlm/dlmmaster.c 	struct dlm_node_iter iter;
iter             3423 fs/ocfs2/dlm/dlmmaster.c 	dlm_node_iter_init(dlm->domain_map, &iter);
iter             3424 fs/ocfs2/dlm/dlmmaster.c 	clear_bit(old_master, iter.node_map);
iter             3425 fs/ocfs2/dlm/dlmmaster.c 	clear_bit(dlm->node_num, iter.node_map);
iter             3437 fs/ocfs2/dlm/dlmmaster.c 				     dlm->node_num, &iter);
iter             3447 fs/ocfs2/dlm/dlmmaster.c 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
iter             3455 fs/ocfs2/dlm/dlmmaster.c 	memset(iter.node_map, 0, sizeof(iter.node_map));
iter             3456 fs/ocfs2/dlm/dlmmaster.c 	set_bit(old_master, iter.node_map);
iter             3459 fs/ocfs2/dlm/dlmmaster.c 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
iter             1092 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *iter, *queue = &res->granted;
iter             1096 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each(iter, queue)
iter             1614 fs/ocfs2/dlm/dlmrecovery.c 	struct dlm_node_iter iter;
iter             1644 fs/ocfs2/dlm/dlmrecovery.c 	dlm_node_iter_init(dlm->domain_map, &iter);
iter             1647 fs/ocfs2/dlm/dlmrecovery.c 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
iter             1801 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *queue, *iter;
iter             1844 fs/ocfs2/dlm/dlmrecovery.c 				list_for_each(iter, tmpq) {
iter             1845 fs/ocfs2/dlm/dlmrecovery.c 					lock = list_entry(iter,
iter             2682 fs/ocfs2/dlm/dlmrecovery.c 	struct dlm_node_iter iter;
iter             2689 fs/ocfs2/dlm/dlmrecovery.c 	dlm_node_iter_init(dlm->domain_map, &iter);
iter             2692 fs/ocfs2/dlm/dlmrecovery.c 	clear_bit(dead_node, iter.node_map);
iter             2698 fs/ocfs2/dlm/dlmrecovery.c 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
iter             2845 fs/ocfs2/dlm/dlmrecovery.c 	struct dlm_node_iter iter;
iter             2854 fs/ocfs2/dlm/dlmrecovery.c 	dlm_node_iter_init(dlm->domain_map, &iter);
iter             2864 fs/ocfs2/dlm/dlmrecovery.c 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
iter             2889 fs/ocfs2/dlm/dlmrecovery.c 		iter.curnode = -1;
iter             3027 fs/ocfs2/dlmglue.c 	struct ocfs2_lock_res *iter, *ret = NULL;
iter             3032 fs/ocfs2/dlmglue.c 	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
iter             3034 fs/ocfs2/dlmglue.c 		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
iter             3041 fs/ocfs2/dlmglue.c 		if (iter->l_ops != NULL) {
iter             3042 fs/ocfs2/dlmglue.c 			ret = iter;
iter             3053 fs/ocfs2/dlmglue.c 	struct ocfs2_lock_res *iter;
iter             3056 fs/ocfs2/dlmglue.c 	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
iter             3057 fs/ocfs2/dlmglue.c 	if (iter) {
iter             3065 fs/ocfs2/dlmglue.c 		priv->p_tmp_res = *iter;
iter             3066 fs/ocfs2/dlmglue.c 		iter = &priv->p_tmp_res;
iter             3070 fs/ocfs2/dlmglue.c 	return iter;
iter             3080 fs/ocfs2/dlmglue.c 	struct ocfs2_lock_res *iter = v;
iter             3084 fs/ocfs2/dlmglue.c 	iter = ocfs2_dlm_next_res(iter, priv);
iter             3086 fs/ocfs2/dlmglue.c 	if (iter) {
iter             3087 fs/ocfs2/dlmglue.c 		list_add(&dummy->l_debug_list, &iter->l_debug_list);
iter             3088 fs/ocfs2/dlmglue.c 		priv->p_tmp_res = *iter;
iter             3089 fs/ocfs2/dlmglue.c 		iter = &priv->p_tmp_res;
iter             3093 fs/ocfs2/dlmglue.c 	return iter;
iter             2040 fs/ocfs2/journal.c 	struct inode *iter;
iter             2054 fs/ocfs2/journal.c 	iter = ocfs2_iget(p->osb, ino,
iter             2056 fs/ocfs2/journal.c 	if (IS_ERR(iter))
iter             2061 fs/ocfs2/journal.c 		OCFS2_I(iter)->ip_flags |= OCFS2_INODE_DIO_ORPHAN_ENTRY;
iter             2065 fs/ocfs2/journal.c 	if (OCFS2_I(iter)->ip_next_orphan) {
iter             2066 fs/ocfs2/journal.c 		iput(iter);
iter             2070 fs/ocfs2/journal.c 	trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
iter             2073 fs/ocfs2/journal.c 	OCFS2_I(iter)->ip_next_orphan = p->head;
iter             2074 fs/ocfs2/journal.c 	p->head = iter;
iter             2185 fs/ocfs2/journal.c 	struct inode *iter;
iter             2206 fs/ocfs2/journal.c 		iter = oi->ip_next_orphan;
iter             2262 fs/ocfs2/journal.c 		inode = iter;
iter              362 fs/orangefs/devorangefs-req.c 				      struct iov_iter *iter)
iter              371 fs/orangefs/devorangefs-req.c 	int total = ret = iov_iter_count(iter);
iter              388 fs/orangefs/devorangefs-req.c 	if (!copy_from_iter_full(&head, head_size, iter)) {
iter              423 fs/orangefs/devorangefs-req.c 	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
iter              474 fs/orangefs/devorangefs-req.c 			         op->downcall.trailer_size, iter)) {
iter               49 fs/orangefs/file.c     loff_t *offset, struct iov_iter *iter, size_t total_size,
iter              104 fs/orangefs/file.c 		ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index,
iter              138 fs/orangefs/file.c 			iov_iter_revert(iter, total_size);
iter              232 fs/orangefs/file.c 		ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index,
iter              311 fs/orangefs/file.c     struct iov_iter *iter)
iter              328 fs/orangefs/file.c 		ro->blksiz = iter->count;
iter              336 fs/orangefs/file.c 	ret = generic_file_read_iter(iocb, iter);
iter              343 fs/orangefs/file.c     struct iov_iter *iter)
iter              354 fs/orangefs/file.c 	ret = generic_file_write_iter(iocb, iter);
iter               23 fs/orangefs/inode.c 	struct iov_iter iter;
iter               55 fs/orangefs/inode.c 	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);
iter               57 fs/orangefs/inode.c 	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
iter               99 fs/orangefs/inode.c 	struct iov_iter iter;
iter              119 fs/orangefs/inode.c 	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);
iter              128 fs/orangefs/inode.c 	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
iter              255 fs/orangefs/inode.c 	struct iov_iter iter;
iter              311 fs/orangefs/inode.c 	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
iter              313 fs/orangefs/inode.c 	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
iter              317 fs/orangefs/inode.c 	iov_iter_zero(~0U, &iter);
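orangefs describes a single page with a bvec-backed iov_iter for its page I/O paths. A minimal sketch of that wrapping (the orangefs-specific wait_for_direct_io() call is omitted):

	struct bio_vec bv = {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
	/* any iov_iter consumer can now fill the page; iov_iter_zero(~0U, &iter)
	 * clears whatever the read left untouched, as above */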
iter              589 fs/orangefs/inode.c 				  struct iov_iter *iter)
iter              601 fs/orangefs/inode.c 	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
iter              607 fs/orangefs/inode.c 	size_t count = iov_iter_count(iter);
iter              633 fs/orangefs/inode.c 	while (iov_iter_count(iter)) {
iter              634 fs/orangefs/inode.c 		size_t each_count = iov_iter_count(iter);
iter              653 fs/orangefs/inode.c 		ret = wait_for_direct_io(type, inode, offset, iter,
iter              488 fs/orangefs/orangefs-bufmap.c int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
iter              505 fs/orangefs/orangefs-bufmap.c 		if (copy_page_from_iter(page, 0, n, iter) != n)
iter              516 fs/orangefs/orangefs-bufmap.c int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
iter              534 fs/orangefs/orangefs-bufmap.c 		n = copy_page_to_iter(page, 0, n, iter);
iter               29 fs/orangefs/orangefs-bufmap.h int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
iter               33 fs/orangefs/orangefs-bufmap.h int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
iter              228 fs/overlayfs/file.c static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
iter              235 fs/overlayfs/file.c 	if (!iov_iter_count(iter))
iter              243 fs/overlayfs/file.c 	ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
iter              254 fs/overlayfs/file.c static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
iter              262 fs/overlayfs/file.c 	if (!iov_iter_count(iter))
iter              278 fs/overlayfs/file.c 	ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
iter             3276 fs/proc/base.c static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
iter             3280 fs/proc/base.c 	if (iter.task)
iter             3281 fs/proc/base.c 		put_task_struct(iter.task);
iter             3284 fs/proc/base.c 	iter.task = NULL;
iter             3285 fs/proc/base.c 	pid = find_ge_pid(iter.tgid, ns);
iter             3287 fs/proc/base.c 		iter.tgid = pid_nr_ns(pid, ns);
iter             3288 fs/proc/base.c 		iter.task = pid_task(pid, PIDTYPE_PID);
iter             3301 fs/proc/base.c 		if (!iter.task || !has_group_leader_pid(iter.task)) {
iter             3302 fs/proc/base.c 			iter.tgid += 1;
iter             3305 fs/proc/base.c 		get_task_struct(iter.task);
iter             3308 fs/proc/base.c 	return iter;
iter             3316 fs/proc/base.c 	struct tgid_iter iter;
iter             3335 fs/proc/base.c 	iter.tgid = pos - TGID_OFFSET;
iter             3336 fs/proc/base.c 	iter.task = NULL;
iter             3337 fs/proc/base.c 	for (iter = next_tgid(ns, iter);
iter             3338 fs/proc/base.c 	     iter.task;
iter             3339 fs/proc/base.c 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
iter             3344 fs/proc/base.c 		if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
iter             3347 fs/proc/base.c 		len = snprintf(name, sizeof(name), "%u", iter.tgid);
iter             3348 fs/proc/base.c 		ctx->pos = iter.tgid + TGID_OFFSET;
iter             3350 fs/proc/base.c 				     proc_pid_instantiate, iter.task, NULL)) {
iter             3351 fs/proc/base.c 			put_task_struct(iter.task);
iter              407 fs/read_write.c 	struct iov_iter iter;
iter              412 fs/read_write.c 	iov_iter_init(&iter, READ, &iov, 1, len);
iter              414 fs/read_write.c 	ret = call_read_iter(filp, &kiocb, &iter);
iter              476 fs/read_write.c 	struct iov_iter iter;
iter              481 fs/read_write.c 	iov_iter_init(&iter, WRITE, &iov, 1, len);
iter              483 fs/read_write.c 	ret = call_write_iter(filp, &kiocb, &iter);
iter              678 fs/read_write.c static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
iter              691 fs/read_write.c 		ret = call_read_iter(filp, &kiocb, iter);
iter              693 fs/read_write.c 		ret = call_write_iter(filp, &kiocb, iter);
iter              701 fs/read_write.c static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
iter              709 fs/read_write.c 	while (iov_iter_count(iter)) {
iter              710 fs/read_write.c 		struct iovec iovec = iov_iter_iovec(iter);
iter              729 fs/read_write.c 		iov_iter_advance(iter, nr);
iter              914 fs/read_write.c static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
iter              925 fs/read_write.c 	tot_len = iov_iter_count(iter);
iter              933 fs/read_write.c 		ret = do_iter_readv_writev(file, iter, pos, READ, flags);
iter              935 fs/read_write.c 		ret = do_loop_readv_writev(file, iter, pos, READ, flags);
iter              942 fs/read_write.c ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
iter              947 fs/read_write.c 	return do_iter_read(file, iter, ppos, flags);
iter              951 fs/read_write.c static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
iter              962 fs/read_write.c 	tot_len = iov_iter_count(iter);
iter              970 fs/read_write.c 		ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);
iter              972 fs/read_write.c 		ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);
iter              978 fs/read_write.c ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
iter              983 fs/read_write.c 	return do_iter_write(file, iter, ppos, flags);
iter              992 fs/read_write.c 	struct iov_iter iter;
iter              995 fs/read_write.c 	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
iter              997 fs/read_write.c 		ret = do_iter_read(file, &iter, pos, flags);
iter             1009 fs/read_write.c 	struct iov_iter iter;
iter             1012 fs/read_write.c 	ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
iter             1015 fs/read_write.c 		ret = do_iter_write(file, &iter, pos, flags);
iter             1181 fs/read_write.c 	struct iov_iter iter;
iter             1184 fs/read_write.c 	ret = compat_import_iovec(READ, vec, vlen, UIO_FASTIOV, &iov, &iter);
iter             1186 fs/read_write.c 		ret = do_iter_read(file, &iter, pos, flags);
iter             1289 fs/read_write.c 	struct iov_iter iter;
iter             1292 fs/read_write.c 	ret = compat_import_iovec(WRITE, vec, vlen, UIO_FASTIOV, &iov, &iter);
iter             1295 fs/read_write.c 		ret = do_iter_write(file, &iter, pos, flags);
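do_readv()/do_writev() and their compat variants all funnel a userspace iovec array through import_iovec(). A minimal sketch of the READ side, assuming uvec/nr_segs come from userspace:

	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret >= 0) {
		ret = vfs_iter_read(file, &iter, &pos, 0);
		/* import_iovec() left iov NULL when the on-stack array sufficed,
		 * so kfree() is always safe here */
		kfree(iov);
	}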
iter             3263 fs/reiserfs/inode.c static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter             3267 fs/reiserfs/inode.c 	size_t count = iov_iter_count(iter);
iter             3270 fs/reiserfs/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter,
iter             3277 fs/reiserfs/inode.c 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
iter             1244 fs/reiserfs/stree.c 	int iter = 0;
iter             1256 fs/reiserfs/stree.c 		iter++;
iter             1264 fs/splice.c    static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
iter             1269 fs/splice.c    		.total_len = iov_iter_count(iter),
iter             1271 fs/splice.c    		.u.data = iter
iter             1292 fs/splice.c    static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
iter             1309 fs/splice.c    		ret = iter_to_pipe(iter, pipe, buf_flag);
iter             1347 fs/splice.c    static long do_vmsplice(struct file *f, struct iov_iter *iter, unsigned int flags)
iter             1352 fs/splice.c    	if (!iov_iter_count(iter))
iter             1355 fs/splice.c    	if (iov_iter_rw(iter) == WRITE)
iter             1356 fs/splice.c    		return vmsplice_to_pipe(f, iter, flags);
iter             1358 fs/splice.c    		return vmsplice_to_user(f, iter, flags);
iter             1366 fs/splice.c    	struct iov_iter iter;
iter             1377 fs/splice.c    			     ARRAY_SIZE(iovstack), &iov, &iter);
iter             1379 fs/splice.c    		error = do_vmsplice(f.file, &iter, flags);
iter             1392 fs/splice.c    	struct iov_iter iter;
iter             1403 fs/splice.c    			     ARRAY_SIZE(iovstack), &iov, &iter);
iter             1405 fs/splice.c    		error = do_vmsplice(f.file, &iter, flags);
iter              108 fs/udf/file.c  static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              216 fs/udf/inode.c static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter              221 fs/udf/inode.c 	size_t count = iov_iter_count(iter);
iter              224 fs/udf/inode.c 	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
iter              225 fs/udf/inode.c 	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
iter              165 include/crypto/if_alg.h int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
iter              182 include/drm/drm_client.h #define drm_client_for_each_connector_iter(connector, iter) \
iter              183 include/drm/drm_client.h 	drm_for_each_connector_iter(connector, iter) \
iter             1587 include/drm/drm_connector.h 				   struct drm_connector_list_iter *iter);
iter             1589 include/drm/drm_connector.h drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
iter             1590 include/drm/drm_connector.h void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
iter             1604 include/drm/drm_connector.h #define drm_for_each_connector_iter(connector, iter) \
iter             1605 include/drm/drm_connector.h 	while ((connector = drm_connector_list_iter_next(iter)))
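The iterator object makes the connector walk safe against concurrent hotplug. Typical usage, assuming dev is the drm_device:

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* each registered connector, with a temporary reference held */
	}
	drm_connector_list_iter_end(&iter);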
iter               46 include/drm/drm_damage_helper.h #define drm_atomic_for_each_plane_damage(iter, rect) \
iter               47 include/drm/drm_damage_helper.h 	while (drm_atomic_helper_damage_iter_next(iter, rect))
iter               75 include/drm/drm_damage_helper.h drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter               79 include/drm/drm_damage_helper.h drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
iter              167 include/drm/drm_print.h drm_coredump_printer(struct drm_print_iterator *iter)
iter              172 include/drm/drm_print.h 		.arg = iter,
iter              176 include/drm/drm_print.h 	iter->offset = 0;
iter              256 include/kvm/arm_vgic.h 	struct vgic_state_iter *iter;
iter               29 include/linux/bio.h #define bio_iter_iovec(bio, iter)				\
iter               30 include/linux/bio.h 	bvec_iter_bvec((bio)->bi_io_vec, (iter))
iter               32 include/linux/bio.h #define bio_iter_page(bio, iter)				\
iter               33 include/linux/bio.h 	bvec_iter_page((bio)->bi_io_vec, (iter))
iter               34 include/linux/bio.h #define bio_iter_len(bio, iter)					\
iter               35 include/linux/bio.h 	bvec_iter_len((bio)->bi_io_vec, (iter))
iter               36 include/linux/bio.h #define bio_iter_offset(bio, iter)				\
iter               37 include/linux/bio.h 	bvec_iter_offset((bio)->bi_io_vec, (iter))
iter               46 include/linux/bio.h #define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
iter               47 include/linux/bio.h #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
iter              125 include/linux/bio.h 				    struct bvec_iter_all *iter)
iter              127 include/linux/bio.h 	if (iter->idx >= bio->bi_vcnt)
iter              130 include/linux/bio.h 	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
iter              138 include/linux/bio.h #define bio_for_each_segment_all(bvl, bio, iter) \
iter              139 include/linux/bio.h 	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
iter              141 include/linux/bio.h static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
iter              144 include/linux/bio.h 	iter->bi_sector += bytes >> 9;
iter              147 include/linux/bio.h 		iter->bi_size -= bytes;
iter              149 include/linux/bio.h 		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
iter              153 include/linux/bio.h #define __bio_for_each_segment(bvl, bio, iter, start)			\
iter              154 include/linux/bio.h 	for (iter = (start);						\
iter              155 include/linux/bio.h 	     (iter).bi_size &&						\
iter              156 include/linux/bio.h 		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
iter              157 include/linux/bio.h 	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
iter              159 include/linux/bio.h #define bio_for_each_segment(bvl, bio, iter)				\
iter              160 include/linux/bio.h 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
iter              162 include/linux/bio.h #define __bio_for_each_bvec(bvl, bio, iter, start)		\
iter              163 include/linux/bio.h 	for (iter = (start);						\
iter              164 include/linux/bio.h 	     (iter).bi_size &&						\
iter              165 include/linux/bio.h 		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
iter              166 include/linux/bio.h 	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
iter              169 include/linux/bio.h #define bio_for_each_bvec(bvl, bio, iter)			\
iter              170 include/linux/bio.h 	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
iter              172 include/linux/bio.h #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
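bio_for_each_segment() yields single-page bio_vecs, bio_for_each_bvec() the multi-page segments as stored. A minimal sketch of the single-page form:

	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		void *addr = kmap_atomic(bvec.bv_page);

		/* bvec.bv_len bytes of bio payload start at addr + bvec.bv_offset */
		kunmap_atomic(addr);
	}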
iter              178 include/linux/bio.h 	struct bvec_iter iter;
iter              196 include/linux/bio.h 	bio_for_each_segment(bv, bio, iter)
iter              254 include/linux/bio.h 	struct bvec_iter iter = bio->bi_iter;
iter              262 include/linux/bio.h 	bio_advance_iter(bio, &iter, iter.bi_size);
iter              264 include/linux/bio.h 	if (!iter.bi_bvec_done)
iter              265 include/linux/bio.h 		idx = iter.bi_idx - 1;
iter              267 include/linux/bio.h 		idx = iter.bi_idx;
iter              275 include/linux/bio.h 	if (iter.bi_bvec_done)
iter              276 include/linux/bio.h 		bv->bv_len = iter.bi_bvec_done;
iter              442 include/linux/bio.h int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
iter              472 include/linux/bio.h void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
iter              743 include/linux/bio.h #define bip_for_each_vec(bvl, bip, iter)				\
iter              744 include/linux/bio.h 	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
iter              814 include/linux/blkdev.h 	struct bvec_iter iter;
iter              827 include/linux/blkdev.h 		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
iter              831 include/linux/blkdev.h 		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
iter              835 include/linux/blkdev.h 		 bio_iter_last(bvec, _iter.iter))
iter              271 include/linux/bpf_verifier.h #define bpf_for_each_spilled_reg(iter, frame, reg)			\
iter              272 include/linux/bpf_verifier.h 	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
iter              273 include/linux/bpf_verifier.h 	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
iter              274 include/linux/bpf_verifier.h 	     iter++, reg = bpf_get_spilled_reg(iter, frame))
iter               45 include/linux/bvec.h #define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])
iter               48 include/linux/bvec.h #define mp_bvec_iter_page(bvec, iter)				\
iter               49 include/linux/bvec.h 	(__bvec_iter_bvec((bvec), (iter))->bv_page)
iter               51 include/linux/bvec.h #define mp_bvec_iter_len(bvec, iter)				\
iter               52 include/linux/bvec.h 	min((iter).bi_size,					\
iter               53 include/linux/bvec.h 	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
iter               55 include/linux/bvec.h #define mp_bvec_iter_offset(bvec, iter)				\
iter               56 include/linux/bvec.h 	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
iter               58 include/linux/bvec.h #define mp_bvec_iter_page_idx(bvec, iter)			\
iter               59 include/linux/bvec.h 	(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
iter               61 include/linux/bvec.h #define mp_bvec_iter_bvec(bvec, iter)				\
iter               63 include/linux/bvec.h 	.bv_page	= mp_bvec_iter_page((bvec), (iter)),	\
iter               64 include/linux/bvec.h 	.bv_len		= mp_bvec_iter_len((bvec), (iter)),	\
iter               65 include/linux/bvec.h 	.bv_offset	= mp_bvec_iter_offset((bvec), (iter)),	\
iter               69 include/linux/bvec.h  #define bvec_iter_offset(bvec, iter)				\
iter               70 include/linux/bvec.h 	(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
iter               72 include/linux/bvec.h #define bvec_iter_len(bvec, iter)				\
iter               73 include/linux/bvec.h 	min_t(unsigned, mp_bvec_iter_len((bvec), (iter)),		\
iter               74 include/linux/bvec.h 	      PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
iter               76 include/linux/bvec.h #define bvec_iter_page(bvec, iter)				\
iter               77 include/linux/bvec.h 	(mp_bvec_iter_page((bvec), (iter)) +			\
iter               78 include/linux/bvec.h 	 mp_bvec_iter_page_idx((bvec), (iter)))
iter               80 include/linux/bvec.h #define bvec_iter_bvec(bvec, iter)				\
iter               82 include/linux/bvec.h 	.bv_page	= bvec_iter_page((bvec), (iter)),	\
iter               83 include/linux/bvec.h 	.bv_len		= bvec_iter_len((bvec), (iter)),	\
iter               84 include/linux/bvec.h 	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
iter               88 include/linux/bvec.h 		struct bvec_iter *iter, unsigned bytes)
iter               90 include/linux/bvec.h 	if (WARN_ONCE(bytes > iter->bi_size,
iter               92 include/linux/bvec.h 		iter->bi_size = 0;
iter               97 include/linux/bvec.h 		const struct bio_vec *cur = bv + iter->bi_idx;
iter               98 include/linux/bvec.h 		unsigned len = min3(bytes, iter->bi_size,
iter               99 include/linux/bvec.h 				    cur->bv_len - iter->bi_bvec_done);
iter              102 include/linux/bvec.h 		iter->bi_size -= len;
iter              103 include/linux/bvec.h 		iter->bi_bvec_done += len;
iter              105 include/linux/bvec.h 		if (iter->bi_bvec_done == cur->bv_len) {
iter              106 include/linux/bvec.h 			iter->bi_bvec_done = 0;
iter              107 include/linux/bvec.h 			iter->bi_idx++;
iter              113 include/linux/bvec.h #define for_each_bvec(bvl, bio_vec, iter, start)			\
iter              114 include/linux/bvec.h 	for (iter = (start);						\
iter              115 include/linux/bvec.h 	     (iter).bi_size &&						\
iter              116 include/linux/bvec.h 		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);	\
iter              117 include/linux/bvec.h 	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
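for_each_bvec() is the raw walker the bio helpers above build on; it takes any bio_vec array plus a caller-initialised bvec_iter. Sketch, assuming bvecs covers total bytes:

	struct bvec_iter start = { .bi_size = total };	/* bi_idx/bi_bvec_done = 0 */
	struct bvec_iter iter;
	struct bio_vec bv;

	for_each_bvec(bv, bvecs, iter, start) {
		/* bv is clamped to at most one page per step, as in bvec_iter_bvec() */
	}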
iter               89 include/linux/ceph/messenger.h 	struct bvec_iter iter;
iter               96 include/linux/ceph/messenger.h 		BUG_ON(!(it)->iter.bi_size);				      \
iter               97 include/linux/ceph/messenger.h 		__cur_n = min((it)->iter.bi_size, __n);			      \
iter               99 include/linux/ceph/messenger.h 		bio_advance_iter((it)->bio, &(it)->iter, __cur_n);	      \
iter              100 include/linux/ceph/messenger.h 		if (!(it)->iter.bi_size && (it)->bio->bi_next) {	      \
iter              103 include/linux/ceph/messenger.h 			(it)->iter = (it)->bio->bi_iter;		      \
iter              123 include/linux/ceph/messenger.h 		__cur_iter = (it)->iter;				      \
iter              133 include/linux/ceph/messenger.h 	struct bvec_iter iter;
iter              137 include/linux/ceph/messenger.h 	BUG_ON((n) > (it)->iter.bi_size);				      \
iter              139 include/linux/ceph/messenger.h 	bvec_iter_advance((it)->bvecs, &(it)->iter, (n));		      \
iter              156 include/linux/ceph/messenger.h 		__cur_iter = (it)->iter;				      \
iter              163 include/linux/ceph/messenger.h 	BUG_ON((n) > (it)->iter.bi_size);				      \
iter              164 include/linux/ceph/messenger.h 	(it)->iter.bi_size = (n);					      \
iter              214 include/linux/dax.h ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
iter               26 include/linux/devcoredump.h 	struct scatterlist *iter;
iter               30 include/linux/devcoredump.h 	iter = table;
iter               31 include/linux/devcoredump.h 	for_each_sg(table, iter, sg_nents(table), i) {
iter               32 include/linux/devcoredump.h 		page = sg_page(iter);
iter               38 include/linux/devcoredump.h 	iter = table;
iter               40 include/linux/devcoredump.h 	while (!sg_is_last(iter)) {
iter               41 include/linux/devcoredump.h 		iter++;
iter               42 include/linux/devcoredump.h 		if (sg_is_chain(iter)) {
iter               43 include/linux/devcoredump.h 			iter = sg_chain_ptr(iter);
iter               45 include/linux/devcoredump.h 			delete_iter = iter;
iter              160 include/linux/device.h void subsys_dev_iter_init(struct subsys_dev_iter *iter,
iter              164 include/linux/device.h struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
iter              165 include/linux/device.h void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
iter              626 include/linux/device.h extern void class_dev_iter_init(struct class_dev_iter *iter,
iter              630 include/linux/device.h extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
iter              631 include/linux/device.h extern void class_dev_iter_exit(struct class_dev_iter *iter);
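Both the subsys and class iterators follow the same init/next/exit protocol. Sketch for the class variant, assuming my_class is a registered struct class (start device and type filter left NULL):

	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, my_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		/* dev holds a reference for the duration of this step */
	}
	class_dev_iter_exit(&iter);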
iter               62 include/linux/dma-fence-chain.h #define dma_fence_chain_for_each(iter, head)	\
iter               63 include/linux/dma-fence-chain.h 	for (iter = dma_fence_get(head); iter; \
iter               64 include/linux/dma-fence-chain.h 	     iter = dma_fence_chain_walk(iter))
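The macro takes its own reference on the head and drops the previous link on each step, so only an early break leaves a fence reference to put. Sketch:

	struct dma_fence *fence;

	dma_fence_chain_for_each(fence, head) {
		if (dma_fence_is_signaled(fence))
			continue;
		/* found an unsignaled link; we still own a reference to it */
		dma_fence_put(fence);
		break;
	}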
iter              391 include/linux/fs.h 	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
iter             1895 include/linux/fs.h 				     struct iov_iter *iter)
iter             1897 include/linux/fs.h 	return file->f_op->read_iter(kio, iter);
iter             1901 include/linux/fs.h 				      struct iov_iter *iter)
iter             1903 include/linux/fs.h 	return file->f_op->write_iter(kio, iter);
iter             3112 include/linux/fs.h ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
iter             3114 include/linux/fs.h ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
iter             3167 include/linux/fs.h 			     struct block_device *bdev, struct iov_iter *iter,
iter             3174 include/linux/fs.h 					 struct iov_iter *iter,
iter             3177 include/linux/fs.h 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
iter             3311 include/linux/fs.h extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
iter              421 include/linux/ftrace.h struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
iter              422 include/linux/ftrace.h struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
iter              424 include/linux/ftrace.h #define for_ftrace_rec_iter(iter)		\
iter              425 include/linux/ftrace.h 	for (iter = ftrace_rec_iter_start();	\
iter              426 include/linux/ftrace.h 	     iter;				\
iter              427 include/linux/ftrace.h 	     iter = ftrace_rec_iter_next(iter))
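Arch ftrace code uses this iterator to visit every patch site, e.g. from its ftrace_replace_code() implementation. Sketch:

	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/* rec->ip is the address of this call site */
	}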
iter              183 include/linux/generic-radix-tree.h static inline void __genradix_iter_advance(struct genradix_iter *iter,
iter              186 include/linux/generic-radix-tree.h 	iter->offset += obj_size;
iter              189 include/linux/generic-radix-tree.h 	    (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
iter              190 include/linux/generic-radix-tree.h 		iter->offset = round_up(iter->offset, PAGE_SIZE);
iter              192 include/linux/generic-radix-tree.h 	iter->pos++;
iter              197 include/linux/iomap.h ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iter              129 include/linux/memcontrol.h 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
iter              294 include/linux/mroute_base.h 		     struct mr_table *(*iter)(struct net *net,
iter              344 include/linux/mroute_base.h 		 struct mr_table *(*iter)(struct net *net,
iter              389 include/linux/mroute_base.h void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
iter              432 include/linux/mroute_base.h static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
iter              628 include/linux/mtd/nand.h 					struct nand_io_iter *iter)
iter              632 include/linux/mtd/nand.h 	iter->req.mode = req->mode;
iter              633 include/linux/mtd/nand.h 	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter              634 include/linux/mtd/nand.h 	iter->req.ooboffs = req->ooboffs;
iter              635 include/linux/mtd/nand.h 	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
iter              636 include/linux/mtd/nand.h 	iter->dataleft = req->len;
iter              637 include/linux/mtd/nand.h 	iter->oobleft = req->ooblen;
iter              638 include/linux/mtd/nand.h 	iter->req.databuf.in = req->datbuf;
iter              639 include/linux/mtd/nand.h 	iter->req.datalen = min_t(unsigned int,
iter              640 include/linux/mtd/nand.h 				  nand->memorg.pagesize - iter->req.dataoffs,
iter              641 include/linux/mtd/nand.h 				  iter->dataleft);
iter              642 include/linux/mtd/nand.h 	iter->req.oobbuf.in = req->oobbuf;
iter              643 include/linux/mtd/nand.h 	iter->req.ooblen = min_t(unsigned int,
iter              644 include/linux/mtd/nand.h 				 iter->oobbytes_per_page - iter->req.ooboffs,
iter              645 include/linux/mtd/nand.h 				 iter->oobleft);
iter              656 include/linux/mtd/nand.h 					     struct nand_io_iter *iter)
iter              658 include/linux/mtd/nand.h 	nanddev_pos_next_page(nand, &iter->req.pos);
iter              659 include/linux/mtd/nand.h 	iter->dataleft -= iter->req.datalen;
iter              660 include/linux/mtd/nand.h 	iter->req.databuf.in += iter->req.datalen;
iter              661 include/linux/mtd/nand.h 	iter->oobleft -= iter->req.ooblen;
iter              662 include/linux/mtd/nand.h 	iter->req.oobbuf.in += iter->req.ooblen;
iter              663 include/linux/mtd/nand.h 	iter->req.dataoffs = 0;
iter              664 include/linux/mtd/nand.h 	iter->req.ooboffs = 0;
iter              665 include/linux/mtd/nand.h 	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
iter              666 include/linux/mtd/nand.h 				  iter->dataleft);
iter              667 include/linux/mtd/nand.h 	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
iter              668 include/linux/mtd/nand.h 				 iter->oobleft);
iter              683 include/linux/mtd/nand.h 				       const struct nand_io_iter *iter)
iter              685 include/linux/mtd/nand.h 	if (iter->dataleft || iter->oobleft)
iter              701 include/linux/mtd/nand.h #define nanddev_io_for_each_page(nand, start, req, iter)		\
iter              702 include/linux/mtd/nand.h 	for (nanddev_io_iter_init(nand, start, req, iter);		\
iter              703 include/linux/mtd/nand.h 	     !nanddev_io_iter_end(nand, iter);				\
iter              704 include/linux/mtd/nand.h 	     nanddev_io_iter_next_page(nand, iter))
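The iterator splits one MTD request into per-page sub-requests. A sketch of a driver read loop, where my_read_page() is a hypothetical per-page hook and from/req mirror the mtd_oob_ops arguments:

	struct nand_io_iter iter;
	int ret;

	nanddev_io_for_each_page(nand, from, req, &iter) {
		ret = my_read_page(nand, &iter.req);	/* hypothetical */
		if (ret)
			break;

		req->retlen += iter.req.datalen;
		req->oobretlen += iter.req.ooblen;
	}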
iter             4252 include/linux/netdevice.h 						     struct list_head **iter);
iter             4254 include/linux/netdevice.h 						     struct list_head **iter);
iter             4257 include/linux/netdevice.h #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
iter             4258 include/linux/netdevice.h 	for (iter = &(dev)->adj_list.upper, \
iter             4259 include/linux/netdevice.h 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
iter             4261 include/linux/netdevice.h 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
iter             4274 include/linux/netdevice.h 				    struct list_head **iter);
iter             4276 include/linux/netdevice.h 					struct list_head **iter);
iter             4278 include/linux/netdevice.h #define netdev_for_each_lower_private(dev, priv, iter) \
iter             4279 include/linux/netdevice.h 	for (iter = (dev)->adj_list.lower.next, \
iter             4280 include/linux/netdevice.h 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
iter             4282 include/linux/netdevice.h 	     priv = netdev_lower_get_next_private(dev, &(iter)))
iter             4284 include/linux/netdevice.h #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
iter             4285 include/linux/netdevice.h 	for (iter = &(dev)->adj_list.lower, \
iter             4286 include/linux/netdevice.h 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
iter             4288 include/linux/netdevice.h 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
iter             4291 include/linux/netdevice.h 				struct list_head **iter);
iter             4293 include/linux/netdevice.h #define netdev_for_each_lower_dev(dev, ldev, iter) \
iter             4294 include/linux/netdevice.h 	for (iter = (dev)->adj_list.lower.next, \
iter             4295 include/linux/netdevice.h 	     ldev = netdev_lower_get_next(dev, &(iter)); \
iter             4297 include/linux/netdevice.h 	     ldev = netdev_lower_get_next(dev, &(iter)))
iter             4300 include/linux/netdevice.h 					     struct list_head **iter);
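All of these adjacency walkers share a struct list_head * cursor advanced by the accessor. Sketch of the lower-device walk (the non-RCU variants expect RTNL to be held):

	struct net_device *lower;
	struct list_head *iter;

	ASSERT_RTNL();
	netdev_for_each_lower_dev(dev, lower, iter) {
		/* lower is each directly linked lower device of dev */
	}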
iter              482 include/linux/nfs_fs.h 			struct iov_iter *iter);
iter              484 include/linux/nfs_fs.h 			struct iov_iter *iter);
iter              294 include/linux/pnfs_osd_xdr.h 	struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
iter              297 include/linux/pnfs_osd_xdr.h 	struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
iter              221 include/linux/radix-tree.h 			struct radix_tree_iter *iter, void __rcu **slot);
iter              237 include/linux/radix-tree.h 		const struct radix_tree_iter *iter, unsigned int tag);
iter              252 include/linux/radix-tree.h 			      struct radix_tree_iter *iter, gfp_t gfp,
iter              269 include/linux/radix-tree.h radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
iter              279 include/linux/radix-tree.h 	iter->index = 0;
iter              280 include/linux/radix-tree.h 	iter->next_index = start;
iter              298 include/linux/radix-tree.h 			     struct radix_tree_iter *iter, unsigned flags);
iter              312 include/linux/radix-tree.h 			struct radix_tree_iter *iter, unsigned long index)
iter              314 include/linux/radix-tree.h 	radix_tree_iter_init(iter, index);
iter              315 include/linux/radix-tree.h 	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
iter              328 include/linux/radix-tree.h void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
iter              330 include/linux/radix-tree.h 	iter->next_index = iter->index;
iter              331 include/linux/radix-tree.h 	iter->tags = 0;
iter              336 include/linux/radix-tree.h __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
iter              338 include/linux/radix-tree.h 	return iter->index + slots;
iter              352 include/linux/radix-tree.h 					struct radix_tree_iter *iter);
iter              361 include/linux/radix-tree.h radix_tree_chunk_size(struct radix_tree_iter *iter)
iter              363 include/linux/radix-tree.h 	return iter->next_index - iter->index;
iter              386 include/linux/radix-tree.h 				struct radix_tree_iter *iter, unsigned flags)
iter              389 include/linux/radix-tree.h 		iter->tags >>= 1;
iter              390 include/linux/radix-tree.h 		if (unlikely(!iter->tags))
iter              392 include/linux/radix-tree.h 		if (likely(iter->tags & 1ul)) {
iter              393 include/linux/radix-tree.h 			iter->index = __radix_tree_iter_add(iter, 1);
iter              398 include/linux/radix-tree.h 			unsigned offset = __ffs(iter->tags);
iter              400 include/linux/radix-tree.h 			iter->tags >>= offset++;
iter              401 include/linux/radix-tree.h 			iter->index = __radix_tree_iter_add(iter, offset);
iter              406 include/linux/radix-tree.h 		long count = radix_tree_chunk_size(iter);
iter              410 include/linux/radix-tree.h 			iter->index = __radix_tree_iter_add(iter, 1);
iter              416 include/linux/radix-tree.h 				iter->next_index = 0;
iter              437 include/linux/radix-tree.h #define radix_tree_for_each_slot(slot, root, iter, start)		\
iter              438 include/linux/radix-tree.h 	for (slot = radix_tree_iter_init(iter, start) ;			\
iter              439 include/linux/radix-tree.h 	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
iter              440 include/linux/radix-tree.h 	     slot = radix_tree_next_slot(slot, iter, 0))
iter              453 include/linux/radix-tree.h #define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
iter              454 include/linux/radix-tree.h 	for (slot = radix_tree_iter_init(iter, start) ;			\
iter              455 include/linux/radix-tree.h 	     slot || (slot = radix_tree_next_chunk(root, iter,		\
iter              457 include/linux/radix-tree.h 	     slot = radix_tree_next_slot(slot, iter,			\
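The canonical RCU lookup loop, including the deref/retry dance the slot accessors require. Sketch, assuming tree is a struct radix_tree_root:

	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* entry is the item stored at index iter.index */
	}
	rcu_read_unlock();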
iter              246 include/linux/rhashtable.h 			   struct rhashtable_iter *iter);
iter              247 include/linux/rhashtable.h void rhashtable_walk_exit(struct rhashtable_iter *iter);
iter              248 include/linux/rhashtable.h int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
iter              250 include/linux/rhashtable.h static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
iter              252 include/linux/rhashtable.h 	(void)rhashtable_walk_start_check(iter);
iter              255 include/linux/rhashtable.h void *rhashtable_walk_next(struct rhashtable_iter *iter);
iter              256 include/linux/rhashtable.h void *rhashtable_walk_peek(struct rhashtable_iter *iter);
iter              257 include/linux/rhashtable.h void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
iter             1256 include/linux/rhashtable.h 				       struct rhashtable_iter *iter)
iter             1258 include/linux/rhashtable.h 	return rhashtable_walk_enter(&hlt->ht, iter);
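rhashtable_walk_next() reports concurrent resizes as ERR_PTR(-EAGAIN), so the standard loop checks for that. Sketch, assuming ht is the table and struct my_object the hashed type:

	struct rhashtable_iter iter;
	struct my_object *obj;

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized, keep walking */
			break;
		}
		/* process obj */
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);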
iter              133 include/linux/ring_buffer.h void ring_buffer_read_start(struct ring_buffer_iter *iter);
iter              134 include/linux/ring_buffer.h void ring_buffer_read_finish(struct ring_buffer_iter *iter);
iter              137 include/linux/ring_buffer.h ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
iter              139 include/linux/ring_buffer.h ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
iter              140 include/linux/ring_buffer.h void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
iter              141 include/linux/ring_buffer.h int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
iter             3460 include/linux/skbuff.h #define skb_walk_frags(skb, iter)	\
iter             3461 include/linux/skbuff.h 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
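Sketch: visit each skb chained on another skb's frag_list:

	struct sk_buff *frag;

	skb_walk_frags(skb, frag) {
		/* frag->len bytes belong to the parent skb's payload */
	}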
iter               51 include/linux/trace_events.h int trace_raw_output_prep(struct trace_iterator *iter,
iter              106 include/linux/uio.h static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
iter              109 include/linux/uio.h 		.iov_base = iter->iov->iov_base + iter->iov_offset,
iter              110 include/linux/uio.h 		.iov_len = min(iter->count,
iter              111 include/linux/uio.h 			       iter->iov->iov_len - iter->iov_offset),
iter               82 include/net/bonding.h #define bond_for_each_slave(bond, pos, iter) \
iter               83 include/net/bonding.h 	netdev_for_each_lower_private((bond)->dev, pos, iter)
iter               86 include/net/bonding.h #define bond_for_each_slave_rcu(bond, pos, iter) \
iter               87 include/net/bonding.h 	netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
iter              378 include/net/bonding.h 	struct list_head *iter;
iter              381 include/net/bonding.h 	bond_for_each_slave(bond, tmp, iter) {
iter              391 include/net/bonding.h 	struct list_head *iter;
iter              394 include/net/bonding.h 	bond_for_each_slave(bond, tmp, iter) {
iter              576 include/net/bonding.h 	struct list_head *iter;
iter              579 include/net/bonding.h 	bond_for_each_slave(bond, tmp, iter) {
iter              672 include/net/bonding.h 	struct list_head *iter;
iter              675 include/net/bonding.h 	bond_for_each_slave(bond, tmp, iter)
iter              686 include/net/bonding.h 	struct list_head *iter;
iter              689 include/net/bonding.h 	bond_for_each_slave_rcu(bond, tmp, iter)
iter              699 include/net/bonding.h 	struct list_head *iter;
iter              703 include/net/bonding.h 	bond_for_each_slave_rcu(bond, tmp, iter)
iter             5882 include/net/cfg80211.h 		       void (*iter)(struct wiphy *wiphy,
iter             7269 include/net/cfg80211.h 			       void (*iter)(const struct ieee80211_iface_combination *c,
iter              174 include/net/ip.h 		      unsigned int hlen, struct ip_fraglist_iter *iter);
iter              175 include/net/ip.h void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
iter              177 include/net/ip.h static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
iter              179 include/net/ip.h 	struct sk_buff *skb = iter->frag;
iter              181 include/net/ip.h 	iter->frag = skb->next;
iter              165 include/net/ipv6.h 		      struct ip6_fraglist_iter *iter);
iter              166 include/net/ipv6.h void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);
iter              168 include/net/ipv6.h static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter)
iter              170 include/net/ipv6.h 	struct sk_buff *skb = iter->frag;
iter              172 include/net/ipv6.h 	iter->frag = skb->next;
iter             5579 include/net/mac80211.h 			 void (*iter)(struct ieee80211_hw *hw,
iter             5603 include/net/mac80211.h 			     void (*iter)(struct ieee80211_hw *hw,
iter             5631 include/net/mac80211.h 	void (*iter)(struct ieee80211_hw *hw,
iter              222 include/net/netfilter/nf_conntrack.h 			       int (*iter)(struct nf_conn *i, void *data),
iter              226 include/net/netfilter/nf_conntrack.h void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
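Both conntrack iterators take a predicate that returns non-zero for entries the core should remove. A sketch of such a callback and its use (kill_udp() is hypothetical):

/* return non-zero to have the iterator delete this conntrack entry */
static int kill_udp(struct nf_conn *ct, void *data)
{
	return nf_ct_protonum(ct) == IPPROTO_UDP;
}

	/* e.g. on module unload: */
	nf_ct_iterate_destroy(kill_udp, NULL);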
iter              116 include/net/netfilter/nf_conntrack_expect.h void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data);
iter              118 include/net/netfilter/nf_conntrack_expect.h 			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
iter              252 include/net/netfilter/nf_tables.h 			      const struct nft_set_iter *iter,
iter              355 include/net/netfilter/nf_tables.h 						struct nft_set_iter *iter);
iter              330 include/net/netlabel.h 	struct netlbl_lsm_catmap *iter;
iter              333 include/net/netlabel.h 		iter = catmap;
iter              335 include/net/netlabel.h 		kfree(iter);
iter              106 include/net/sctp/sctp.h void sctp_transport_walk_start(struct rhashtable_iter *iter);
iter              107 include/net/sctp/sctp.h void sctp_transport_walk_stop(struct rhashtable_iter *iter);
iter              109 include/net/sctp/sctp.h 			struct rhashtable_iter *iter);
iter              111 include/net/sctp/sctp.h 			struct rhashtable_iter *iter, int pos);
iter             2943 include/rdma/ib_verbs.h #define rdma_for_each_port(device, iter)                                       \
iter             2944 include/rdma/ib_verbs.h 	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
iter             2945 include/rdma/ib_verbs.h 						     unsigned int, iter)));    \
iter             2946 include/rdma/ib_verbs.h 	     iter <= rdma_end_port(device); (iter)++)
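The BUILD_BUG_ON_ZERO() inside the macro forces the loop variable to be exactly unsigned int. Sketch:

	unsigned int port;	/* type is checked by the macro */

	rdma_for_each_port(device, port) {
		/* port runs from rdma_start_port(device) to rdma_end_port(device) */
	}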
iter             1014 include/rdma/rdmavt_qp.h int rvt_qp_iter_next(struct rvt_qp_iter *iter);
iter              464 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, struct iov_iter *iter,
iter              467 include/trace/events/afs.h 	    TP_ARGS(call, iter, want_more, ret),
iter              482 include/trace/events/afs.h 		    __entry->remain	= iov_iter_count(iter);
iter              346 include/trace/trace_events.h trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
iter              349 include/trace/trace_events.h 	struct trace_seq *s = &iter->seq;				\
iter              350 include/trace/trace_events.h 	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
iter              354 include/trace/trace_events.h 	field = (typeof(field))iter->ent;				\
iter              356 include/trace/trace_events.h 	ret = trace_raw_output_prep(iter, trace_event);			\
iter              371 include/trace/trace_events.h trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
iter              376 include/trace/trace_events.h 	struct trace_seq *p = &iter->tmp_seq;				\
iter              378 include/trace/trace_events.h 	entry = iter->ent;						\
iter              388 include/trace/trace_events.h 	return trace_output_call(iter, #call, print);			\
iter              250 include/uapi/drm/etnaviv_drm.h 	__u8  iter;       /* in/out, select pm domain at index iter */
iter              260 include/uapi/drm/etnaviv_drm.h 	__u16 iter;       /* in/out, select pm source at index iter */
iter              747 ipc/util.c     	struct ipc_proc_iter *iter = s->private;
iter              748 ipc/util.c     	return iter->pid_ns;
iter              786 ipc/util.c     	struct ipc_proc_iter *iter = s->private;
iter              787 ipc/util.c     	struct ipc_proc_iface *iface = iter->iface;
iter              794 ipc/util.c     	return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
iter              803 ipc/util.c     	struct ipc_proc_iter *iter = s->private;
iter              804 ipc/util.c     	struct ipc_proc_iface *iface = iter->iface;
iter              807 ipc/util.c     	ids = &iter->ns->ids[iface->ids];
iter              830 ipc/util.c     	struct ipc_proc_iter *iter = s->private;
iter              831 ipc/util.c     	struct ipc_proc_iface *iface = iter->iface;
iter              838 ipc/util.c     	ids = &iter->ns->ids[iface->ids];
iter              845 ipc/util.c     	struct ipc_proc_iter *iter = s->private;
iter              846 ipc/util.c     	struct ipc_proc_iface *iface = iter->iface;
iter              865 ipc/util.c     	struct ipc_proc_iter *iter;
iter              867 ipc/util.c     	iter = __seq_open_private(file, &sysvipc_proc_seqops, sizeof(*iter));
iter              868 ipc/util.c     	if (!iter)
iter              871 ipc/util.c     	iter->iface = PDE_DATA(inode);
iter              872 ipc/util.c     	iter->ns    = get_ipc_ns(current->nsproxy->ipc_ns);
iter              873 ipc/util.c     	iter->pid_ns = get_pid_ns(task_active_pid_ns(current));
iter              881 ipc/util.c     	struct ipc_proc_iter *iter = seq->private;
iter              882 ipc/util.c     	put_ipc_ns(iter->ns);
iter              883 ipc/util.c     	put_pid_ns(iter->pid_ns);
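The ipc/util.c lines above use the stock seq_file private-iterator pattern. A self-contained sketch of that pattern, with hypothetical my_iter/my_open/my_seq_ops names (the real code additionally pins the ipc and pid namespaces in the open path and drops them on release):

	#include <linux/seq_file.h>

	struct my_iter {
		loff_t pos;			/* per-open cursor state for ->start/->next */
	};

	static const struct seq_operations my_seq_ops;	/* .start/.next/.stop/.show elided */

	static int my_open(struct inode *inode, struct file *file)
	{
		struct my_iter *iter;

		/* allocates the seq_file and hangs a zeroed my_iter off seq->private */
		iter = __seq_open_private(file, &my_seq_ops, sizeof(*iter));
		if (!iter)
			return -ENOMEM;
		iter->pos = 0;
		return 0;
	}

	/* the matching .release is seq_release_private(), which frees seq->private */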
iter              998 kernel/auditsc.c 	unsigned int iter;
iter             1031 kernel/auditsc.c 	iter = 0;
iter             1104 kernel/auditsc.c 			if (require_data || (iter > 0) ||
iter             1106 kernel/auditsc.c 				if (iter == 0) {
iter             1114 kernel/auditsc.c 						    " a%d[%d]=", arg, iter++);
iter             1148 kernel/auditsc.c 			iter = 0;
iter              166 kernel/bpf/inode.c static void map_iter_free(struct map_iter *iter)
iter              168 kernel/bpf/inode.c 	if (iter) {
iter              169 kernel/bpf/inode.c 		kfree(iter->key);
iter              170 kernel/bpf/inode.c 		kfree(iter);
iter              176 kernel/bpf/inode.c 	struct map_iter *iter;
iter              178 kernel/bpf/inode.c 	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
iter              179 kernel/bpf/inode.c 	if (!iter)
iter              182 kernel/bpf/inode.c 	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
iter              183 kernel/bpf/inode.c 	if (!iter->key)
iter              186 kernel/bpf/inode.c 	return iter;
iter              189 kernel/bpf/inode.c 	map_iter_free(iter);
iter              252 kernel/bpf/inode.c 	struct map_iter *iter;
iter              256 kernel/bpf/inode.c 	iter = map_iter_alloc(map);
iter              257 kernel/bpf/inode.c 	if (!iter)
iter              262 kernel/bpf/inode.c 		map_iter_free(iter);
iter              267 kernel/bpf/inode.c 	m->private = iter;
iter              405 kernel/cgroup/cgroup-v1.c 	int *iter, ret;
iter              448 kernel/cgroup/cgroup-v1.c 	iter = l->list + index;
iter              449 kernel/cgroup/cgroup-v1.c 	*pos = *iter;
iter              450 kernel/cgroup/cgroup-v1.c 	return iter;
iter             1916 kernel/events/core.c 	struct perf_event *iter;
iter             1922 kernel/events/core.c 		iter = event->aux_event;
iter             1924 kernel/events/core.c 		put_event(iter);
iter             1932 kernel/events/core.c 	for_each_sibling_event(iter, event->group_leader) {
iter             1933 kernel/events/core.c 		if (iter->aux_event != event)
iter             1936 kernel/events/core.c 		iter->aux_event = NULL;
iter             1944 kernel/events/core.c 		event_sched_out(iter, cpuctx, ctx);
iter             6984 kernel/events/core.c 	struct perf_event *iter;
iter             6989 kernel/events/core.c 	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
iter             6996 kernel/events/core.c 		cpu = iter->cpu;
iter             6998 kernel/events/core.c 			cpu = READ_ONCE(iter->oncpu);
iter             9173 kernel/events/core.c 	struct perf_addr_filter *filter, *iter;
iter             9175 kernel/events/core.c 	list_for_each_entry_safe(filter, iter, filters, entry) {
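The walk above uses list_for_each_entry_safe() because filters are torn down while iterating: the second cursor already points at the next node, so the current one can be unlinked and freed. A sketch of the idiom (free_filters is a hypothetical name; struct perf_addr_filter is linked through its 'entry' member, as the listed line shows):

	static void free_filters(struct list_head *filters)
	{
		struct perf_addr_filter *filter, *iter;

		list_for_each_entry_safe(filter, iter, filters, entry) {
			list_del(&filter->entry);	/* safe: 'iter' is the lookahead */
			kfree(filter);
		}
	}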
iter              108 kernel/events/hw_breakpoint.c 	struct perf_event *iter;
iter              111 kernel/events/hw_breakpoint.c 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
iter              112 kernel/events/hw_breakpoint.c 		if (iter->hw.target == tsk &&
iter              113 kernel/events/hw_breakpoint.c 		    find_slot_idx(iter->attr.bp_type) == type &&
iter              114 kernel/events/hw_breakpoint.c 		    (iter->cpu < 0 || cpu == iter->cpu))
iter              115 kernel/events/hw_breakpoint.c 			count += hw_breakpoint_weight(iter);
iter              492 kernel/gcov/clang.c 	struct gcov_iterator *iter;
iter              494 kernel/gcov/clang.c 	iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL);
iter              495 kernel/gcov/clang.c 	if (!iter)
iter              498 kernel/gcov/clang.c 	iter->info = info;
iter              500 kernel/gcov/clang.c 	iter->size = convert_to_gcda(NULL, info);
iter              501 kernel/gcov/clang.c 	iter->buffer = vmalloc(iter->size);
iter              502 kernel/gcov/clang.c 	if (!iter->buffer)
iter              505 kernel/gcov/clang.c 	convert_to_gcda(iter->buffer, info);
iter              507 kernel/gcov/clang.c 	return iter;
iter              510 kernel/gcov/clang.c 	kfree(iter);
iter              519 kernel/gcov/clang.c void gcov_iter_free(struct gcov_iterator *iter)
iter              521 kernel/gcov/clang.c 	vfree(iter->buffer);
iter              522 kernel/gcov/clang.c 	kfree(iter);
iter              529 kernel/gcov/clang.c struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
iter              531 kernel/gcov/clang.c 	return iter->info;
iter              538 kernel/gcov/clang.c void gcov_iter_start(struct gcov_iterator *iter)
iter              540 kernel/gcov/clang.c 	iter->pos = 0;
iter              549 kernel/gcov/clang.c int gcov_iter_next(struct gcov_iterator *iter)
iter              551 kernel/gcov/clang.c 	if (iter->pos < iter->size)
iter              552 kernel/gcov/clang.c 		iter->pos += ITER_STRIDE;
iter              554 kernel/gcov/clang.c 	if (iter->pos >= iter->size)
iter              567 kernel/gcov/clang.c int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
iter              571 kernel/gcov/clang.c 	if (iter->pos >= iter->size)
iter              575 kernel/gcov/clang.c 	if (iter->pos + len > iter->size)
iter              576 kernel/gcov/clang.c 		len = iter->size - iter->pos;
iter              578 kernel/gcov/clang.c 	seq_write(seq, iter->buffer + iter->pos, len);
iter              109 kernel/gcov/fs.c 	struct gcov_iterator *iter = data;
iter              112 kernel/gcov/fs.c 	if (gcov_iter_next(iter))
iter              115 kernel/gcov/fs.c 	return iter;
iter              121 kernel/gcov/fs.c 	struct gcov_iterator *iter = data;
iter              123 kernel/gcov/fs.c 	if (gcov_iter_write(iter, seq))
iter              181 kernel/gcov/fs.c 	struct gcov_iterator *iter;
iter              195 kernel/gcov/fs.c 	iter = gcov_iter_new(info);
iter              196 kernel/gcov/fs.c 	if (!iter)
iter              202 kernel/gcov/fs.c 	seq->private = iter;
iter              208 kernel/gcov/fs.c 	gcov_iter_free(iter);
iter              220 kernel/gcov/fs.c 	struct gcov_iterator *iter;
iter              225 kernel/gcov/fs.c 	iter = seq->private;
iter              226 kernel/gcov/fs.c 	info = gcov_iter_get_info(iter);
iter              227 kernel/gcov/fs.c 	gcov_iter_free(iter);
iter              358 kernel/gcov/gcc_3_4.c static struct gcov_fn_info *get_func(struct gcov_iterator *iter)
iter              360 kernel/gcov/gcc_3_4.c 	return get_fn_info(iter->info, iter->function);
iter              363 kernel/gcov/gcc_3_4.c static struct type_info *get_type(struct gcov_iterator *iter)
iter              365 kernel/gcov/gcc_3_4.c 	return &iter->type_info[iter->type];
iter              376 kernel/gcov/gcc_3_4.c 	struct gcov_iterator *iter;
iter              378 kernel/gcov/gcc_3_4.c 	iter = kzalloc(struct_size(iter, type_info, num_counter_active(info)),
iter              380 kernel/gcov/gcc_3_4.c 	if (iter)
iter              381 kernel/gcov/gcc_3_4.c 		iter->info = info;
iter              383 kernel/gcov/gcc_3_4.c 	return iter;
iter              390 kernel/gcov/gcc_3_4.c void gcov_iter_free(struct gcov_iterator *iter)
iter              392 kernel/gcov/gcc_3_4.c 	kfree(iter);
iter              399 kernel/gcov/gcc_3_4.c struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
iter              401 kernel/gcov/gcc_3_4.c 	return iter->info;
iter              408 kernel/gcov/gcc_3_4.c void gcov_iter_start(struct gcov_iterator *iter)
iter              412 kernel/gcov/gcc_3_4.c 	iter->record = 0;
iter              413 kernel/gcov/gcc_3_4.c 	iter->function = 0;
iter              414 kernel/gcov/gcc_3_4.c 	iter->type = 0;
iter              415 kernel/gcov/gcc_3_4.c 	iter->count = 0;
iter              416 kernel/gcov/gcc_3_4.c 	iter->num_types = 0;
iter              418 kernel/gcov/gcc_3_4.c 		if (counter_active(iter->info, i)) {
iter              419 kernel/gcov/gcc_3_4.c 			iter->type_info[iter->num_types].ctr_type = i;
iter              420 kernel/gcov/gcc_3_4.c 			iter->type_info[iter->num_types++].offset = 0;
iter              443 kernel/gcov/gcc_3_4.c int gcov_iter_next(struct gcov_iterator *iter)
iter              445 kernel/gcov/gcc_3_4.c 	switch (iter->record) {
iter              453 kernel/gcov/gcc_3_4.c 		iter->record++;
iter              457 kernel/gcov/gcc_3_4.c 		iter->count++;
iter              460 kernel/gcov/gcc_3_4.c 		if (iter->count < get_func(iter)->n_ctrs[iter->type]) {
iter              461 kernel/gcov/gcc_3_4.c 			iter->record = 9;
iter              465 kernel/gcov/gcc_3_4.c 		get_type(iter)->offset += iter->count;
iter              466 kernel/gcov/gcc_3_4.c 		iter->count = 0;
iter              467 kernel/gcov/gcc_3_4.c 		iter->type++;
iter              470 kernel/gcov/gcc_3_4.c 		if (iter->type < iter->num_types) {
iter              471 kernel/gcov/gcc_3_4.c 			iter->record = 7;
iter              475 kernel/gcov/gcc_3_4.c 		iter->type = 0;
iter              476 kernel/gcov/gcc_3_4.c 		iter->function++;
iter              479 kernel/gcov/gcc_3_4.c 		if (iter->function < iter->info->n_functions)
iter              480 kernel/gcov/gcc_3_4.c 			iter->record = 3;
iter              482 kernel/gcov/gcc_3_4.c 			iter->record = -1;
iter              486 kernel/gcov/gcc_3_4.c 	if (iter->record == -1)
iter              532 kernel/gcov/gcc_3_4.c int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
iter              536 kernel/gcov/gcc_3_4.c 	switch (iter->record) {
iter              541 kernel/gcov/gcc_3_4.c 		rc = seq_write_gcov_u32(seq, iter->info->version);
iter              544 kernel/gcov/gcc_3_4.c 		rc = seq_write_gcov_u32(seq, iter->info->stamp);
iter              553 kernel/gcov/gcc_3_4.c 		rc = seq_write_gcov_u32(seq, get_func(iter)->ident);
iter              556 kernel/gcov/gcc_3_4.c 		rc = seq_write_gcov_u32(seq, get_func(iter)->checksum);
iter              560 kernel/gcov/gcc_3_4.c 			GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type));
iter              564 kernel/gcov/gcc_3_4.c 				get_func(iter)->n_ctrs[iter->type] * 2);
iter              568 kernel/gcov/gcc_3_4.c 			iter->info->counts[iter->type].
iter              569 kernel/gcov/gcc_3_4.c 				values[iter->count + get_type(iter)->offset]);
iter              493 kernel/gcov/gcc_4_7.c 	struct gcov_iterator *iter;
iter              495 kernel/gcov/gcc_4_7.c 	iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL);
iter              496 kernel/gcov/gcc_4_7.c 	if (!iter)
iter              499 kernel/gcov/gcc_4_7.c 	iter->info = info;
iter              501 kernel/gcov/gcc_4_7.c 	iter->size = convert_to_gcda(NULL, info);
iter              502 kernel/gcov/gcc_4_7.c 	iter->buffer = vmalloc(iter->size);
iter              503 kernel/gcov/gcc_4_7.c 	if (!iter->buffer)
iter              506 kernel/gcov/gcc_4_7.c 	convert_to_gcda(iter->buffer, info);
iter              508 kernel/gcov/gcc_4_7.c 	return iter;
iter              511 kernel/gcov/gcc_4_7.c 	kfree(iter);
iter              520 kernel/gcov/gcc_4_7.c void gcov_iter_free(struct gcov_iterator *iter)
iter              522 kernel/gcov/gcc_4_7.c 	vfree(iter->buffer);
iter              523 kernel/gcov/gcc_4_7.c 	kfree(iter);
iter              530 kernel/gcov/gcc_4_7.c struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
iter              532 kernel/gcov/gcc_4_7.c 	return iter->info;
iter              539 kernel/gcov/gcc_4_7.c void gcov_iter_start(struct gcov_iterator *iter)
iter              541 kernel/gcov/gcc_4_7.c 	iter->pos = 0;
iter              550 kernel/gcov/gcc_4_7.c int gcov_iter_next(struct gcov_iterator *iter)
iter              552 kernel/gcov/gcc_4_7.c 	if (iter->pos < iter->size)
iter              553 kernel/gcov/gcc_4_7.c 		iter->pos += ITER_STRIDE;
iter              555 kernel/gcov/gcc_4_7.c 	if (iter->pos >= iter->size)
iter              568 kernel/gcov/gcc_4_7.c int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
iter              572 kernel/gcov/gcc_4_7.c 	if (iter->pos >= iter->size)
iter              576 kernel/gcov/gcc_4_7.c 	if (iter->pos + len > iter->size)
iter              577 kernel/gcov/gcc_4_7.c 		len = iter->size - iter->pos;
iter              579 kernel/gcov/gcc_4_7.c 	seq_write(seq, iter->buffer + iter->pos, len);
iter               66 kernel/gcov/gcov.h void gcov_iter_free(struct gcov_iterator *iter);
iter               67 kernel/gcov/gcov.h void gcov_iter_start(struct gcov_iterator *iter);
iter               68 kernel/gcov/gcov.h int gcov_iter_next(struct gcov_iterator *iter);
iter               69 kernel/gcov/gcov.h int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq);
iter               70 kernel/gcov/gcov.h struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter);
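Taken together, the gcov_iter_*() prototypes above (plus gcov_iter_new(), visible in the kernel/gcov/fs.c lines earlier) form a record-at-a-time dump interface with per-format backends in clang.c, gcc_3_4.c and gcc_4_7.c. A hedged sketch of a consumer, modelled on the fs.c seq_file usage; dump_info is a hypothetical helper, and the "nonzero means no more records" reading of gcov_iter_next()/gcov_iter_write() is inferred from the bodies listed above:

	static int dump_info(struct seq_file *seq, struct gcov_info *info)
	{
		struct gcov_iterator *iter = gcov_iter_new(info);

		if (!iter)
			return -ENOMEM;
		gcov_iter_start(iter);
		do {
			if (gcov_iter_write(iter, seq))	/* emit the current record */
				break;
		} while (!gcov_iter_next(iter));	/* nonzero: past the last record */
		gcov_iter_free(iter);
		return 0;
	}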
iter              321 kernel/jump_label.c 	struct jump_entry *iter;
iter              323 kernel/jump_label.c 	iter = iter_start;
iter              324 kernel/jump_label.c 	while (iter < iter_stop) {
iter              325 kernel/jump_label.c 		if (addr_conflict(iter, start, end))
iter              327 kernel/jump_label.c 		iter++;
iter              458 kernel/jump_label.c 	struct jump_entry *iter;
iter              476 kernel/jump_label.c 	for (iter = iter_start; iter < iter_stop; iter++) {
iter              480 kernel/jump_label.c 		if (jump_label_type(iter) == JUMP_LABEL_NOP)
iter              481 kernel/jump_label.c 			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
iter              483 kernel/jump_label.c 		if (init_section_contains((void *)jump_entry_code(iter), 1))
iter              484 kernel/jump_label.c 			jump_entry_set_init(iter);
iter              486 kernel/jump_label.c 		iterk = jump_entry_key(iter);
iter              491 kernel/jump_label.c 		static_key_set_entries(key, iter);
iter              594 kernel/jump_label.c 	struct jump_entry *iter;
iter              600 kernel/jump_label.c 	for (iter = iter_start; iter < iter_stop; iter++) {
iter              602 kernel/jump_label.c 		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
iter              603 kernel/jump_label.c 			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
iter              611 kernel/jump_label.c 	struct jump_entry *iter;
iter              621 kernel/jump_label.c 	for (iter = iter_start; iter < iter_stop; iter++) {
iter              624 kernel/jump_label.c 		if (within_module_init(jump_entry_code(iter), mod))
iter              625 kernel/jump_label.c 			jump_entry_set_init(iter);
iter              627 kernel/jump_label.c 		iterk = jump_entry_key(iter);
iter              633 kernel/jump_label.c 			static_key_set_entries(key, iter);
iter              655 kernel/jump_label.c 		jlm->entries = iter;
iter              661 kernel/jump_label.c 		if (jump_label_type(iter) != jump_label_init_type(iter))
iter              662 kernel/jump_label.c 			__jump_label_update(key, iter, iter_stop, true);
iter              672 kernel/jump_label.c 	struct jump_entry *iter;
iter              676 kernel/jump_label.c 	for (iter = iter_start; iter < iter_stop; iter++) {
iter              677 kernel/jump_label.c 		if (jump_entry_key(iter) == key)
iter              680 kernel/jump_label.c 		key = jump_entry_key(iter);
iter              456 kernel/kallsyms.c static int get_ksymbol_arch(struct kallsym_iter *iter)
iter              458 kernel/kallsyms.c 	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
iter              459 kernel/kallsyms.c 				   &iter->value, &iter->type,
iter              460 kernel/kallsyms.c 				   iter->name);
iter              463 kernel/kallsyms.c 		iter->pos_arch_end = iter->pos;
iter              470 kernel/kallsyms.c static int get_ksymbol_mod(struct kallsym_iter *iter)
iter              472 kernel/kallsyms.c 	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
iter              473 kernel/kallsyms.c 				     &iter->value, &iter->type,
iter              474 kernel/kallsyms.c 				     iter->name, iter->module_name,
iter              475 kernel/kallsyms.c 				     &iter->exported);
iter              477 kernel/kallsyms.c 		iter->pos_mod_end = iter->pos;
iter              484 kernel/kallsyms.c static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
iter              486 kernel/kallsyms.c 	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
iter              487 kernel/kallsyms.c 					 &iter->value, &iter->type,
iter              488 kernel/kallsyms.c 					 iter->name, iter->module_name,
iter              489 kernel/kallsyms.c 					 &iter->exported);
iter              491 kernel/kallsyms.c 		iter->pos_ftrace_mod_end = iter->pos;
iter              498 kernel/kallsyms.c static int get_ksymbol_bpf(struct kallsym_iter *iter)
iter              500 kernel/kallsyms.c 	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
iter              501 kernel/kallsyms.c 	iter->exported = 0;
iter              502 kernel/kallsyms.c 	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
iter              503 kernel/kallsyms.c 			       &iter->value, &iter->type,
iter              504 kernel/kallsyms.c 			       iter->name) < 0 ? 0 : 1;
iter              508 kernel/kallsyms.c static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
iter              510 kernel/kallsyms.c 	unsigned off = iter->nameoff;
iter              512 kernel/kallsyms.c 	iter->module_name[0] = '\0';
iter              513 kernel/kallsyms.c 	iter->value = kallsyms_sym_address(iter->pos);
iter              515 kernel/kallsyms.c 	iter->type = kallsyms_get_symbol_type(off);
iter              517 kernel/kallsyms.c 	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));
iter              519 kernel/kallsyms.c 	return off - iter->nameoff;
iter              522 kernel/kallsyms.c static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
iter              524 kernel/kallsyms.c 	iter->name[0] = '\0';
iter              525 kernel/kallsyms.c 	iter->nameoff = get_symbol_offset(new_pos);
iter              526 kernel/kallsyms.c 	iter->pos = new_pos;
iter              528 kernel/kallsyms.c 		iter->pos_arch_end = 0;
iter              529 kernel/kallsyms.c 		iter->pos_mod_end = 0;
iter              530 kernel/kallsyms.c 		iter->pos_ftrace_mod_end = 0;
iter              539 kernel/kallsyms.c static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
iter              541 kernel/kallsyms.c 	iter->pos = pos;
iter              543 kernel/kallsyms.c 	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
iter              544 kernel/kallsyms.c 	    get_ksymbol_arch(iter))
iter              547 kernel/kallsyms.c 	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
iter              548 kernel/kallsyms.c 	    get_ksymbol_mod(iter))
iter              551 kernel/kallsyms.c 	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
iter              552 kernel/kallsyms.c 	    get_ksymbol_ftrace_mod(iter))
iter              555 kernel/kallsyms.c 	return get_ksymbol_bpf(iter);
iter              559 kernel/kallsyms.c static int update_iter(struct kallsym_iter *iter, loff_t pos)
iter              563 kernel/kallsyms.c 		return update_iter_mod(iter, pos);
iter              566 kernel/kallsyms.c 	if (pos != iter->pos)
iter              567 kernel/kallsyms.c 		reset_iter(iter, pos);
iter              569 kernel/kallsyms.c 	iter->nameoff += get_ksymbol_core(iter);
iter              570 kernel/kallsyms.c 	iter->pos++;
iter              598 kernel/kallsyms.c 	struct kallsym_iter *iter = m->private;
iter              601 kernel/kallsyms.c 	if (!iter->name[0])
iter              604 kernel/kallsyms.c 	value = iter->show_value ? (void *)iter->value : NULL;
iter              606 kernel/kallsyms.c 	if (iter->module_name[0]) {
iter              613 kernel/kallsyms.c 		type = iter->exported ? toupper(iter->type) :
iter              614 kernel/kallsyms.c 					tolower(iter->type);
iter              616 kernel/kallsyms.c 			   type, iter->name, iter->module_name);
iter              619 kernel/kallsyms.c 			   iter->type, iter->name);
iter              671 kernel/kallsyms.c 	struct kallsym_iter *iter;
iter              672 kernel/kallsyms.c 	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
iter              673 kernel/kallsyms.c 	if (!iter)
iter              675 kernel/kallsyms.c 	reset_iter(iter, 0);
iter              677 kernel/kallsyms.c 	iter->show_value = kallsyms_show_value();
iter             2199 kernel/kprobes.c 	unsigned long *iter;
iter             2202 kernel/kprobes.c 	for (iter = start; iter < end; iter++) {
iter             2203 kernel/kprobes.c 		entry = arch_deref_entry_point((void *)*iter);
iter              548 kernel/locking/lockdep_proc.c 	struct lock_stat_data *iter;
iter              553 kernel/locking/lockdep_proc.c 	iter = data->stats + (*pos - 1);
iter              554 kernel/locking/lockdep_proc.c 	if (iter >= data->iter_end)
iter              555 kernel/locking/lockdep_proc.c 		iter = NULL;
iter              557 kernel/locking/lockdep_proc.c 	return iter;
iter              598 kernel/locking/lockdep_proc.c 		struct lock_stat_data *iter = data->stats;
iter              602 kernel/locking/lockdep_proc.c 			iter->class = class;
iter              603 kernel/locking/lockdep_proc.c 			iter->stats = lock_stats(class);
iter              604 kernel/locking/lockdep_proc.c 			iter++;
iter              606 kernel/locking/lockdep_proc.c 		data->iter_end = iter;
iter             1726 kernel/rcu/rcutorture.c static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
iter             1730 kernel/rcu/rcutorture.c 		if (need_resched() || (iter & 0xfff))
iter              723 kernel/sched/psi.c static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
iter              728 kernel/sched/psi.c 	if (!*iter)
iter              730 kernel/sched/psi.c 	else if (*iter == &psi_system)
iter              733 kernel/sched/psi.c 		cgroup = cgroup_parent(*iter);
iter              736 kernel/sched/psi.c 		*iter = cgroup;
iter              740 kernel/sched/psi.c 	if (*iter)
iter              743 kernel/sched/psi.c 	*iter = &psi_system;
iter              752 kernel/sched/psi.c 	void *iter = NULL;
iter              780 kernel/sched/psi.c 	while ((group = iterate_groups(task, &iter))) {
iter              794 kernel/sched/psi.c 	void *iter = NULL;
iter              796 kernel/sched/psi.c 	while ((group = iterate_groups(task, &iter))) {
iter              470 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq)					\
iter              471 kernel/sched/rt.c 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
iter              472 kernel/sched/rt.c 		(iter = next_task_group(iter)) &&			\
iter              473 kernel/sched/rt.c 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
iter              577 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq) \
iter              578 kernel/sched/rt.c 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
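Both variants above keep the same call shape, so callers declare an opaque rt_rq_iter_t cursor and iterate identically with or without CONFIG_RT_GROUP_SCHED, as the kernel/sched/rt.c lines further down show:

	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	for_each_rt_rq(rt_rq, iter, rq)
		/* operate on each rt_rq reachable from this runqueue */;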
iter              651 kernel/sched/rt.c 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
iter              654 kernel/sched/rt.c 		if (iter == rt_rq)
iter              657 kernel/sched/rt.c 		raw_spin_lock(&iter->rt_runtime_lock);
iter              663 kernel/sched/rt.c 		if (iter->rt_runtime == RUNTIME_INF)
iter              670 kernel/sched/rt.c 		diff = iter->rt_runtime - iter->rt_time;
iter              675 kernel/sched/rt.c 			iter->rt_runtime -= diff;
iter              678 kernel/sched/rt.c 				raw_spin_unlock(&iter->rt_runtime_lock);
iter              683 kernel/sched/rt.c 		raw_spin_unlock(&iter->rt_runtime_lock);
iter              694 kernel/sched/rt.c 	rt_rq_iter_t iter;
iter              700 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, rq) {
iter              728 kernel/sched/rt.c 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
iter              734 kernel/sched/rt.c 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
iter              737 kernel/sched/rt.c 			raw_spin_lock(&iter->rt_runtime_lock);
iter              739 kernel/sched/rt.c 				diff = min_t(s64, iter->rt_runtime, want);
iter              740 kernel/sched/rt.c 				iter->rt_runtime -= diff;
iter              743 kernel/sched/rt.c 				iter->rt_runtime -= want;
iter              746 kernel/sched/rt.c 			raw_spin_unlock(&iter->rt_runtime_lock);
iter              775 kernel/sched/rt.c 	rt_rq_iter_t iter;
iter              784 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, rq) {
iter             2715 kernel/sched/rt.c 	rt_rq_iter_t iter;
iter             2719 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
iter              306 kernel/time/timer_list.c 	struct timer_list_iter *iter = v;
iter              308 kernel/time/timer_list.c 	if (iter->cpu == -1 && !iter->second_pass)
iter              309 kernel/time/timer_list.c 		timer_list_header(m, iter->now);
iter              310 kernel/time/timer_list.c 	else if (!iter->second_pass)
iter              311 kernel/time/timer_list.c 		print_cpu(m, iter->cpu, iter->now);
iter              313 kernel/time/timer_list.c 	else if (iter->cpu == -1 && iter->second_pass)
iter              316 kernel/time/timer_list.c 		print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
iter              321 kernel/time/timer_list.c static void *move_iter(struct timer_list_iter *iter, loff_t offset)
iter              324 kernel/time/timer_list.c 		iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
iter              325 kernel/time/timer_list.c 		if (iter->cpu >= nr_cpu_ids) {
iter              327 kernel/time/timer_list.c 			if (!iter->second_pass) {
iter              328 kernel/time/timer_list.c 				iter->cpu = -1;
iter              329 kernel/time/timer_list.c 				iter->second_pass = true;
iter              337 kernel/time/timer_list.c 	return iter;
iter              342 kernel/time/timer_list.c 	struct timer_list_iter *iter = file->private;
iter              345 kernel/time/timer_list.c 		iter->now = ktime_to_ns(ktime_get());
iter              346 kernel/time/timer_list.c 	iter->cpu = -1;
iter              347 kernel/time/timer_list.c 	iter->second_pass = false;
iter              348 kernel/time/timer_list.c 	return move_iter(iter, *offset);
iter              353 kernel/time/timer_list.c 	struct timer_list_iter *iter = file->private;
iter              355 kernel/time/timer_list.c 	return move_iter(iter, 1);
iter             1277 kernel/trace/blktrace.c typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
iter             1280 kernel/trace/blktrace.c static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
iter             1284 kernel/trace/blktrace.c 	unsigned long long ts  = iter->ts;
iter             1287 kernel/trace/blktrace.c 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
iter             1291 kernel/trace/blktrace.c 	trace_seq_printf(&iter->seq,
iter             1293 kernel/trace/blktrace.c 			 MAJOR(t->device), MINOR(t->device), iter->cpu,
iter             1294 kernel/trace/blktrace.c 			 secs, nsec_rem, iter->ent->pid, act, rwbs);
iter             1297 kernel/trace/blktrace.c static void blk_log_action(struct trace_iterator *iter, const char *act,
iter             1301 kernel/trace/blktrace.c 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
iter             1305 kernel/trace/blktrace.c 		const union kernfs_node_id *id = cgid_start(iter->ent);
iter             1312 kernel/trace/blktrace.c 			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
iter             1316 kernel/trace/blktrace.c 			trace_seq_printf(&iter->seq,
iter             1321 kernel/trace/blktrace.c 		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
iter             1504 kernel/trace/blktrace.c static enum print_line_t print_one_line(struct trace_iterator *iter,
iter             1507 kernel/trace/blktrace.c 	struct trace_array *tr = iter->tr;
iter             1508 kernel/trace/blktrace.c 	struct trace_seq *s = &iter->seq;
iter             1515 kernel/trace/blktrace.c 	t	   = te_blk_io_trace(iter->ent);
iter             1522 kernel/trace/blktrace.c 		log_action(iter, long_act ? "message" : "m", has_cg);
iter             1523 kernel/trace/blktrace.c 		blk_log_msg(s, iter->ent, has_cg);
iter             1530 kernel/trace/blktrace.c 		log_action(iter, what2act[what].act[long_act], has_cg);
iter             1531 kernel/trace/blktrace.c 		what2act[what].print(s, iter->ent, has_cg);
iter             1537 kernel/trace/blktrace.c static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
iter             1540 kernel/trace/blktrace.c 	return print_one_line(iter, false);
iter             1543 kernel/trace/blktrace.c static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
iter             1545 kernel/trace/blktrace.c 	struct trace_seq *s = &iter->seq;
iter             1546 kernel/trace/blktrace.c 	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
iter             1550 kernel/trace/blktrace.c 		.time     = iter->ts,
iter             1559 kernel/trace/blktrace.c blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
iter             1562 kernel/trace/blktrace.c 	blk_trace_synthesize_old_trace(iter);
iter             1564 kernel/trace/blktrace.c 	return trace_handle_return(&iter->seq);
iter             1567 kernel/trace/blktrace.c static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
iter             1572 kernel/trace/blktrace.c 	return print_one_line(iter, true);
iter             2445 kernel/trace/ftrace.c 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
iter             2447 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
iter             2448 kernel/trace/ftrace.c 	iter->index = 0;
iter             2451 kernel/trace/ftrace.c 	while (iter->pg && !iter->pg->index)
iter             2452 kernel/trace/ftrace.c 		iter->pg = iter->pg->next;
iter             2454 kernel/trace/ftrace.c 	if (!iter->pg)
iter             2457 kernel/trace/ftrace.c 	return iter;
iter             2466 kernel/trace/ftrace.c struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
iter             2468 kernel/trace/ftrace.c 	iter->index++;
iter             2470 kernel/trace/ftrace.c 	if (iter->index >= iter->pg->index) {
iter             2471 kernel/trace/ftrace.c 		iter->pg = iter->pg->next;
iter             2472 kernel/trace/ftrace.c 		iter->index = 0;
iter             2475 kernel/trace/ftrace.c 		while (iter->pg && !iter->pg->index)
iter             2476 kernel/trace/ftrace.c 			iter->pg = iter->pg->next;
iter             2479 kernel/trace/ftrace.c 	if (!iter->pg)
iter             2482 kernel/trace/ftrace.c 	return iter;
iter             2491 kernel/trace/ftrace.c struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
iter             2493 kernel/trace/ftrace.c 	return &iter->pg->records[iter->index];
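A sketch of walking every dyn_ftrace record with the iterator helpers above. ftrace_rec_iter_start() is not itself named in the listing but belongs to the same kernel/trace/ftrace.c API; the assumption here is that it takes no arguments and returns NULL when no records exist, mirroring ftrace_rec_iter_next():

	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter; iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* inspect or patch the call site at rec->ip */
	}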
iter             3071 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3072 kernel/trace/ftrace.c 	struct trace_array *tr = iter->ops->private;
iter             3081 kernel/trace/ftrace.c 	iter->pos = *pos;
iter             3090 kernel/trace/ftrace.c 	if (!iter->probe) {
iter             3092 kernel/trace/ftrace.c 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
iter             3095 kernel/trace/ftrace.c 	if (iter->probe_entry)
iter             3096 kernel/trace/ftrace.c 		hnd = &iter->probe_entry->hlist;
iter             3098 kernel/trace/ftrace.c 	hash = iter->probe->ops.func_hash->filter_hash;
iter             3110 kernel/trace/ftrace.c 	if (iter->pidx >= size) {
iter             3111 kernel/trace/ftrace.c 		if (iter->probe->list.next == func_probes)
iter             3113 kernel/trace/ftrace.c 		next = iter->probe->list.next;
iter             3114 kernel/trace/ftrace.c 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
iter             3115 kernel/trace/ftrace.c 		hash = iter->probe->ops.func_hash->filter_hash;
iter             3117 kernel/trace/ftrace.c 		iter->pidx = 0;
iter             3120 kernel/trace/ftrace.c 	hhd = &hash->buckets[iter->pidx];
iter             3123 kernel/trace/ftrace.c 		iter->pidx++;
iter             3133 kernel/trace/ftrace.c 			iter->pidx++;
iter             3141 kernel/trace/ftrace.c 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
iter             3143 kernel/trace/ftrace.c 	return iter;
iter             3148 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3152 kernel/trace/ftrace.c 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
iter             3155 kernel/trace/ftrace.c 	if (iter->mod_pos > *pos)
iter             3158 kernel/trace/ftrace.c 	iter->probe = NULL;
iter             3159 kernel/trace/ftrace.c 	iter->probe_entry = NULL;
iter             3160 kernel/trace/ftrace.c 	iter->pidx = 0;
iter             3161 kernel/trace/ftrace.c 	for (l = 0; l <= (*pos - iter->mod_pos); ) {
iter             3170 kernel/trace/ftrace.c 	iter->flags |= FTRACE_ITER_PROBE;
iter             3172 kernel/trace/ftrace.c 	return iter;
iter             3176 kernel/trace/ftrace.c t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
iter             3182 kernel/trace/ftrace.c 	probe = iter->probe;
iter             3183 kernel/trace/ftrace.c 	probe_entry = iter->probe_entry;
iter             3202 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3203 kernel/trace/ftrace.c 	struct trace_array *tr = iter->tr;
iter             3206 kernel/trace/ftrace.c 	iter->pos = *pos;
iter             3208 kernel/trace/ftrace.c 	iter->mod_list = iter->mod_list->next;
iter             3210 kernel/trace/ftrace.c 	if (iter->mod_list == &tr->mod_trace ||
iter             3211 kernel/trace/ftrace.c 	    iter->mod_list == &tr->mod_notrace) {
iter             3212 kernel/trace/ftrace.c 		iter->flags &= ~FTRACE_ITER_MOD;
iter             3216 kernel/trace/ftrace.c 	iter->mod_pos = *pos;
iter             3218 kernel/trace/ftrace.c 	return iter;
iter             3223 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3227 kernel/trace/ftrace.c 	if (iter->func_pos > *pos)
iter             3230 kernel/trace/ftrace.c 	iter->mod_pos = iter->func_pos;
iter             3233 kernel/trace/ftrace.c 	if (!iter->tr)
iter             3236 kernel/trace/ftrace.c 	for (l = 0; l <= (*pos - iter->func_pos); ) {
iter             3242 kernel/trace/ftrace.c 		iter->flags &= ~FTRACE_ITER_MOD;
iter             3247 kernel/trace/ftrace.c 	iter->flags |= FTRACE_ITER_MOD;
iter             3249 kernel/trace/ftrace.c 	return iter;
iter             3253 kernel/trace/ftrace.c t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
iter             3256 kernel/trace/ftrace.c 	struct trace_array *tr = iter->tr;
iter             3258 kernel/trace/ftrace.c 	if (WARN_ON_ONCE(!iter->mod_list) ||
iter             3259 kernel/trace/ftrace.c 			 iter->mod_list == &tr->mod_trace ||
iter             3260 kernel/trace/ftrace.c 			 iter->mod_list == &tr->mod_notrace)
iter             3263 kernel/trace/ftrace.c 	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
iter             3278 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3284 kernel/trace/ftrace.c 	if (iter->idx >= iter->pg->index) {
iter             3285 kernel/trace/ftrace.c 		if (iter->pg->next) {
iter             3286 kernel/trace/ftrace.c 			iter->pg = iter->pg->next;
iter             3287 kernel/trace/ftrace.c 			iter->idx = 0;
iter             3291 kernel/trace/ftrace.c 		rec = &iter->pg->records[iter->idx++];
iter             3292 kernel/trace/ftrace.c 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
iter             3293 kernel/trace/ftrace.c 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
iter             3295 kernel/trace/ftrace.c 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
iter             3306 kernel/trace/ftrace.c 	iter->pos = iter->func_pos = *pos;
iter             3307 kernel/trace/ftrace.c 	iter->func = rec;
iter             3309 kernel/trace/ftrace.c 	return iter;
iter             3315 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3322 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_PROBE)
iter             3325 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_MOD)
iter             3328 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_PRINTALL) {
iter             3342 kernel/trace/ftrace.c static void reset_iter_read(struct ftrace_iterator *iter)
iter             3344 kernel/trace/ftrace.c 	iter->pos = 0;
iter             3345 kernel/trace/ftrace.c 	iter->func_pos = 0;
iter             3346 kernel/trace/ftrace.c 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
iter             3351 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3363 kernel/trace/ftrace.c 	if (*pos < iter->pos)
iter             3364 kernel/trace/ftrace.c 		reset_iter_read(iter);
iter             3371 kernel/trace/ftrace.c 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
iter             3372 kernel/trace/ftrace.c 	    ftrace_hash_empty(iter->hash)) {
iter             3373 kernel/trace/ftrace.c 		iter->func_pos = 1; /* Account for the message */
iter             3376 kernel/trace/ftrace.c 		iter->flags |= FTRACE_ITER_PRINTALL;
iter             3378 kernel/trace/ftrace.c 		iter->flags &= ~FTRACE_ITER_PROBE;
iter             3379 kernel/trace/ftrace.c 		return iter;
iter             3382 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_MOD)
iter             3390 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
iter             3391 kernel/trace/ftrace.c 	iter->idx = 0;
iter             3401 kernel/trace/ftrace.c 	return iter;
iter             3427 kernel/trace/ftrace.c 	struct ftrace_iterator *iter = m->private;
iter             3430 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_PROBE)
iter             3431 kernel/trace/ftrace.c 		return t_probe_show(m, iter);
iter             3433 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_MOD)
iter             3434 kernel/trace/ftrace.c 		return t_mod_show(m, iter);
iter             3436 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_PRINTALL) {
iter             3437 kernel/trace/ftrace.c 		if (iter->flags & FTRACE_ITER_NOTRACE)
iter             3444 kernel/trace/ftrace.c 	rec = iter->func;
iter             3450 kernel/trace/ftrace.c 	if (iter->flags & FTRACE_ITER_ENABLED) {
iter             3489 kernel/trace/ftrace.c 	struct ftrace_iterator *iter;
iter             3499 kernel/trace/ftrace.c 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
iter             3500 kernel/trace/ftrace.c 	if (!iter)
iter             3503 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
iter             3504 kernel/trace/ftrace.c 	iter->ops = &global_ops;
iter             3512 kernel/trace/ftrace.c 	struct ftrace_iterator *iter;
iter             3523 kernel/trace/ftrace.c 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
iter             3524 kernel/trace/ftrace.c 	if (!iter)
iter             3527 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
iter             3528 kernel/trace/ftrace.c 	iter->flags = FTRACE_ITER_ENABLED;
iter             3529 kernel/trace/ftrace.c 	iter->ops = &global_ops;
iter             3554 kernel/trace/ftrace.c 	struct ftrace_iterator *iter;
iter             3568 kernel/trace/ftrace.c 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
iter             3569 kernel/trace/ftrace.c 	if (!iter)
iter             3572 kernel/trace/ftrace.c 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
iter             3575 kernel/trace/ftrace.c 	iter->ops = ops;
iter             3576 kernel/trace/ftrace.c 	iter->flags = flag;
iter             3577 kernel/trace/ftrace.c 	iter->tr = tr;
iter             3589 kernel/trace/ftrace.c 	iter->mod_list = mod_head;
iter             3595 kernel/trace/ftrace.c 			iter->hash = alloc_ftrace_hash(size_bits);
iter             3598 kernel/trace/ftrace.c 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
iter             3601 kernel/trace/ftrace.c 		if (!iter->hash) {
iter             3602 kernel/trace/ftrace.c 			trace_parser_put(&iter->parser);
iter             3606 kernel/trace/ftrace.c 		iter->hash = hash;
iter             3611 kernel/trace/ftrace.c 		iter->pg = ftrace_pages_start;
iter             3616 kernel/trace/ftrace.c 			m->private = iter;
iter             3619 kernel/trace/ftrace.c 			free_ftrace_hash(iter->hash);
iter             3620 kernel/trace/ftrace.c 			trace_parser_put(&iter->parser);
iter             3623 kernel/trace/ftrace.c 		file->private_data = iter;
iter             3630 kernel/trace/ftrace.c 		kfree(iter);
iter             4638 kernel/trace/ftrace.c static int ftrace_process_regex(struct ftrace_iterator *iter,
iter             4641 kernel/trace/ftrace.c 	struct ftrace_hash *hash = iter->hash;
iter             4642 kernel/trace/ftrace.c 	struct trace_array *tr = iter->ops->private;
iter             4679 kernel/trace/ftrace.c 	struct ftrace_iterator *iter;
iter             4688 kernel/trace/ftrace.c 		iter = m->private;
iter             4690 kernel/trace/ftrace.c 		iter = file->private_data;
iter             4697 kernel/trace/ftrace.c 	parser = &iter->parser;
iter             4702 kernel/trace/ftrace.c 		ret = ftrace_process_regex(iter, parser->buffer,
iter             5022 kernel/trace/ftrace.c 	struct ftrace_iterator *iter;
iter             5029 kernel/trace/ftrace.c 		iter = m->private;
iter             5032 kernel/trace/ftrace.c 		iter = file->private_data;
iter             5034 kernel/trace/ftrace.c 	parser = &iter->parser;
iter             5036 kernel/trace/ftrace.c 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
iter             5041 kernel/trace/ftrace.c 	mutex_lock(&iter->ops->func_hash->regex_lock);
iter             5044 kernel/trace/ftrace.c 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
iter             5047 kernel/trace/ftrace.c 			orig_hash = &iter->ops->func_hash->filter_hash;
iter             5048 kernel/trace/ftrace.c 			if (iter->tr && !list_empty(&iter->tr->mod_trace))
iter             5049 kernel/trace/ftrace.c 				iter->hash->flags |= FTRACE_HASH_FL_MOD;
iter             5051 kernel/trace/ftrace.c 			orig_hash = &iter->ops->func_hash->notrace_hash;
iter             5054 kernel/trace/ftrace.c 		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
iter             5055 kernel/trace/ftrace.c 						      iter->hash, filter_hash);
iter             5059 kernel/trace/ftrace.c 		iter->hash = NULL;
iter             5062 kernel/trace/ftrace.c 	mutex_unlock(&iter->ops->func_hash->regex_lock);
iter             5063 kernel/trace/ftrace.c 	free_ftrace_hash(iter->hash);
iter             5064 kernel/trace/ftrace.c 	if (iter->tr)
iter             5065 kernel/trace/ftrace.c 		trace_array_put(iter->tr);
iter             5066 kernel/trace/ftrace.c 	kfree(iter);
iter             1918 kernel/trace/ring_buffer.c rb_iter_head_event(struct ring_buffer_iter *iter)
iter             1920 kernel/trace/ring_buffer.c 	return __rb_page_index(iter->head_page, iter->head);
iter             1948 kernel/trace/ring_buffer.c static void rb_inc_iter(struct ring_buffer_iter *iter)
iter             1950 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
iter             1958 kernel/trace/ring_buffer.c 	if (iter->head_page == cpu_buffer->reader_page)
iter             1959 kernel/trace/ring_buffer.c 		iter->head_page = rb_set_head_page(cpu_buffer);
iter             1961 kernel/trace/ring_buffer.c 		rb_inc_page(cpu_buffer, &iter->head_page);
iter             1963 kernel/trace/ring_buffer.c 	iter->read_stamp = iter->head_page->page->time_stamp;
iter             1964 kernel/trace/ring_buffer.c 	iter->head = 0;
iter             3544 kernel/trace/ring_buffer.c static void rb_iter_reset(struct ring_buffer_iter *iter)
iter             3546 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
iter             3549 kernel/trace/ring_buffer.c 	iter->head_page = cpu_buffer->reader_page;
iter             3550 kernel/trace/ring_buffer.c 	iter->head = cpu_buffer->reader_page->read;
iter             3552 kernel/trace/ring_buffer.c 	iter->cache_reader_page = iter->head_page;
iter             3553 kernel/trace/ring_buffer.c 	iter->cache_read = cpu_buffer->read;
iter             3555 kernel/trace/ring_buffer.c 	if (iter->head)
iter             3556 kernel/trace/ring_buffer.c 		iter->read_stamp = cpu_buffer->read_stamp;
iter             3558 kernel/trace/ring_buffer.c 		iter->read_stamp = iter->head_page->page->time_stamp;
iter             3568 kernel/trace/ring_buffer.c void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
iter             3573 kernel/trace/ring_buffer.c 	if (!iter)
iter             3576 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
iter             3579 kernel/trace/ring_buffer.c 	rb_iter_reset(iter);
iter             3588 kernel/trace/ring_buffer.c int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
iter             3596 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
iter             3604 kernel/trace/ring_buffer.c 	return ((iter->head_page == commit_page && iter->head == commit) ||
iter             3605 kernel/trace/ring_buffer.c 		(iter->head_page == reader && commit_page == head_page &&
iter             3607 kernel/trace/ring_buffer.c 		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
iter             3642 kernel/trace/ring_buffer.c rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
iter             3653 kernel/trace/ring_buffer.c 		iter->read_stamp += delta;
iter             3658 kernel/trace/ring_buffer.c 		iter->read_stamp = delta;
iter             3662 kernel/trace/ring_buffer.c 		iter->read_stamp += event->time_delta;
iter             3829 kernel/trace/ring_buffer.c static void rb_advance_iter(struct ring_buffer_iter *iter)
iter             3835 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
iter             3840 kernel/trace/ring_buffer.c 	if (iter->head >= rb_page_size(iter->head_page)) {
iter             3842 kernel/trace/ring_buffer.c 		if (iter->head_page == cpu_buffer->commit_page)
iter             3844 kernel/trace/ring_buffer.c 		rb_inc_iter(iter);
iter             3848 kernel/trace/ring_buffer.c 	event = rb_iter_head_event(iter);
iter             3857 kernel/trace/ring_buffer.c 		       (iter->head_page == cpu_buffer->commit_page) &&
iter             3858 kernel/trace/ring_buffer.c 		       (iter->head + length > rb_commit_index(cpu_buffer))))
iter             3861 kernel/trace/ring_buffer.c 	rb_update_iter_read_stamp(iter, event);
iter             3863 kernel/trace/ring_buffer.c 	iter->head += length;
iter             3866 kernel/trace/ring_buffer.c 	if ((iter->head >= rb_page_size(iter->head_page)) &&
iter             3867 kernel/trace/ring_buffer.c 	    (iter->head_page != cpu_buffer->commit_page))
iter             3868 kernel/trace/ring_buffer.c 		rb_inc_iter(iter);
iter             3950 kernel/trace/ring_buffer.c rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
iter             3960 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
iter             3968 kernel/trace/ring_buffer.c 	if (unlikely(iter->cache_read != cpu_buffer->read ||
iter             3969 kernel/trace/ring_buffer.c 		     iter->cache_reader_page != cpu_buffer->reader_page))
iter             3970 kernel/trace/ring_buffer.c 		rb_iter_reset(iter);
iter             3973 kernel/trace/ring_buffer.c 	if (ring_buffer_iter_empty(iter))
iter             3990 kernel/trace/ring_buffer.c 	if (iter->head >= rb_page_size(iter->head_page)) {
iter             3991 kernel/trace/ring_buffer.c 		rb_inc_iter(iter);
iter             3995 kernel/trace/ring_buffer.c 	event = rb_iter_head_event(iter);
iter             4000 kernel/trace/ring_buffer.c 			rb_inc_iter(iter);
iter             4003 kernel/trace/ring_buffer.c 		rb_advance_iter(iter);
iter             4008 kernel/trace/ring_buffer.c 		rb_advance_iter(iter);
iter             4018 kernel/trace/ring_buffer.c 		rb_advance_iter(iter);
iter             4023 kernel/trace/ring_buffer.c 			*ts = iter->read_stamp + event->time_delta;
iter             4115 kernel/trace/ring_buffer.c ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
iter             4117 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
iter             4123 kernel/trace/ring_buffer.c 	event = rb_iter_peek(iter, ts);
iter             4207 kernel/trace/ring_buffer.c 	struct ring_buffer_iter *iter;
iter             4212 kernel/trace/ring_buffer.c 	iter = kmalloc(sizeof(*iter), flags);
iter             4213 kernel/trace/ring_buffer.c 	if (!iter)
iter             4218 kernel/trace/ring_buffer.c 	iter->cpu_buffer = cpu_buffer;
iter             4223 kernel/trace/ring_buffer.c 	return iter;
iter             4253 kernel/trace/ring_buffer.c ring_buffer_read_start(struct ring_buffer_iter *iter)
iter             4258 kernel/trace/ring_buffer.c 	if (!iter)
iter             4261 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
iter             4265 kernel/trace/ring_buffer.c 	rb_iter_reset(iter);
iter             4279 kernel/trace/ring_buffer.c ring_buffer_read_finish(struct ring_buffer_iter *iter)
iter             4281 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
iter             4296 kernel/trace/ring_buffer.c 	kfree(iter);
iter             4308 kernel/trace/ring_buffer.c ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
iter             4311 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
iter             4316 kernel/trace/ring_buffer.c 	event = rb_iter_peek(iter, ts);
iter             4323 kernel/trace/ring_buffer.c 	rb_advance_iter(iter);
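A hedged sketch of a full iterator read cycle over one CPU's buffer, matching the start/peek/read/finish helpers listed above; the prepare-side names and the requirement to call them before ring_buffer_read_start() are assumptions drawn from the allocation body shown in the ring_buffer.c lines, and read_all_on_cpu() is a hypothetical wrapper:

	static void read_all_on_cpu(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_iter *iter;
		struct ring_buffer_event *event;
		u64 ts;

		iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);	/* assumed name */
		if (!iter)
			return;
		ring_buffer_read_prepare_sync();	/* assumed: must run between prepare and start */
		ring_buffer_read_start(iter);
		while ((event = ring_buffer_read(iter, &ts)) != NULL)
			;	/* decode 'event'; 'ts' holds its timestamp */
		ring_buffer_read_finish(iter);
	}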
iter             1631 kernel/trace/trace.c static int wait_on_pipe(struct trace_iterator *iter, int full)
iter             1634 kernel/trace/trace.c 	if (trace_buffer_iter(iter, iter->cpu_file))
iter             1637 kernel/trace/trace.c 	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
iter             2533 kernel/trace/trace.c 	struct trace_iterator *iter = tracepoint_print_iter;
iter             2536 kernel/trace/trace.c 	if (WARN_ON_ONCE(!iter))
iter             2547 kernel/trace/trace.c 	trace_seq_init(&iter->seq);
iter             2548 kernel/trace/trace.c 	iter->ent = fbuffer->entry;
iter             2549 kernel/trace/trace.c 	event_call->event.funcs->trace(iter, 0, event);
iter             2550 kernel/trace/trace.c 	trace_seq_putc(&iter->seq, 0);
iter             2551 kernel/trace/trace.c 	printk("%s", iter->seq.buffer);
iter             3266 kernel/trace/trace.c static void trace_iterator_increment(struct trace_iterator *iter)
iter             3268 kernel/trace/trace.c 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
iter             3270 kernel/trace/trace.c 	iter->idx++;
iter             3276 kernel/trace/trace.c peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
iter             3280 kernel/trace/trace.c 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
iter             3285 kernel/trace/trace.c 		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
iter             3289 kernel/trace/trace.c 		iter->ent_size = ring_buffer_event_length(event);
iter             3292 kernel/trace/trace.c 	iter->ent_size = 0;
iter             3297 kernel/trace/trace.c __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
iter             3300 kernel/trace/trace.c 	struct ring_buffer *buffer = iter->trace_buffer->buffer;
iter             3303 kernel/trace/trace.c 	int cpu_file = iter->cpu_file;
iter             3316 kernel/trace/trace.c 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
iter             3328 kernel/trace/trace.c 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
iter             3338 kernel/trace/trace.c 			next_size = iter->ent_size;
iter             3342 kernel/trace/trace.c 	iter->ent_size = next_size;
iter             3357 kernel/trace/trace.c struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
iter             3360 kernel/trace/trace.c 	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
iter             3364 kernel/trace/trace.c void *trace_find_next_entry_inc(struct trace_iterator *iter)
iter             3366 kernel/trace/trace.c 	iter->ent = __find_next_entry(iter, &iter->cpu,
iter             3367 kernel/trace/trace.c 				      &iter->lost_events, &iter->ts);
iter             3369 kernel/trace/trace.c 	if (iter->ent)
iter             3370 kernel/trace/trace.c 		trace_iterator_increment(iter);
iter             3372 kernel/trace/trace.c 	return iter->ent ? iter : NULL;
iter             3375 kernel/trace/trace.c static void trace_consume(struct trace_iterator *iter)
iter             3377 kernel/trace/trace.c 	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
iter             3378 kernel/trace/trace.c 			    &iter->lost_events);
iter             3383 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             3387 kernel/trace/trace.c 	WARN_ON_ONCE(iter->leftover);
iter             3392 kernel/trace/trace.c 	if (iter->idx > i)
iter             3395 kernel/trace/trace.c 	if (iter->idx < 0)
iter             3396 kernel/trace/trace.c 		ent = trace_find_next_entry_inc(iter);
iter             3398 kernel/trace/trace.c 		ent = iter;
iter             3400 kernel/trace/trace.c 	while (ent && iter->idx < i)
iter             3401 kernel/trace/trace.c 		ent = trace_find_next_entry_inc(iter);
iter             3403 kernel/trace/trace.c 	iter->pos = *pos;
iter             3408 kernel/trace/trace.c void tracing_iter_reset(struct trace_iterator *iter, int cpu)
iter             3415 kernel/trace/trace.c 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
iter             3417 kernel/trace/trace.c 	buf_iter = trace_buffer_iter(iter, cpu);
iter             3429 kernel/trace/trace.c 		if (ts >= iter->trace_buffer->time_start)
iter             3435 kernel/trace/trace.c 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
iter             3444 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             3445 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3446 kernel/trace/trace.c 	int cpu_file = iter->cpu_file;
iter             3458 kernel/trace/trace.c 	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
iter             3459 kernel/trace/trace.c 		*iter->trace = *tr->current_trace;
iter             3463 kernel/trace/trace.c 	if (iter->snapshot && iter->trace->use_max_tr)
iter             3467 kernel/trace/trace.c 	if (!iter->snapshot)
iter             3470 kernel/trace/trace.c 	if (*pos != iter->pos) {
iter             3471 kernel/trace/trace.c 		iter->ent = NULL;
iter             3472 kernel/trace/trace.c 		iter->cpu = 0;
iter             3473 kernel/trace/trace.c 		iter->idx = -1;
iter             3477 kernel/trace/trace.c 				tracing_iter_reset(iter, cpu);
iter             3479 kernel/trace/trace.c 			tracing_iter_reset(iter, cpu_file);
iter             3481 kernel/trace/trace.c 		iter->leftover = 0;
iter             3482 kernel/trace/trace.c 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
iter             3490 kernel/trace/trace.c 		if (iter->leftover)
iter             3491 kernel/trace/trace.c 			p = iter;
iter             3505 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             3508 kernel/trace/trace.c 	if (iter->snapshot && iter->trace->use_max_tr)
iter             3512 kernel/trace/trace.c 	if (!iter->snapshot)
iter             3515 kernel/trace/trace.c 	trace_access_unlock(iter->cpu_file);
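The s_start()/s_next()/s_stop()/s_show() excerpts above are the seq_file half of the tracefs "trace" file. A condensed sketch of how they are wired together, modelled on tracer_seq_ops in trace.c; __tracing_open(), excerpted further down, installs this table with __seq_open_private() and stores the trace_iterator as the seq_file private data:

static const struct seq_operations tracer_seq_ops = {
	.start	= s_start,	/* take locks, position the iterator at *pos   */
	.next	= s_next,	/* advance via trace_find_next_entry_inc()     */
	.stop	= s_stop,	/* drop the locks taken in s_start()           */
	.show	= s_show,	/* print_trace_line() into iter->seq, then copy
				 * the result into the seq_file buffer         */
};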
iter             3635 kernel/trace/trace.c print_trace_header(struct seq_file *m, struct trace_iterator *iter)
iter             3638 kernel/trace/trace.c 	struct trace_buffer *buf = iter->trace_buffer;
iter             3640 kernel/trace/trace.c 	struct tracer *type = iter->trace;
iter             3685 kernel/trace/trace.c 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
iter             3686 kernel/trace/trace.c 		trace_print_seq(m, &iter->seq);
iter             3688 kernel/trace/trace.c 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
iter             3689 kernel/trace/trace.c 		trace_print_seq(m, &iter->seq);
iter             3696 kernel/trace/trace.c static void test_cpu_buff_start(struct trace_iterator *iter)
iter             3698 kernel/trace/trace.c 	struct trace_seq *s = &iter->seq;
iter             3699 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3704 kernel/trace/trace.c 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
iter             3707 kernel/trace/trace.c 	if (cpumask_available(iter->started) &&
iter             3708 kernel/trace/trace.c 	    cpumask_test_cpu(iter->cpu, iter->started))
iter             3711 kernel/trace/trace.c 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
iter             3714 kernel/trace/trace.c 	if (cpumask_available(iter->started))
iter             3715 kernel/trace/trace.c 		cpumask_set_cpu(iter->cpu, iter->started);
iter             3718 kernel/trace/trace.c 	if (iter->idx > 1)
iter             3720 kernel/trace/trace.c 				iter->cpu);
iter             3723 kernel/trace/trace.c static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
iter             3725 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3726 kernel/trace/trace.c 	struct trace_seq *s = &iter->seq;
iter             3731 kernel/trace/trace.c 	entry = iter->ent;
iter             3733 kernel/trace/trace.c 	test_cpu_buff_start(iter);
iter             3738 kernel/trace/trace.c 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
iter             3739 kernel/trace/trace.c 			trace_print_lat_context(iter);
iter             3741 kernel/trace/trace.c 			trace_print_context(iter);
iter             3748 kernel/trace/trace.c 		return event->funcs->trace(iter, sym_flags, event);
iter             3755 kernel/trace/trace.c static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
iter             3757 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3758 kernel/trace/trace.c 	struct trace_seq *s = &iter->seq;
iter             3762 kernel/trace/trace.c 	entry = iter->ent;
iter             3766 kernel/trace/trace.c 				 entry->pid, iter->cpu, iter->ts);
iter             3773 kernel/trace/trace.c 		return event->funcs->raw(iter, 0, event);
iter             3780 kernel/trace/trace.c static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
iter             3782 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3783 kernel/trace/trace.c 	struct trace_seq *s = &iter->seq;
iter             3788 kernel/trace/trace.c 	entry = iter->ent;
iter             3792 kernel/trace/trace.c 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
iter             3793 kernel/trace/trace.c 		SEQ_PUT_HEX_FIELD(s, iter->ts);
iter             3800 kernel/trace/trace.c 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
iter             3810 kernel/trace/trace.c static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
iter             3812 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3813 kernel/trace/trace.c 	struct trace_seq *s = &iter->seq;
iter             3817 kernel/trace/trace.c 	entry = iter->ent;
iter             3821 kernel/trace/trace.c 		SEQ_PUT_FIELD(s, iter->cpu);
iter             3822 kernel/trace/trace.c 		SEQ_PUT_FIELD(s, iter->ts);
iter             3828 kernel/trace/trace.c 	return event ? event->funcs->binary(iter, 0, event) :
iter             3832 kernel/trace/trace.c int trace_empty(struct trace_iterator *iter)
iter             3838 kernel/trace/trace.c 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
iter             3839 kernel/trace/trace.c 		cpu = iter->cpu_file;
iter             3840 kernel/trace/trace.c 		buf_iter = trace_buffer_iter(iter, cpu);
iter             3845 kernel/trace/trace.c 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
iter             3852 kernel/trace/trace.c 		buf_iter = trace_buffer_iter(iter, cpu);
iter             3857 kernel/trace/trace.c 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
iter             3866 kernel/trace/trace.c enum print_line_t print_trace_line(struct trace_iterator *iter)
iter             3868 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3872 kernel/trace/trace.c 	if (iter->lost_events) {
iter             3873 kernel/trace/trace.c 		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
iter             3874 kernel/trace/trace.c 				 iter->cpu, iter->lost_events);
iter             3875 kernel/trace/trace.c 		if (trace_seq_has_overflowed(&iter->seq))
iter             3879 kernel/trace/trace.c 	if (iter->trace && iter->trace->print_line) {
iter             3880 kernel/trace/trace.c 		ret = iter->trace->print_line(iter);
iter             3885 kernel/trace/trace.c 	if (iter->ent->type == TRACE_BPUTS &&
iter             3888 kernel/trace/trace.c 		return trace_print_bputs_msg_only(iter);
iter             3890 kernel/trace/trace.c 	if (iter->ent->type == TRACE_BPRINT &&
iter             3893 kernel/trace/trace.c 		return trace_print_bprintk_msg_only(iter);
iter             3895 kernel/trace/trace.c 	if (iter->ent->type == TRACE_PRINT &&
iter             3898 kernel/trace/trace.c 		return trace_print_printk_msg_only(iter);
iter             3901 kernel/trace/trace.c 		return print_bin_fmt(iter);
iter             3904 kernel/trace/trace.c 		return print_hex_fmt(iter);
iter             3907 kernel/trace/trace.c 		return print_raw_fmt(iter);
iter             3909 kernel/trace/trace.c 	return print_trace_fmt(iter);
iter             3914 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             3915 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3918 kernel/trace/trace.c 	if (trace_empty(iter))
iter             3921 kernel/trace/trace.c 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
iter             3922 kernel/trace/trace.c 		print_trace_header(m, iter);
iter             3930 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             3931 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             3937 kernel/trace/trace.c 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
iter             3939 kernel/trace/trace.c 		if (trace_empty(iter))
iter             3941 kernel/trace/trace.c 		print_trace_header(m, iter);
iter             3947 kernel/trace/trace.c 				print_func_help_header_irq(iter->trace_buffer,
iter             3950 kernel/trace/trace.c 				print_func_help_header(iter->trace_buffer, m,
iter             3990 kernel/trace/trace.c static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
iter             3992 kernel/trace/trace.c 	if (iter->tr->allocated_snapshot)
iter             3998 kernel/trace/trace.c 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
iter             4005 kernel/trace/trace.c static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
iter             4010 kernel/trace/trace.c 	struct trace_iterator *iter = v;
iter             4013 kernel/trace/trace.c 	if (iter->ent == NULL) {
iter             4014 kernel/trace/trace.c 		if (iter->tr) {
iter             4015 kernel/trace/trace.c 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
iter             4019 kernel/trace/trace.c 		if (iter->snapshot && trace_empty(iter))
iter             4020 kernel/trace/trace.c 			print_snapshot_help(m, iter);
iter             4021 kernel/trace/trace.c 		else if (iter->trace && iter->trace->print_header)
iter             4022 kernel/trace/trace.c 			iter->trace->print_header(m);
iter             4026 kernel/trace/trace.c 	} else if (iter->leftover) {
iter             4031 kernel/trace/trace.c 		ret = trace_print_seq(m, &iter->seq);
iter             4034 kernel/trace/trace.c 		iter->leftover = ret;
iter             4037 kernel/trace/trace.c 		print_trace_line(iter);
iter             4038 kernel/trace/trace.c 		ret = trace_print_seq(m, &iter->seq);
iter             4046 kernel/trace/trace.c 		iter->leftover = ret;
iter             4074 kernel/trace/trace.c 	struct trace_iterator *iter;
iter             4080 kernel/trace/trace.c 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
iter             4081 kernel/trace/trace.c 	if (!iter)
iter             4084 kernel/trace/trace.c 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
iter             4086 kernel/trace/trace.c 	if (!iter->buffer_iter)
iter             4094 kernel/trace/trace.c 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
iter             4095 kernel/trace/trace.c 	if (!iter->trace)
iter             4098 kernel/trace/trace.c 	*iter->trace = *tr->current_trace;
iter             4100 kernel/trace/trace.c 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
iter             4103 kernel/trace/trace.c 	iter->tr = tr;
iter             4108 kernel/trace/trace.c 		iter->trace_buffer = &tr->max_buffer;
iter             4111 kernel/trace/trace.c 		iter->trace_buffer = &tr->trace_buffer;
iter             4112 kernel/trace/trace.c 	iter->snapshot = snapshot;
iter             4113 kernel/trace/trace.c 	iter->pos = -1;
iter             4114 kernel/trace/trace.c 	iter->cpu_file = tracing_get_cpu(inode);
iter             4115 kernel/trace/trace.c 	mutex_init(&iter->mutex);
iter             4118 kernel/trace/trace.c 	if (iter->trace && iter->trace->open)
iter             4119 kernel/trace/trace.c 		iter->trace->open(iter);
iter             4122 kernel/trace/trace.c 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
iter             4123 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
iter             4127 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter             4130 kernel/trace/trace.c 	if (!iter->snapshot)
iter             4133 kernel/trace/trace.c 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
iter             4135 kernel/trace/trace.c 			iter->buffer_iter[cpu] =
iter             4136 kernel/trace/trace.c 				ring_buffer_read_prepare(iter->trace_buffer->buffer,
iter             4141 kernel/trace/trace.c 			ring_buffer_read_start(iter->buffer_iter[cpu]);
iter             4142 kernel/trace/trace.c 			tracing_iter_reset(iter, cpu);
iter             4145 kernel/trace/trace.c 		cpu = iter->cpu_file;
iter             4146 kernel/trace/trace.c 		iter->buffer_iter[cpu] =
iter             4147 kernel/trace/trace.c 			ring_buffer_read_prepare(iter->trace_buffer->buffer,
iter             4150 kernel/trace/trace.c 		ring_buffer_read_start(iter->buffer_iter[cpu]);
iter             4151 kernel/trace/trace.c 		tracing_iter_reset(iter, cpu);
iter             4156 kernel/trace/trace.c 	return iter;
iter             4160 kernel/trace/trace.c 	kfree(iter->trace);
iter             4161 kernel/trace/trace.c 	kfree(iter->buffer_iter);
iter             4206 kernel/trace/trace.c 	struct trace_iterator *iter;
iter             4215 kernel/trace/trace.c 	iter = m->private;
iter             4219 kernel/trace/trace.c 		if (iter->buffer_iter[cpu])
iter             4220 kernel/trace/trace.c 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
iter             4223 kernel/trace/trace.c 	if (iter->trace && iter->trace->close)
iter             4224 kernel/trace/trace.c 		iter->trace->close(iter);
iter             4226 kernel/trace/trace.c 	if (!iter->snapshot)
iter             4234 kernel/trace/trace.c 	mutex_destroy(&iter->mutex);
iter             4235 kernel/trace/trace.c 	free_cpumask_var(iter->started);
iter             4236 kernel/trace/trace.c 	kfree(iter->trace);
iter             4237 kernel/trace/trace.c 	kfree(iter->buffer_iter);
iter             4263 kernel/trace/trace.c 	struct trace_iterator *iter;
iter             4287 kernel/trace/trace.c 		iter = __tracing_open(inode, file, false);
iter             4288 kernel/trace/trace.c 		if (IS_ERR(iter))
iter             4289 kernel/trace/trace.c 			ret = PTR_ERR(iter);
iter             4291 kernel/trace/trace.c 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter             5859 kernel/trace/trace.c 	struct trace_iterator *iter;
iter             5869 kernel/trace/trace.c 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
iter             5870 kernel/trace/trace.c 	if (!iter) {
iter             5876 kernel/trace/trace.c 	trace_seq_init(&iter->seq);
iter             5877 kernel/trace/trace.c 	iter->trace = tr->current_trace;
iter             5879 kernel/trace/trace.c 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
iter             5885 kernel/trace/trace.c 	cpumask_setall(iter->started);
iter             5888 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter             5892 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter             5894 kernel/trace/trace.c 	iter->tr = tr;
iter             5895 kernel/trace/trace.c 	iter->trace_buffer = &tr->trace_buffer;
iter             5896 kernel/trace/trace.c 	iter->cpu_file = tracing_get_cpu(inode);
iter             5897 kernel/trace/trace.c 	mutex_init(&iter->mutex);
iter             5898 kernel/trace/trace.c 	filp->private_data = iter;
iter             5900 kernel/trace/trace.c 	if (iter->trace->pipe_open)
iter             5901 kernel/trace/trace.c 		iter->trace->pipe_open(iter);
iter             5911 kernel/trace/trace.c 	kfree(iter);
iter             5919 kernel/trace/trace.c 	struct trace_iterator *iter = file->private_data;
iter             5926 kernel/trace/trace.c 	if (iter->trace->pipe_close)
iter             5927 kernel/trace/trace.c 		iter->trace->pipe_close(iter);
iter             5931 kernel/trace/trace.c 	free_cpumask_var(iter->started);
iter             5932 kernel/trace/trace.c 	mutex_destroy(&iter->mutex);
iter             5933 kernel/trace/trace.c 	kfree(iter);
iter             5941 kernel/trace/trace.c trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
iter             5943 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             5946 kernel/trace/trace.c 	if (trace_buffer_iter(iter, iter->cpu_file))
iter             5955 kernel/trace/trace.c 		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
iter             5962 kernel/trace/trace.c 	struct trace_iterator *iter = filp->private_data;
iter             5964 kernel/trace/trace.c 	return trace_poll(iter, filp, poll_table);
iter             5970 kernel/trace/trace.c 	struct trace_iterator *iter = filp->private_data;
iter             5973 kernel/trace/trace.c 	while (trace_empty(iter)) {
iter             5988 kernel/trace/trace.c 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
iter             5991 kernel/trace/trace.c 		mutex_unlock(&iter->mutex);
iter             5993 kernel/trace/trace.c 		ret = wait_on_pipe(iter, 0);
iter             5995 kernel/trace/trace.c 		mutex_lock(&iter->mutex);
iter             6011 kernel/trace/trace.c 	struct trace_iterator *iter = filp->private_data;
iter             6019 kernel/trace/trace.c 	mutex_lock(&iter->mutex);
iter             6022 kernel/trace/trace.c 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
iter             6026 kernel/trace/trace.c 	trace_seq_init(&iter->seq);
iter             6028 kernel/trace/trace.c 	if (iter->trace->read) {
iter             6029 kernel/trace/trace.c 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
iter             6040 kernel/trace/trace.c 	if (trace_empty(iter)) {
iter             6049 kernel/trace/trace.c 	memset(&iter->seq, 0,
iter             6052 kernel/trace/trace.c 	cpumask_clear(iter->started);
iter             6053 kernel/trace/trace.c 	trace_seq_init(&iter->seq);
iter             6054 kernel/trace/trace.c 	iter->pos = -1;
iter             6057 kernel/trace/trace.c 	trace_access_lock(iter->cpu_file);
iter             6058 kernel/trace/trace.c 	while (trace_find_next_entry_inc(iter) != NULL) {
iter             6060 kernel/trace/trace.c 		int save_len = iter->seq.seq.len;
iter             6062 kernel/trace/trace.c 		ret = print_trace_line(iter);
iter             6065 kernel/trace/trace.c 			iter->seq.seq.len = save_len;
iter             6069 kernel/trace/trace.c 			trace_consume(iter);
iter             6071 kernel/trace/trace.c 		if (trace_seq_used(&iter->seq) >= cnt)
iter             6079 kernel/trace/trace.c 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
iter             6080 kernel/trace/trace.c 			  iter->ent->type);
iter             6082 kernel/trace/trace.c 	trace_access_unlock(iter->cpu_file);
iter             6086 kernel/trace/trace.c 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
iter             6087 kernel/trace/trace.c 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
iter             6088 kernel/trace/trace.c 		trace_seq_init(&iter->seq);
iter             6098 kernel/trace/trace.c 	mutex_unlock(&iter->mutex);
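A minimal sketch of the consuming-read loop at the heart of tracing_read_pipe() above (the pipe mutex, the wait-for-data path and the final copy to user space are elided; cnt is the size of the user buffer):

while (trace_find_next_entry_inc(iter) != NULL) {
	enum print_line_t ret;
	int save_len = iter->seq.seq.len;

	ret = print_trace_line(iter);
	if (ret == TRACE_TYPE_PARTIAL_LINE) {
		/* entry did not fit: undo and retry on the next read() */
		iter->seq.seq.len = save_len;
		break;
	}
	if (ret != TRACE_TYPE_NO_CONSUME)
		trace_consume(iter);	/* pop the entry off the ring buffer */

	if (trace_seq_used(&iter->seq) >= cnt)
		break;			/* enough data for this read() call */
}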
iter             6117 kernel/trace/trace.c tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
iter             6125 kernel/trace/trace.c 		save_len = iter->seq.seq.len;
iter             6126 kernel/trace/trace.c 		ret = print_trace_line(iter);
iter             6128 kernel/trace/trace.c 		if (trace_seq_has_overflowed(&iter->seq)) {
iter             6129 kernel/trace/trace.c 			iter->seq.seq.len = save_len;
iter             6139 kernel/trace/trace.c 			iter->seq.seq.len = save_len;
iter             6143 kernel/trace/trace.c 		count = trace_seq_used(&iter->seq) - save_len;
iter             6146 kernel/trace/trace.c 			iter->seq.seq.len = save_len;
iter             6151 kernel/trace/trace.c 			trace_consume(iter);
iter             6153 kernel/trace/trace.c 		if (!trace_find_next_entry_inc(iter))	{
iter             6155 kernel/trace/trace.c 			iter->ent = NULL;
iter             6171 kernel/trace/trace.c 	struct trace_iterator *iter = filp->private_data;
iter             6187 kernel/trace/trace.c 	mutex_lock(&iter->mutex);
iter             6189 kernel/trace/trace.c 	if (iter->trace->splice_read) {
iter             6190 kernel/trace/trace.c 		ret = iter->trace->splice_read(iter, filp,
iter             6200 kernel/trace/trace.c 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
iter             6206 kernel/trace/trace.c 	trace_access_lock(iter->cpu_file);
iter             6214 kernel/trace/trace.c 		rem = tracing_fill_pipe_page(rem, iter);
iter             6217 kernel/trace/trace.c 		ret = trace_seq_to_buffer(&iter->seq,
iter             6219 kernel/trace/trace.c 					  trace_seq_used(&iter->seq));
iter             6225 kernel/trace/trace.c 		spd.partial[i].len = trace_seq_used(&iter->seq);
iter             6227 kernel/trace/trace.c 		trace_seq_init(&iter->seq);
iter             6230 kernel/trace/trace.c 	trace_access_unlock(iter->cpu_file);
iter             6232 kernel/trace/trace.c 	mutex_unlock(&iter->mutex);
iter             6245 kernel/trace/trace.c 	mutex_unlock(&iter->mutex);
iter             6677 kernel/trace/trace.c 	struct trace_iterator	iter;
iter             6687 kernel/trace/trace.c 	struct trace_iterator *iter;
iter             6696 kernel/trace/trace.c 		iter = __tracing_open(inode, file, true);
iter             6697 kernel/trace/trace.c 		if (IS_ERR(iter))
iter             6698 kernel/trace/trace.c 			ret = PTR_ERR(iter);
iter             6705 kernel/trace/trace.c 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
iter             6706 kernel/trace/trace.c 		if (!iter) {
iter             6712 kernel/trace/trace.c 		iter->tr = tr;
iter             6713 kernel/trace/trace.c 		iter->trace_buffer = &tr->max_buffer;
iter             6714 kernel/trace/trace.c 		iter->cpu_file = tracing_get_cpu(inode);
iter             6715 kernel/trace/trace.c 		m->private = iter;
iter             6730 kernel/trace/trace.c 	struct trace_iterator *iter = m->private;
iter             6731 kernel/trace/trace.c 	struct trace_array *tr = iter->tr;
iter             6759 kernel/trace/trace.c 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
iter             6769 kernel/trace/trace.c 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
iter             6776 kernel/trace/trace.c 					&tr->trace_buffer, iter->cpu_file);
iter             6783 kernel/trace/trace.c 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
iter             6786 kernel/trace/trace.c 			update_max_tr_single(tr, current, iter->cpu_file);
iter             6791 kernel/trace/trace.c 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
iter             6794 kernel/trace/trace.c 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
iter             6845 kernel/trace/trace.c 	if (info->iter.trace->use_max_tr) {
iter             6850 kernel/trace/trace.c 	info->iter.snapshot = true;
iter             6851 kernel/trace/trace.c 	info->iter.trace_buffer = &info->iter.tr->max_buffer;
iter             7223 kernel/trace/trace.c 	info->iter.tr		= tr;
iter             7224 kernel/trace/trace.c 	info->iter.cpu_file	= tracing_get_cpu(inode);
iter             7225 kernel/trace/trace.c 	info->iter.trace	= tr->current_trace;
iter             7226 kernel/trace/trace.c 	info->iter.trace_buffer = &tr->trace_buffer;
iter             7248 kernel/trace/trace.c 	struct trace_iterator *iter = &info->iter;
iter             7250 kernel/trace/trace.c 	return trace_poll(iter, filp, poll_table);
iter             7258 kernel/trace/trace.c 	struct trace_iterator *iter = &info->iter;
iter             7266 kernel/trace/trace.c 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
iter             7271 kernel/trace/trace.c 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
iter             7272 kernel/trace/trace.c 							  iter->cpu_file);
iter             7277 kernel/trace/trace.c 			info->spare_cpu = iter->cpu_file;
iter             7288 kernel/trace/trace.c 	trace_access_lock(iter->cpu_file);
iter             7289 kernel/trace/trace.c 	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
iter             7292 kernel/trace/trace.c 				    iter->cpu_file, 0);
iter             7293 kernel/trace/trace.c 	trace_access_unlock(iter->cpu_file);
iter             7296 kernel/trace/trace.c 		if (trace_empty(iter)) {
iter             7300 kernel/trace/trace.c 			ret = wait_on_pipe(iter, 0);
iter             7330 kernel/trace/trace.c 	struct trace_iterator *iter = &info->iter;
iter             7334 kernel/trace/trace.c 	iter->tr->current_trace->ref--;
iter             7336 kernel/trace/trace.c 	__trace_array_put(iter->tr);
iter             7339 kernel/trace/trace.c 		ring_buffer_free_read_page(iter->trace_buffer->buffer,
iter             7411 kernel/trace/trace.c 	struct trace_iterator *iter = &info->iter;
iter             7426 kernel/trace/trace.c 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
iter             7443 kernel/trace/trace.c 	trace_access_lock(iter->cpu_file);
iter             7444 kernel/trace/trace.c 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
iter             7457 kernel/trace/trace.c 		ref->buffer = iter->trace_buffer->buffer;
iter             7458 kernel/trace/trace.c 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
iter             7465 kernel/trace/trace.c 		ref->cpu = iter->cpu_file;
iter             7468 kernel/trace/trace.c 					  len, iter->cpu_file, 1);
iter             7485 kernel/trace/trace.c 		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
iter             7488 kernel/trace/trace.c 	trace_access_unlock(iter->cpu_file);
iter             7500 kernel/trace/trace.c 		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
iter             8899 kernel/trace/trace.c void trace_init_global_iter(struct trace_iterator *iter)
iter             8901 kernel/trace/trace.c 	iter->tr = &global_trace;
iter             8902 kernel/trace/trace.c 	iter->trace = iter->tr->current_trace;
iter             8903 kernel/trace/trace.c 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
iter             8904 kernel/trace/trace.c 	iter->trace_buffer = &global_trace.trace_buffer;
iter             8906 kernel/trace/trace.c 	if (iter->trace && iter->trace->open)
iter             8907 kernel/trace/trace.c 		iter->trace->open(iter);
iter             8910 kernel/trace/trace.c 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
iter             8911 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
iter             8914 kernel/trace/trace.c 	if (trace_clocks[iter->tr->clock_id].in_ns)
iter             8915 kernel/trace/trace.c 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter             8921 kernel/trace/trace.c 	static struct trace_iterator iter;
iter             8948 kernel/trace/trace.c 	trace_init_global_iter(&iter);
iter             8951 kernel/trace/trace.c 		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
iter             8961 kernel/trace/trace.c 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
iter             8964 kernel/trace/trace.c 		iter.cpu_file = raw_smp_processor_id();
iter             8970 kernel/trace/trace.c 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
iter             8988 kernel/trace/trace.c 	while (!trace_empty(&iter)) {
iter             8995 kernel/trace/trace.c 		trace_iterator_reset(&iter);
iter             8996 kernel/trace/trace.c 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter             8998 kernel/trace/trace.c 		if (trace_find_next_entry_inc(&iter) != NULL) {
iter             9001 kernel/trace/trace.c 			ret = print_trace_line(&iter);
iter             9003 kernel/trace/trace.c 				trace_consume(&iter);
iter             9007 kernel/trace/trace.c 		trace_printk_seq(&iter.seq);
iter             9019 kernel/trace/trace.c 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
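The ftrace_dump() excerpts above are the oops-time variant of the same walk: a static global iterator is set up with trace_init_global_iter(), per-cpu tracing is disabled for the duration of the dump, and every formatted line goes straight to the console through trace_printk_seq(). A rough sketch condensed from the body of ftrace_dump() (return-value handling, the dump-mode switch and the NMI watchdog touch are simplified away):

/* static because a trace_iterator is too large for the stack in an oops path */
static struct trace_iterator iter;
int cpu;

trace_init_global_iter(&iter);
iter.cpu_file = RING_BUFFER_ALL_CPUS;

for_each_tracing_cpu(cpu)
	atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);

while (!trace_empty(&iter)) {
	trace_iterator_reset(&iter);
	iter.iter_flags |= TRACE_FILE_LAT_FMT;

	if (trace_find_next_entry_inc(&iter) != NULL) {
		if (print_trace_line(&iter) != TRACE_TYPE_NO_CONSUME)
			trace_consume(&iter);	/* drop the printed entry */
	}
	trace_printk_seq(&iter.seq);		/* flush iter.seq to the console */
}

for_each_tracing_cpu(cpu)
	atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);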
iter              473 kernel/trace/trace.h 	void			(*open)(struct trace_iterator *iter);
iter              474 kernel/trace/trace.h 	void			(*pipe_open)(struct trace_iterator *iter);
iter              475 kernel/trace/trace.h 	void			(*close)(struct trace_iterator *iter);
iter              476 kernel/trace/trace.h 	void			(*pipe_close)(struct trace_iterator *iter);
iter              477 kernel/trace/trace.h 	ssize_t			(*read)(struct trace_iterator *iter,
iter              480 kernel/trace/trace.h 	ssize_t			(*splice_read)(struct trace_iterator *iter,
iter              491 kernel/trace/trace.h 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
iter              674 kernel/trace/trace.h trace_buffer_iter(struct trace_iterator *iter, int cpu)
iter              676 kernel/trace/trace.h 	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
iter              710 kernel/trace/trace.h struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
iter              716 kernel/trace/trace.h int trace_empty(struct trace_iterator *iter);
iter              718 kernel/trace/trace.h void *trace_find_next_entry_inc(struct trace_iterator *iter);
iter              720 kernel/trace/trace.h void trace_init_global_iter(struct trace_iterator *iter);
iter              722 kernel/trace/trace.h void tracing_iter_reset(struct trace_iterator *iter, int cpu);
iter              737 kernel/trace/trace.h void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
iter              738 kernel/trace/trace.h int trace_empty(struct trace_iterator *iter);
iter              861 kernel/trace/trace.h enum print_line_t print_trace_line(struct trace_iterator *iter);
iter              921 kernel/trace/trace.h print_graph_function_flags(struct trace_iterator *iter, u32 flags);
iter              925 kernel/trace/trace.h extern void graph_trace_open(struct trace_iterator *iter);
iter              926 kernel/trace/trace.h extern void graph_trace_close(struct trace_iterator *iter);
iter             1041 kernel/trace/trace.h print_graph_function_flags(struct trace_iterator *iter, u32 flags)
iter             1994 kernel/trace/trace.h static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
iter             2002 kernel/trace/trace.h 	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
iter             2004 kernel/trace/trace.h 	iter->pos = -1;
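trace_buffer_iter() above is how readers tell a non-consuming open (per-cpu ring_buffer_iter allocated in __tracing_open()) from a consuming one such as trace_pipe, where buffer_iter stays NULL. A small sketch of the usual call pattern, modelled on the peek path in trace.c (ts is a u64 timestamp out-parameter; lost-event accounting is elided):

struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
struct ring_buffer_event *event;

if (buf_iter)
	/* non-consuming reader: peek through the private iterator */
	event = ring_buffer_iter_peek(buf_iter, &ts);
else
	/* consuming reader: peek the live per-cpu buffer directly */
	event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, &ts, NULL);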
iter              140 kernel/trace/trace_branch.c static enum print_line_t trace_branch_print(struct trace_iterator *iter,
iter              145 kernel/trace/trace_branch.c 	trace_assign_type(field, iter->ent);
iter              147 kernel/trace/trace_branch.c 	trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
iter              153 kernel/trace/trace_branch.c 	return trace_handle_return(&iter->seq);
iter             3081 kernel/trace/trace_events.c 	struct trace_event_call **iter, *call;
iter             3087 kernel/trace/trace_events.c 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
iter             3089 kernel/trace/trace_events.c 		call = *iter;
iter              837 kernel/trace/trace_events_hist.c static enum print_line_t print_synth_event(struct trace_iterator *iter,
iter              841 kernel/trace/trace_events_hist.c 	struct trace_array *tr = iter->tr;
iter              842 kernel/trace/trace_events_hist.c 	struct trace_seq *s = &iter->seq;
iter              849 kernel/trace/trace_events_hist.c 	entry = (struct synth_trace_event *)iter->ent;
iter              420 kernel/trace/trace_functions_graph.c get_return_for_leaf(struct trace_iterator *iter,
iter              423 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              437 kernel/trace/trace_functions_graph.c 		ring_iter = trace_buffer_iter(iter, iter->cpu);
iter              447 kernel/trace/trace_functions_graph.c 			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
iter              449 kernel/trace/trace_functions_graph.c 			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
iter              502 kernel/trace/trace_functions_graph.c print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
iter              506 kernel/trace/trace_functions_graph.c 	usecs = iter->ts - iter->trace_buffer->time_start;
iter              513 kernel/trace/trace_functions_graph.c print_graph_irq(struct trace_iterator *iter, unsigned long addr,
iter              516 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              517 kernel/trace/trace_functions_graph.c 	struct trace_seq *s = &iter->seq;
iter              518 kernel/trace/trace_functions_graph.c 	struct trace_entry *ent = iter->ent;
iter              527 kernel/trace/trace_functions_graph.c 			print_graph_abs_time(iter->ts, s);
iter              531 kernel/trace/trace_functions_graph.c 			print_graph_rel_time(iter, s);
iter              626 kernel/trace/trace_functions_graph.c print_graph_entry_leaf(struct trace_iterator *iter,
iter              631 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              632 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              636 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              670 kernel/trace/trace_functions_graph.c 	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
iter              671 kernel/trace/trace_functions_graph.c 			cpu, iter->ent->pid, flags);
iter              677 kernel/trace/trace_functions_graph.c print_graph_entry_nested(struct trace_iterator *iter,
iter              682 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              683 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              688 kernel/trace/trace_functions_graph.c 		int cpu = iter->cpu;
iter              719 kernel/trace/trace_functions_graph.c print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
iter              722 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              723 kernel/trace/trace_functions_graph.c 	struct trace_entry *ent = iter->ent;
iter              724 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              725 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              732 kernel/trace/trace_functions_graph.c 		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
iter              739 kernel/trace/trace_functions_graph.c 		print_graph_abs_time(iter->ts, s);
iter              743 kernel/trace/trace_functions_graph.c 		print_graph_rel_time(iter, s);
iter              774 kernel/trace/trace_functions_graph.c check_irq_entry(struct trace_iterator *iter, u32 flags,
iter              777 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              779 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              821 kernel/trace/trace_functions_graph.c check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
iter              823 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              825 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              866 kernel/trace/trace_functions_graph.c 			struct trace_iterator *iter, u32 flags)
iter              868 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              872 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              874 kernel/trace/trace_functions_graph.c 	if (check_irq_entry(iter, flags, call->func, call->depth))
iter              877 kernel/trace/trace_functions_graph.c 	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
iter              879 kernel/trace/trace_functions_graph.c 	leaf_ret = get_return_for_leaf(iter, field);
iter              881 kernel/trace/trace_functions_graph.c 		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
iter              883 kernel/trace/trace_functions_graph.c 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
iter              902 kernel/trace/trace_functions_graph.c 		   struct trace_entry *ent, struct trace_iterator *iter,
iter              906 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              907 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              909 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter              913 kernel/trace/trace_functions_graph.c 	if (check_irq_return(iter, flags, trace->depth))
iter              918 kernel/trace/trace_functions_graph.c 		int cpu = iter->cpu;
iter              937 kernel/trace/trace_functions_graph.c 	print_graph_prologue(iter, s, 0, 0, flags);
iter              963 kernel/trace/trace_functions_graph.c 	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
iter              971 kernel/trace/trace_functions_graph.c 		    struct trace_iterator *iter, u32 flags)
iter              973 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter              975 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              982 kernel/trace/trace_functions_graph.c 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
iter              984 kernel/trace/trace_functions_graph.c 	print_graph_prologue(iter, s, 0, 0, flags);
iter              997 kernel/trace/trace_functions_graph.c 	switch (iter->ent->type) {
iter              999 kernel/trace/trace_functions_graph.c 		ret = trace_print_bputs_msg_only(iter);
iter             1004 kernel/trace/trace_functions_graph.c 		ret = trace_print_bprintk_msg_only(iter);
iter             1009 kernel/trace/trace_functions_graph.c 		ret = trace_print_printk_msg_only(iter);
iter             1018 kernel/trace/trace_functions_graph.c 		ret = event->funcs->trace(iter, sym_flags, event);
iter             1039 kernel/trace/trace_functions_graph.c print_graph_function_flags(struct trace_iterator *iter, u32 flags)
iter             1042 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter             1043 kernel/trace/trace_functions_graph.c 	struct trace_entry *entry = iter->ent;
iter             1044 kernel/trace/trace_functions_graph.c 	struct trace_seq *s = &iter->seq;
iter             1045 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
iter             1059 kernel/trace/trace_functions_graph.c 		iter->cpu = data->cpu;
iter             1060 kernel/trace/trace_functions_graph.c 		ret = print_graph_entry(field, s, iter, flags);
iter             1061 kernel/trace/trace_functions_graph.c 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
iter             1062 kernel/trace/trace_functions_graph.c 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
iter             1065 kernel/trace/trace_functions_graph.c 		iter->cpu = cpu;
iter             1080 kernel/trace/trace_functions_graph.c 		return print_graph_entry(&saved, s, iter, flags);
iter             1085 kernel/trace/trace_functions_graph.c 		return print_graph_return(&field->ret, s, entry, iter, flags);
iter             1093 kernel/trace/trace_functions_graph.c 		return print_graph_comment(s, entry, iter, flags);
iter             1100 kernel/trace/trace_functions_graph.c print_graph_function(struct trace_iterator *iter)
iter             1102 kernel/trace/trace_functions_graph.c 	return print_graph_function_flags(iter, tracer_flags.val);
iter             1106 kernel/trace/trace_functions_graph.c print_graph_function_event(struct trace_iterator *iter, int flags,
iter             1109 kernel/trace/trace_functions_graph.c 	return print_graph_function(iter);
iter             1183 kernel/trace/trace_functions_graph.c 	struct trace_iterator *iter = s->private;
iter             1184 kernel/trace/trace_functions_graph.c 	struct trace_array *tr = iter->tr;
iter             1191 kernel/trace/trace_functions_graph.c 		if (trace_empty(iter))
iter             1194 kernel/trace/trace_functions_graph.c 		print_trace_header(s, iter);
iter             1200 kernel/trace/trace_functions_graph.c void graph_trace_open(struct trace_iterator *iter)
iter             1207 kernel/trace/trace_functions_graph.c 	iter->private = NULL;
iter             1232 kernel/trace/trace_functions_graph.c 	iter->private = data;
iter             1242 kernel/trace/trace_functions_graph.c void graph_trace_close(struct trace_iterator *iter)
iter             1244 kernel/trace/trace_functions_graph.c 	struct fgraph_data *data = iter->private;
iter              227 kernel/trace/trace_irqsoff.c static void irqsoff_trace_open(struct trace_iterator *iter)
iter              229 kernel/trace/trace_irqsoff.c 	if (is_graph(iter->tr))
iter              230 kernel/trace/trace_irqsoff.c 		graph_trace_open(iter);
iter              234 kernel/trace/trace_irqsoff.c static void irqsoff_trace_close(struct trace_iterator *iter)
iter              236 kernel/trace/trace_irqsoff.c 	if (iter->private)
iter              237 kernel/trace/trace_irqsoff.c 		graph_trace_close(iter);
iter              245 kernel/trace/trace_irqsoff.c static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
iter              251 kernel/trace/trace_irqsoff.c 	if (is_graph(iter->tr))
iter              252 kernel/trace/trace_irqsoff.c 		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
iter              281 kernel/trace/trace_irqsoff.c static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
iter              286 kernel/trace/trace_irqsoff.c static void irqsoff_trace_open(struct trace_iterator *iter) { }
iter              287 kernel/trace/trace_irqsoff.c static void irqsoff_trace_close(struct trace_iterator *iter) { }
iter               20 kernel/trace/trace_kdb.c static struct trace_iterator iter;
iter               29 kernel/trace/trace_kdb.c 	tr = iter.tr;
iter               40 kernel/trace/trace_kdb.c 	trace_iterator_reset(&iter);
iter               41 kernel/trace/trace_kdb.c 	iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter               45 kernel/trace/trace_kdb.c 			iter.buffer_iter[cpu] =
iter               46 kernel/trace/trace_kdb.c 			ring_buffer_read_prepare(iter.trace_buffer->buffer,
iter               48 kernel/trace/trace_kdb.c 			ring_buffer_read_start(iter.buffer_iter[cpu]);
iter               49 kernel/trace/trace_kdb.c 			tracing_iter_reset(&iter, cpu);
iter               52 kernel/trace/trace_kdb.c 		iter.cpu_file = cpu_file;
iter               53 kernel/trace/trace_kdb.c 		iter.buffer_iter[cpu_file] =
iter               54 kernel/trace/trace_kdb.c 			ring_buffer_read_prepare(iter.trace_buffer->buffer,
iter               56 kernel/trace/trace_kdb.c 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
iter               57 kernel/trace/trace_kdb.c 		tracing_iter_reset(&iter, cpu_file);
iter               60 kernel/trace/trace_kdb.c 	while (trace_find_next_entry_inc(&iter)) {
iter               66 kernel/trace/trace_kdb.c 			print_trace_line(&iter);
iter               67 kernel/trace/trace_kdb.c 			trace_printk_seq(&iter.seq);
iter               85 kernel/trace/trace_kdb.c 		if (iter.buffer_iter[cpu]) {
iter               86 kernel/trace/trace_kdb.c 			ring_buffer_read_finish(iter.buffer_iter[cpu]);
iter               87 kernel/trace/trace_kdb.c 			iter.buffer_iter[cpu] = NULL;
iter              123 kernel/trace/trace_kdb.c 	trace_init_global_iter(&iter);
iter              124 kernel/trace/trace_kdb.c 	iter.buffer_iter = buffer_iter;
iter              127 kernel/trace/trace_kdb.c 		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
iter              142 kernel/trace/trace_kdb.c 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
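A condensed sketch of the non-consuming dump done by the kdb ftdump command above: per-cpu ring buffer iterators are prepared and started (kdb runs with interrupts off, hence GFP_ATOMIC), the shared iterator replays the buffer from the start, and the per-cpu iterators are released afterwards (the skip-count handling and single-cpu case are elided):

trace_iterator_reset(&iter);
iter.iter_flags |= TRACE_FILE_LAT_FMT;

for_each_tracing_cpu(cpu) {
	iter.buffer_iter[cpu] =
		ring_buffer_read_prepare(iter.trace_buffer->buffer,
					 cpu, GFP_ATOMIC);
	ring_buffer_read_start(iter.buffer_iter[cpu]);
	tracing_iter_reset(&iter, cpu);
}

while (trace_find_next_entry_inc(&iter)) {
	print_trace_line(&iter);
	trace_printk_seq(&iter.seq);
}

for_each_tracing_cpu(cpu) {
	if (iter.buffer_iter[cpu]) {
		ring_buffer_read_finish(iter.buffer_iter[cpu]);
		iter.buffer_iter[cpu] = NULL;
	}
}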
iter             1273 kernel/trace/trace_kprobe.c print_kprobe_event(struct trace_iterator *iter, int flags,
iter             1277 kernel/trace/trace_kprobe.c 	struct trace_seq *s = &iter->seq;
iter             1280 kernel/trace/trace_kprobe.c 	field = (struct kprobe_trace_entry_head *)iter->ent;
iter             1303 kernel/trace/trace_kprobe.c print_kretprobe_event(struct trace_iterator *iter, int flags,
iter             1307 kernel/trace/trace_kprobe.c 	struct trace_seq *s = &iter->seq;
iter             1310 kernel/trace/trace_kprobe.c 	field = (struct kretprobe_trace_entry_head *)iter->ent;
iter               99 kernel/trace/trace_mmiotrace.c static void mmio_pipe_open(struct trace_iterator *iter)
iter              102 kernel/trace/trace_mmiotrace.c 	struct trace_seq *s = &iter->seq;
iter              111 kernel/trace/trace_mmiotrace.c 	iter->private = hiter;
iter              115 kernel/trace/trace_mmiotrace.c static void mmio_close(struct trace_iterator *iter)
iter              117 kernel/trace/trace_mmiotrace.c 	struct header_iter *hiter = iter->private;
iter              119 kernel/trace/trace_mmiotrace.c 	iter->private = NULL;
iter              122 kernel/trace/trace_mmiotrace.c static unsigned long count_overruns(struct trace_iterator *iter)
iter              125 kernel/trace/trace_mmiotrace.c 	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
iter              133 kernel/trace/trace_mmiotrace.c static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
iter              137 kernel/trace/trace_mmiotrace.c 	struct header_iter *hiter = iter->private;
iter              138 kernel/trace/trace_mmiotrace.c 	struct trace_seq *s = &iter->seq;
iter              141 kernel/trace/trace_mmiotrace.c 	n = count_overruns(iter);
iter              159 kernel/trace/trace_mmiotrace.c 		iter->private = NULL;
iter              167 kernel/trace/trace_mmiotrace.c static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
iter              169 kernel/trace/trace_mmiotrace.c 	struct trace_entry *entry = iter->ent;
iter              172 kernel/trace/trace_mmiotrace.c 	struct trace_seq *s	= &iter->seq;
iter              173 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
iter              212 kernel/trace/trace_mmiotrace.c static enum print_line_t mmio_print_map(struct trace_iterator *iter)
iter              214 kernel/trace/trace_mmiotrace.c 	struct trace_entry *entry = iter->ent;
iter              217 kernel/trace/trace_mmiotrace.c 	struct trace_seq *s	= &iter->seq;
iter              218 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
iter              246 kernel/trace/trace_mmiotrace.c static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
iter              248 kernel/trace/trace_mmiotrace.c 	struct trace_entry *entry = iter->ent;
iter              251 kernel/trace/trace_mmiotrace.c 	struct trace_seq *s	= &iter->seq;
iter              252 kernel/trace/trace_mmiotrace.c 	unsigned long long t	= ns2usecs(iter->ts);
iter              262 kernel/trace/trace_mmiotrace.c static enum print_line_t mmio_print_line(struct trace_iterator *iter)
iter              264 kernel/trace/trace_mmiotrace.c 	switch (iter->ent->type) {
iter              266 kernel/trace/trace_mmiotrace.c 		return mmio_print_rw(iter);
iter              268 kernel/trace/trace_mmiotrace.c 		return mmio_print_map(iter);
iter              270 kernel/trace/trace_mmiotrace.c 		return mmio_print_mark(iter);
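mmiotrace above shows the per-reader state hook: ->pipe_open() hangs tracer-private data off iter->private, ->read() drains it before falling back to the default pipe read, and ->close() frees it. A minimal sketch of that open/close pairing (my_pipe_open, my_close and struct my_state are hypothetical names; mmiotrace's own type is struct header_iter):

struct my_state {
	struct trace_seq header;	/* text to emit before live events */
};

static void my_pipe_open(struct trace_iterator *iter)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return;

	trace_seq_init(&st->header);
	iter->private = st;		/* visible to later ->read() calls */
}

static void my_close(struct trace_iterator *iter)
{
	kfree(iter->private);
	iter->private = NULL;
}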
iter               25 kernel/trace/trace_output.c enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
iter               27 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter               28 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter               38 kernel/trace/trace_output.c enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
iter               40 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter               41 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter               51 kernel/trace/trace_output.c enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
iter               53 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter               54 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter              277 kernel/trace/trace_output.c int trace_raw_output_prep(struct trace_iterator *iter,
iter              281 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              282 kernel/trace/trace_output.c 	struct trace_seq *p = &iter->tmp_seq;
iter              286 kernel/trace/trace_output.c 	entry = iter->ent;
iter              300 kernel/trace/trace_output.c static int trace_output_raw(struct trace_iterator *iter, char *name,
iter              303 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              311 kernel/trace/trace_output.c int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
iter              317 kernel/trace/trace_output.c 	ret = trace_output_raw(iter, name, fmt, ap);
iter              521 kernel/trace/trace_output.c lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
iter              523 kernel/trace/trace_output.c 	struct trace_array *tr = iter->tr;
iter              525 kernel/trace/trace_output.c 	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
iter              526 kernel/trace/trace_output.c 	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
iter              527 kernel/trace/trace_output.c 	unsigned long long rel_ts = next_ts - iter->ts;
iter              528 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              543 kernel/trace/trace_output.c 			ns2usecs(iter->ts),
iter              550 kernel/trace/trace_output.c 			iter->ts, abs_ts, rel_ts);
iter              565 kernel/trace/trace_output.c int trace_print_context(struct trace_iterator *iter)
iter              567 kernel/trace/trace_output.c 	struct trace_array *tr = iter->tr;
iter              568 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              569 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter              587 kernel/trace/trace_output.c 	trace_seq_printf(s, "[%03d] ", iter->cpu);
iter              592 kernel/trace/trace_output.c 	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
iter              593 kernel/trace/trace_output.c 		t = ns2usecs(iter->ts);
iter              598 kernel/trace/trace_output.c 		trace_seq_printf(s, " %12llu: ", iter->ts);
iter              603 kernel/trace/trace_output.c int trace_print_lat_context(struct trace_iterator *iter)
iter              605 kernel/trace/trace_output.c 	struct trace_array *tr = iter->tr;
iter              607 kernel/trace/trace_output.c 	int ent_size = iter->ent_size;
iter              608 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              610 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent,
iter              611 kernel/trace/trace_output.c 			   *next_entry = trace_find_next_entry(iter, NULL,
iter              616 kernel/trace/trace_output.c 	iter->ent_size = ent_size;
iter              619 kernel/trace/trace_output.c 		next_ts = iter->ts;
iter              628 kernel/trace/trace_output.c 			comm, entry->pid, iter->cpu, entry->flags,
iter              629 kernel/trace/trace_output.c 			entry->preempt_count, iter->idx);
iter              631 kernel/trace/trace_output.c 		lat_print_generic(s, entry, iter->cpu);
iter              634 kernel/trace/trace_output.c 	lat_print_timestamp(iter, next_ts);
iter              810 kernel/trace/trace_output.c enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
iter              813 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
iter              815 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter              819 kernel/trace/trace_output.c static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
iter              823 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              825 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              839 kernel/trace/trace_output.c static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
iter              844 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              846 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq, "%lx %lx\n",
iter              850 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter              853 kernel/trace/trace_output.c static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
iter              857 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              859 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              867 kernel/trace/trace_output.c static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
iter              871 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              873 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              894 kernel/trace/trace_output.c static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
iter              902 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              907 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq,
iter              917 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter              920 kernel/trace/trace_output.c static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
iter              923 kernel/trace/trace_output.c 	return trace_ctxwake_print(iter, "==>");
iter              926 kernel/trace/trace_output.c static enum print_line_t trace_wake_print(struct trace_iterator *iter,
iter              929 kernel/trace/trace_output.c 	return trace_ctxwake_print(iter, "  +");
iter              932 kernel/trace/trace_output.c static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
iter              937 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              942 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
iter              951 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter              954 kernel/trace/trace_output.c static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
iter              957 kernel/trace/trace_output.c 	return trace_ctxwake_raw(iter, 0);
iter              960 kernel/trace/trace_output.c static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
iter              963 kernel/trace/trace_output.c 	return trace_ctxwake_raw(iter, '+');
iter              967 kernel/trace/trace_output.c static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
iter              970 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter              973 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter              990 kernel/trace/trace_output.c static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
iter              993 kernel/trace/trace_output.c 	return trace_ctxwake_hex(iter, 0);
iter              996 kernel/trace/trace_output.c static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
iter              999 kernel/trace/trace_output.c 	return trace_ctxwake_hex(iter, '+');
iter             1002 kernel/trace/trace_output.c static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
iter             1006 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1008 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1047 kernel/trace/trace_output.c static enum print_line_t trace_stack_print(struct trace_iterator *iter,
iter             1051 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1055 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1056 kernel/trace/trace_output.c 	end = (unsigned long *)((long)iter->ent + iter->ent_size);
iter             1083 kernel/trace/trace_output.c static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
iter             1086 kernel/trace/trace_output.c 	struct trace_array *tr = iter->tr;
iter             1088 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1092 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1137 kernel/trace/trace_output.c trace_hwlat_print(struct trace_iterator *iter, int flags,
iter             1140 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter             1141 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1172 kernel/trace/trace_output.c trace_hwlat_raw(struct trace_iterator *iter, int flags,
iter             1176 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1178 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1202 kernel/trace/trace_output.c trace_bputs_print(struct trace_iterator *iter, int flags,
iter             1205 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter             1206 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1220 kernel/trace/trace_output.c trace_bputs_raw(struct trace_iterator *iter, int flags,
iter             1224 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1226 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1246 kernel/trace/trace_output.c trace_bprint_print(struct trace_iterator *iter, int flags,
iter             1249 kernel/trace/trace_output.c 	struct trace_entry *entry = iter->ent;
iter             1250 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1264 kernel/trace/trace_output.c trace_bprint_raw(struct trace_iterator *iter, int flags,
iter             1268 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1270 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1289 kernel/trace/trace_output.c static enum print_line_t trace_print_print(struct trace_iterator *iter,
iter             1293 kernel/trace/trace_output.c 	struct trace_seq *s = &iter->seq;
iter             1295 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1303 kernel/trace/trace_output.c static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
iter             1308 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1310 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
iter             1312 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter             1325 kernel/trace/trace_output.c static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
iter             1331 kernel/trace/trace_output.c 	trace_assign_type(field, iter->ent);
iter             1333 kernel/trace/trace_output.c 	trace_seq_printf(&iter->seq, "# %x buf:", field->id);
iter             1335 kernel/trace/trace_output.c 	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
iter             1336 kernel/trace/trace_output.c 		trace_seq_printf(&iter->seq, " %02x",
iter             1339 kernel/trace/trace_output.c 	trace_seq_putc(&iter->seq, '\n');
iter             1341 kernel/trace/trace_output.c 	return trace_handle_return(&iter->seq);
iter                9 kernel/trace/trace_output.h trace_print_bputs_msg_only(struct trace_iterator *iter);
iter               11 kernel/trace/trace_output.h trace_print_bprintk_msg_only(struct trace_iterator *iter);
iter               13 kernel/trace/trace_output.h trace_print_printk_msg_only(struct trace_iterator *iter);
iter               19 kernel/trace/trace_output.h extern int trace_print_context(struct trace_iterator *iter);
iter               20 kernel/trace/trace_output.h extern int trace_print_lat_context(struct trace_iterator *iter);
iter               26 kernel/trace/trace_output.h extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
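Note: the kernel/trace/trace_output.c entries above all share one callback shape: cast iter->ent to the event's own entry type, format into iter->seq, and finish with trace_handle_return(). A minimal hedged sketch of that shape follows; "my_entry", "my_event_print" and the field names are hypothetical, and real handlers usually use trace_assign_type() rather than an open cast.

        #include <linux/trace_events.h>
        #include <linux/trace_seq.h>

        struct my_entry {
                struct trace_entry      ent;    /* common header, what iter->ent points at */
                unsigned long           ip;     /* hypothetical payload */
        };

        static enum print_line_t my_event_print(struct trace_iterator *iter, int flags,
                                                struct trace_event *event)
        {
                struct my_entry *field = (struct my_entry *)iter->ent;
                struct trace_seq *s = &iter->seq;

                trace_seq_printf(s, "my_event: ip=%pS\n", (void *)field->ip);

                /* TRACE_TYPE_HANDLED unless the seq buffer overflowed */
                return trace_handle_return(s);
        }

        static struct trace_event_functions my_event_funcs = {
                .trace  = my_event_print,
        };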
iter               55 kernel/trace/trace_printk.c 	const char **iter;
iter               63 kernel/trace/trace_printk.c 	for (iter = start; iter < end; iter++) {
iter               64 kernel/trace/trace_printk.c 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
iter               67 kernel/trace/trace_printk.c 				*iter = tb_fmt->fmt;
iter               74 kernel/trace/trace_printk.c 			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
iter               77 kernel/trace/trace_printk.c 				strcpy(fmt, *iter);
iter               82 kernel/trace/trace_printk.c 		*iter = fmt;
iter              170 kernel/trace/trace_sched_wakeup.c static void wakeup_trace_open(struct trace_iterator *iter)
iter              172 kernel/trace/trace_sched_wakeup.c 	if (is_graph(iter->tr))
iter              173 kernel/trace/trace_sched_wakeup.c 		graph_trace_open(iter);
iter              176 kernel/trace/trace_sched_wakeup.c static void wakeup_trace_close(struct trace_iterator *iter)
iter              178 kernel/trace/trace_sched_wakeup.c 	if (iter->private)
iter              179 kernel/trace/trace_sched_wakeup.c 		graph_trace_close(iter);
iter              189 kernel/trace/trace_sched_wakeup.c static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
iter              195 kernel/trace/trace_sched_wakeup.c 	if (is_graph(iter->tr))
iter              196 kernel/trace/trace_sched_wakeup.c 		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
iter              289 kernel/trace/trace_sched_wakeup.c static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
iter              294 kernel/trace/trace_sched_wakeup.c static void wakeup_trace_open(struct trace_iterator *iter) { }
iter              295 kernel/trace/trace_sched_wakeup.c static void wakeup_trace_close(struct trace_iterator *iter) { }
iter              122 kernel/trace/trace_syscalls.c print_syscall_enter(struct trace_iterator *iter, int flags,
iter              125 kernel/trace/trace_syscalls.c 	struct trace_array *tr = iter->tr;
iter              126 kernel/trace/trace_syscalls.c 	struct trace_seq *s = &iter->seq;
iter              127 kernel/trace/trace_syscalls.c 	struct trace_entry *ent = iter->ent;
iter              169 kernel/trace/trace_syscalls.c print_syscall_exit(struct trace_iterator *iter, int flags,
iter              172 kernel/trace/trace_syscalls.c 	struct trace_seq *s = &iter->seq;
iter              173 kernel/trace/trace_syscalls.c 	struct trace_entry *ent = iter->ent;
iter             1001 kernel/trace/trace_uprobe.c print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
iter             1004 kernel/trace/trace_uprobe.c 	struct trace_seq *s = &iter->seq;
iter             1008 kernel/trace/trace_uprobe.c 	entry = (struct uprobe_trace_entry_head *)iter->ent;
iter              366 kernel/tracepoint.c 	tracepoint_ptr_t *iter;
iter              370 kernel/tracepoint.c 	for (iter = begin; iter < end; iter++)
iter              371 kernel/tracepoint.c 		fct(tracepoint_ptr_deref(iter), priv);
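Note: for_each_tracepoint_range() above backs the exported for_each_kernel_tracepoint() helper, which hands every core-kernel tracepoint to a caller-supplied callback. A hedged sketch; "show_tp" and the counter are hypothetical.

        #include <linux/tracepoint.h>
        #include <linux/kernel.h>

        static void show_tp(struct tracepoint *tp, void *priv)
        {
                int *count = priv;

                pr_info("tracepoint %s\n", tp->name);
                (*count)++;
        }

        static void list_core_tracepoints(void)
        {
                int count = 0;

                for_each_kernel_tracepoint(show_tp, &count);
                pr_info("%d core tracepoints\n", count);
        }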
iter              192 kernel/ucount.c 	struct ucounts *ucounts, *iter, *bad;
iter              195 kernel/ucount.c 	for (iter = ucounts; iter; iter = tns->ucounts) {
iter              197 kernel/ucount.c 		tns = iter->ns;
iter              199 kernel/ucount.c 		if (!atomic_inc_below(&iter->ucount[type], max))
iter              204 kernel/ucount.c 	bad = iter;
iter              205 kernel/ucount.c 	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
iter              206 kernel/ucount.c 		atomic_dec(&iter->ucount[type]);
iter              214 kernel/ucount.c 	struct ucounts *iter;
iter              215 kernel/ucount.c 	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
iter              216 kernel/ucount.c 		int dec = atomic_dec_if_positive(&iter->ucount[type]);
iter              727 lib/dynamic_debug.c static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
iter              730 lib/dynamic_debug.c 		iter->table = NULL;
iter              731 lib/dynamic_debug.c 		iter->idx = 0;
iter              734 lib/dynamic_debug.c 	iter->table = list_entry(ddebug_tables.next,
iter              736 lib/dynamic_debug.c 	iter->idx = 0;
iter              737 lib/dynamic_debug.c 	return &iter->table->ddebugs[iter->idx];
iter              746 lib/dynamic_debug.c static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
iter              748 lib/dynamic_debug.c 	if (iter->table == NULL)
iter              750 lib/dynamic_debug.c 	if (++iter->idx == iter->table->num_ddebugs) {
iter              752 lib/dynamic_debug.c 		iter->idx = 0;
iter              753 lib/dynamic_debug.c 		if (list_is_last(&iter->table->link, &ddebug_tables)) {
iter              754 lib/dynamic_debug.c 			iter->table = NULL;
iter              757 lib/dynamic_debug.c 		iter->table = list_entry(iter->table->link.next,
iter              760 lib/dynamic_debug.c 	return &iter->table->ddebugs[iter->idx];
iter              770 lib/dynamic_debug.c 	struct ddebug_iter *iter = m->private;
iter              782 lib/dynamic_debug.c 	dp = ddebug_iter_first(iter);
iter              784 lib/dynamic_debug.c 		dp = ddebug_iter_next(iter);
iter              795 lib/dynamic_debug.c 	struct ddebug_iter *iter = m->private;
iter              802 lib/dynamic_debug.c 		dp = ddebug_iter_first(iter);
iter              804 lib/dynamic_debug.c 		dp = ddebug_iter_next(iter);
iter              817 lib/dynamic_debug.c 	struct ddebug_iter *iter = m->private;
iter              831 lib/dynamic_debug.c 		   iter->table->mod_name, dp->function,
iter             1009 lib/dynamic_debug.c 	struct _ddebug *iter, *iter_start;
iter             1020 lib/dynamic_debug.c 	iter = __start___verbose;
iter             1021 lib/dynamic_debug.c 	modname = iter->modname;
iter             1022 lib/dynamic_debug.c 	iter_start = iter;
iter             1023 lib/dynamic_debug.c 	for (; iter < __stop___verbose; iter++) {
iter             1025 lib/dynamic_debug.c 		verbose_bytes += strlen(iter->modname) + strlen(iter->function)
iter             1026 lib/dynamic_debug.c 			+ strlen(iter->filename) + strlen(iter->format);
iter             1028 lib/dynamic_debug.c 		if (strcmp(modname, iter->modname)) {
iter             1034 lib/dynamic_debug.c 			modname = iter->modname;
iter             1035 lib/dynamic_debug.c 			iter_start = iter;
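Note: the lib/dynamic_debug.c entries above implement the standard seq_file cursor pattern (m->private carries the iterator, .start positions it at *pos, .next advances it, .show prints one record). A hedged sketch of the same pattern using the generic seq_list_* helpers instead of dynamic_debug's hand-rolled table cursor; "my_item"/"my_list" are hypothetical.

        #include <linux/seq_file.h>
        #include <linux/list.h>

        struct my_item {
                struct list_head list;
                const char *name;
        };

        static LIST_HEAD(my_list);

        static void *my_seq_start(struct seq_file *m, loff_t *pos)
        {
                return seq_list_start(&my_list, *pos);
        }

        static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
        {
                return seq_list_next(v, &my_list, pos);
        }

        static void my_seq_stop(struct seq_file *m, void *v)
        {
        }

        static int my_seq_show(struct seq_file *m, void *v)
        {
                struct my_item *item = list_entry(v, struct my_item, list);

                seq_printf(m, "%s\n", item->name);
                return 0;
        }

        static const struct seq_operations my_seq_ops = {
                .start  = my_seq_start,
                .next   = my_seq_next,
                .stop   = my_seq_stop,
                .show   = my_seq_show,
        };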
iter               61 lib/error-inject.c 	struct error_injection_entry *iter;
iter               66 lib/error-inject.c 	for (iter = start; iter < end; iter++) {
iter               67 lib/error-inject.c 		entry = arch_deref_entry_point((void *)iter->addr);
iter               81 lib/error-inject.c 		ent->etype = iter->etype;
iter              162 lib/generic-radix-tree.c void *__genradix_iter_peek(struct genradix_iter *iter,
iter              177 lib/generic-radix-tree.c 	if (ilog2(iter->offset) >= genradix_depth_shift(level))
iter              183 lib/generic-radix-tree.c 		i = (iter->offset >> genradix_depth_shift(level)) &
iter              188 lib/generic-radix-tree.c 			iter->offset = round_down(iter->offset +
iter              191 lib/generic-radix-tree.c 			iter->pos = (iter->offset >> PAGE_SHIFT) *
iter              200 lib/generic-radix-tree.c 	return &n->data[iter->offset & (PAGE_SIZE - 1)];
iter               36 lib/idr.c      	struct radix_tree_iter iter;
iter               45 lib/idr.c      	radix_tree_iter_init(&iter, id);
iter               46 lib/idr.c      	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
iter               50 lib/idr.c      	*nextid = iter.index + base;
iter               52 lib/idr.c      	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
iter               53 lib/idr.c      	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
iter              198 lib/idr.c      	struct radix_tree_iter iter;
iter              202 lib/idr.c      	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
iter              204 lib/idr.c      		unsigned long id = iter.index + base;
iter              229 lib/idr.c      	struct radix_tree_iter iter;
iter              236 lib/idr.c      	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
iter              244 lib/idr.c      		slot = radix_tree_iter_retry(&iter);
iter              249 lib/idr.c      	*nextid = iter.index + base;
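Note: the lib/idr.c entries above are the radix-tree-iterator internals behind the public IDR API. A hedged sketch of that public API from the caller's side; "my_obj"/"my_idr_demo" are hypothetical.

        #include <linux/idr.h>
        #include <linux/slab.h>

        struct my_obj {
                int val;
        };

        static DEFINE_IDR(my_idr);

        static int my_idr_demo(void)
        {
                struct my_obj *obj, *entry;
                int my_id, id;

                obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;

                /* bind obj to the lowest free ID (no upper bound) */
                my_id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
                if (my_id < 0) {
                        kfree(obj);
                        return my_id;
                }

                /* visit every (id, pointer) pair currently installed */
                idr_for_each_entry(&my_idr, entry, id)
                        entry->val++;

                idr_remove(&my_idr, my_id);
                kfree(obj);
                return 0;
        }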
iter               50 lib/math/cordic.c 	unsigned iter;
iter               71 lib/math/cordic.c 	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
iter               73 lib/math/cordic.c 			valtmp = coord.i - (coord.q >> iter);
iter               74 lib/math/cordic.c 			coord.q += (coord.i >> iter);
iter               75 lib/math/cordic.c 			angle += arctan_table[iter];
iter               77 lib/math/cordic.c 			valtmp = coord.i + (coord.q >> iter);
iter               78 lib/math/cordic.c 			coord.q -= (coord.i >> iter);
iter               79 lib/math/cordic.c 			angle -= arctan_table[iter];
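Note: the lib/math/cordic.c loop above is a fixed-point CORDIC rotation: each step rotates by +/-atan(2^-iter) using only shifts and adds. A standalone illustrative sketch of the same idea (not the kernel's exact table, scaling, or API); angles and outputs are in a hypothetical Q16 format.

        #include <stdio.h>
        #include <stdint.h>

        #define CORDIC_ITER     16
        #define Q16(x)          ((int32_t)((x) * 65536.0))

        /* atan(2^-i) in Q16 degrees */
        static const int32_t arctan_q16[CORDIC_ITER] = {
                Q16(45.0),     Q16(26.565051), Q16(14.036243), Q16(7.125016),
                Q16(3.576334), Q16(1.789911),  Q16(0.895174),  Q16(0.447614),
                Q16(0.223811), Q16(0.111906),  Q16(0.055953),  Q16(0.027976),
                Q16(0.013988), Q16(0.006994),  Q16(0.003497),  Q16(0.001749),
        };

        /* rotate (K, 0) toward theta (Q16 degrees, |theta| <= 90) to get cos/sin */
        static void cordic_iq(int32_t theta, int32_t *cos_out, int32_t *sin_out)
        {
                /* K = prod 1/sqrt(1 + 2^-2i) ~= 0.607253, pre-scaled into x */
                int32_t x = Q16(0.607253), y = 0, angle = 0;
                unsigned int iter;

                for (iter = 0; iter < CORDIC_ITER; iter++) {
                        int32_t xtmp;

                        if (angle < theta) {            /* rotate counter-clockwise */
                                xtmp   = x - (y >> iter);
                                y     += x >> iter;
                                angle += arctan_q16[iter];
                        } else {                        /* rotate clockwise */
                                xtmp   = x + (y >> iter);
                                y     -= x >> iter;
                                angle -= arctan_q16[iter];
                        }
                        x = xtmp;
                }
                *cos_out = x;
                *sin_out = y;
        }

        int main(void)
        {
                int32_t c, s;

                cordic_iq(Q16(30.0), &c, &s);
                printf("cos(30)=%f sin(30)=%f\n", c / 65536.0, s / 65536.0);
                return 0;
        }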
iter               75 lib/plist.c    	struct plist_node *first, *iter, *prev = NULL;
iter               85 lib/plist.c    	first = iter = plist_first(head);
iter               88 lib/plist.c    		if (node->prio < iter->prio) {
iter               89 lib/plist.c    			node_next = &iter->node_list;
iter               93 lib/plist.c    		prev = iter;
iter               94 lib/plist.c    		iter = list_entry(iter->prio_list.next,
iter               96 lib/plist.c    	} while (iter != first);
iter               99 lib/plist.c    		list_add_tail(&node->prio_list, &iter->prio_list);
iter              147 lib/plist.c    	struct plist_node *iter;
iter              157 lib/plist.c    	iter = plist_next(node);
iter              159 lib/plist.c    	if (node->prio != iter->prio)
iter              164 lib/plist.c    	plist_for_each_continue(iter, head) {
iter              165 lib/plist.c    		if (node->prio != iter->prio) {
iter              166 lib/plist.c    			node_next = &iter->node_list;
iter              212 lib/radix-tree.c static unsigned int iter_offset(const struct radix_tree_iter *iter)
iter              214 lib/radix-tree.c 	return iter->index & RADIX_TREE_MAP_MASK;
iter              940 lib/radix-tree.c 				const struct radix_tree_iter *iter,
iter              943 lib/radix-tree.c 	__radix_tree_replace(root, iter->node, slot, item);
iter             1069 lib/radix-tree.c 			const struct radix_tree_iter *iter, unsigned int tag)
iter             1071 lib/radix-tree.c 	node_tag_clear(root, iter->node, tag, iter_offset(iter));
iter             1119 lib/radix-tree.c static void set_iter_tags(struct radix_tree_iter *iter,
iter             1127 lib/radix-tree.c 		iter->tags = 1;
iter             1131 lib/radix-tree.c 	iter->tags = node->tags[tag][tag_long] >> tag_bit;
iter             1137 lib/radix-tree.c 			iter->tags |= node->tags[tag][tag_long + 1] <<
iter             1140 lib/radix-tree.c 		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
iter             1145 lib/radix-tree.c 					struct radix_tree_iter *iter)
iter             1148 lib/radix-tree.c 	iter->index = __radix_tree_iter_add(iter, 1);
iter             1149 lib/radix-tree.c 	iter->next_index = iter->index;
iter             1150 lib/radix-tree.c 	iter->tags = 0;
iter             1164 lib/radix-tree.c 			     struct radix_tree_iter *iter, unsigned flags)
iter             1182 lib/radix-tree.c 	index = iter->next_index;
iter             1183 lib/radix-tree.c 	if (!index && iter->index)
iter             1195 lib/radix-tree.c 		iter->index = index;
iter             1196 lib/radix-tree.c 		iter->next_index = maxindex + 1;
iter             1197 lib/radix-tree.c 		iter->tags = 1;
iter             1198 lib/radix-tree.c 		iter->node = NULL;
iter             1239 lib/radix-tree.c 	iter->index = (index &~ node_maxindex(node)) | offset;
iter             1240 lib/radix-tree.c 	iter->next_index = (index | node_maxindex(node)) + 1;
iter             1241 lib/radix-tree.c 	iter->node = node;
iter             1244 lib/radix-tree.c 		set_iter_tags(iter, node, offset, tag);
iter             1274 lib/radix-tree.c 	struct radix_tree_iter iter;
iter             1281 lib/radix-tree.c 	radix_tree_for_each_slot(slot, root, &iter, first_index) {
iter             1286 lib/radix-tree.c 			slot = radix_tree_iter_retry(&iter);
iter             1315 lib/radix-tree.c 	struct radix_tree_iter iter;
iter             1322 lib/radix-tree.c 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
iter             1327 lib/radix-tree.c 			slot = radix_tree_iter_retry(&iter);
iter             1356 lib/radix-tree.c 	struct radix_tree_iter iter;
iter             1363 lib/radix-tree.c 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
iter             1404 lib/radix-tree.c 				struct radix_tree_iter *iter, void __rcu **slot)
iter             1406 lib/radix-tree.c 	if (__radix_tree_delete(root, iter->node, slot))
iter             1407 lib/radix-tree.c 		iter->index = iter->next_index;
iter             1486 lib/radix-tree.c 			      struct radix_tree_iter *iter, gfp_t gfp,
iter             1491 lib/radix-tree.c 	unsigned long maxindex, start = iter->next_index;
iter             1546 lib/radix-tree.c 	iter->index = start;
iter             1548 lib/radix-tree.c 		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
iter             1550 lib/radix-tree.c 		iter->next_index = 1;
iter             1551 lib/radix-tree.c 	iter->node = node;
iter             1552 lib/radix-tree.c 	set_iter_tags(iter, node, offset, IDR_FREE);
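Note: the lib/radix-tree.c entries above implement the iterator that radix_tree_for_each_slot() drives. A hedged sketch of the caller-side pattern, including the deref-retry dance shown in the lib/idr.c and lib/radix-tree.c lookup loops; "my_tree"/"my_item" are hypothetical.

        #include <linux/radix-tree.h>
        #include <linux/rcupdate.h>
        #include <linux/kernel.h>

        static RADIX_TREE(my_tree, GFP_KERNEL);

        struct my_item {
                unsigned long idx;
        };

        static void my_radix_walk(void)
        {
                struct radix_tree_iter iter;
                void __rcu **slot;

                rcu_read_lock();
                /* visit every populated slot, starting at index 0 */
                radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
                        struct my_item *item = radix_tree_deref_slot(slot);

                        if (!item)
                                continue;
                        if (radix_tree_deref_retry(item)) {
                                /* raced with a concurrent shrink: restart this index */
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        pr_info("index %lu val %lu\n", iter.index, item->idx);
                }
                rcu_read_unlock();
        }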
iter              658 lib/rhashtable.c void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
iter              660 lib/rhashtable.c 	iter->ht = ht;
iter              661 lib/rhashtable.c 	iter->p = NULL;
iter              662 lib/rhashtable.c 	iter->slot = 0;
iter              663 lib/rhashtable.c 	iter->skip = 0;
iter              664 lib/rhashtable.c 	iter->end_of_table = 0;
iter              667 lib/rhashtable.c 	iter->walker.tbl =
iter              669 lib/rhashtable.c 	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
iter              680 lib/rhashtable.c void rhashtable_walk_exit(struct rhashtable_iter *iter)
iter              682 lib/rhashtable.c 	spin_lock(&iter->ht->lock);
iter              683 lib/rhashtable.c 	if (iter->walker.tbl)
iter              684 lib/rhashtable.c 		list_del(&iter->walker.list);
iter              685 lib/rhashtable.c 	spin_unlock(&iter->ht->lock);
iter              707 lib/rhashtable.c int rhashtable_walk_start_check(struct rhashtable_iter *iter)
iter              710 lib/rhashtable.c 	struct rhashtable *ht = iter->ht;
iter              716 lib/rhashtable.c 	if (iter->walker.tbl)
iter              717 lib/rhashtable.c 		list_del(&iter->walker.list);
iter              720 lib/rhashtable.c 	if (iter->end_of_table)
iter              722 lib/rhashtable.c 	if (!iter->walker.tbl) {
iter              723 lib/rhashtable.c 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
iter              724 lib/rhashtable.c 		iter->slot = 0;
iter              725 lib/rhashtable.c 		iter->skip = 0;
iter              729 lib/rhashtable.c 	if (iter->p && !rhlist) {
iter              736 lib/rhashtable.c 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
iter              738 lib/rhashtable.c 			if (p == iter->p) {
iter              739 lib/rhashtable.c 				iter->skip = skip;
iter              743 lib/rhashtable.c 		iter->p = NULL;
iter              744 lib/rhashtable.c 	} else if (iter->p && rhlist) {
iter              751 lib/rhashtable.c 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
iter              756 lib/rhashtable.c 				if (list == iter->list) {
iter              757 lib/rhashtable.c 					iter->p = p;
iter              758 lib/rhashtable.c 					iter->skip = skip;
iter              763 lib/rhashtable.c 		iter->p = NULL;
iter              780 lib/rhashtable.c static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
iter              782 lib/rhashtable.c 	struct bucket_table *tbl = iter->walker.tbl;
iter              783 lib/rhashtable.c 	struct rhlist_head *list = iter->list;
iter              784 lib/rhashtable.c 	struct rhashtable *ht = iter->ht;
iter              785 lib/rhashtable.c 	struct rhash_head *p = iter->p;
iter              791 lib/rhashtable.c 	for (; iter->slot < tbl->size; iter->slot++) {
iter              792 lib/rhashtable.c 		int skip = iter->skip;
iter              794 lib/rhashtable.c 		rht_for_each_rcu(p, tbl, iter->slot) {
iter              814 lib/rhashtable.c 			iter->skip++;
iter              815 lib/rhashtable.c 			iter->p = p;
iter              816 lib/rhashtable.c 			iter->list = list;
iter              820 lib/rhashtable.c 		iter->skip = 0;
iter              823 lib/rhashtable.c 	iter->p = NULL;
iter              828 lib/rhashtable.c 	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
iter              829 lib/rhashtable.c 	if (iter->walker.tbl) {
iter              830 lib/rhashtable.c 		iter->slot = 0;
iter              831 lib/rhashtable.c 		iter->skip = 0;
iter              834 lib/rhashtable.c 		iter->end_of_table = true;
iter              852 lib/rhashtable.c void *rhashtable_walk_next(struct rhashtable_iter *iter)
iter              854 lib/rhashtable.c 	struct rhlist_head *list = iter->list;
iter              855 lib/rhashtable.c 	struct rhashtable *ht = iter->ht;
iter              856 lib/rhashtable.c 	struct rhash_head *p = iter->p;
iter              865 lib/rhashtable.c 			iter->skip++;
iter              866 lib/rhashtable.c 			iter->p = p;
iter              867 lib/rhashtable.c 			iter->list = list;
iter              874 lib/rhashtable.c 		iter->skip = 0;
iter              875 lib/rhashtable.c 		iter->slot++;
iter              878 lib/rhashtable.c 	return __rhashtable_walk_find_next(iter);
iter              891 lib/rhashtable.c void *rhashtable_walk_peek(struct rhashtable_iter *iter)
iter              893 lib/rhashtable.c 	struct rhlist_head *list = iter->list;
iter              894 lib/rhashtable.c 	struct rhashtable *ht = iter->ht;
iter              895 lib/rhashtable.c 	struct rhash_head *p = iter->p;
iter              902 lib/rhashtable.c 	if (iter->skip) {
iter              909 lib/rhashtable.c 		iter->skip--;
iter              912 lib/rhashtable.c 	return __rhashtable_walk_find_next(iter);
iter              923 lib/rhashtable.c void rhashtable_walk_stop(struct rhashtable_iter *iter)
iter              927 lib/rhashtable.c 	struct bucket_table *tbl = iter->walker.tbl;
iter              932 lib/rhashtable.c 	ht = iter->ht;
iter              937 lib/rhashtable.c 		iter->walker.tbl = NULL;
iter              939 lib/rhashtable.c 		list_add(&iter->walker.list, &tbl->walkers);
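Note: the lib/rhashtable.c entries above implement the walker API; the canonical caller-side shape is enter/start/next/stop/exit with an -EAGAIN retry when the table is resized mid-walk (net/core/xdp.c later in this listing uses exactly that shape). A hedged sketch; "my_walk"/"my_obj" are hypothetical and the table setup is omitted.

        #include <linux/rhashtable.h>
        #include <linux/kernel.h>

        struct my_obj {
                u32 key;
                struct rhash_head node;
        };

        static void my_walk(struct rhashtable *my_ht)
        {
                struct rhashtable_iter iter;
                struct my_obj *obj;

                rhashtable_walk_enter(my_ht, &iter);
                do {
                        rhashtable_walk_start(&iter);

                        while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj)) {
                                /* entries may be seen twice or skipped if a resize races */
                                pr_info("key %u\n", obj->key);
                        }

                        rhashtable_walk_stop(&iter);
                        /* ERR_PTR(-EAGAIN) means the table was resized under us: retry */
                } while (obj == ERR_PTR(-EAGAIN));
                rhashtable_walk_exit(&iter);
        }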
iter              197 lib/test_meminit.c 	int iter;
iter              205 lib/test_meminit.c 	for (iter = 0; iter < 10; iter++) {
iter              258 lib/test_meminit.c 	int i, iter, maxiter = 1024;
iter              280 lib/test_meminit.c 	for (iter = 0; iter < maxiter; iter++) {
iter              282 lib/test_meminit.c 		used_objects[iter] = buf;
iter              285 lib/test_meminit.c 			for (i = 0; i <= iter; i++)
iter              303 lib/test_meminit.c 	int i, iter, maxiter = 1024;
iter              309 lib/test_meminit.c 	for (iter = 0; (iter < maxiter) && !fail; iter++) {
iter              714 mm/backing-dev.c 	struct radix_tree_iter iter;
iter              721 mm/backing-dev.c 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
iter             2010 mm/filemap.c   		struct iov_iter *iter, ssize_t written)
iter             2026 mm/filemap.c   	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
iter             2031 mm/filemap.c   	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
iter             2084 mm/filemap.c   			if (unlikely(iov_iter_is_pipe(iter)))
iter             2092 mm/filemap.c   							offset, iter->count))
iter             2144 mm/filemap.c   		ret = copy_page_to_iter(page, offset, nr, iter);
iter             2152 mm/filemap.c   		if (!iov_iter_count(iter))
iter             2274 mm/filemap.c   generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
iter             2276 mm/filemap.c   	size_t count = iov_iter_count(iter);
iter             2303 mm/filemap.c   		retval = mapping->a_ops->direct_IO(iocb, iter);
iter             2308 mm/filemap.c   		iov_iter_revert(iter, count - iov_iter_count(iter));
iter             2324 mm/filemap.c   	retval = generic_file_buffered_read(iocb, iter, retval);
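Note: the mm/filemap.c entries above are the consumer side of an iov_iter: count what the caller asked for, copy page data into the iterator, and advance. A hedged sketch of a minimal ->read_iter handler that copies from a kernel buffer; "my_read_iter"/"my_buf" are hypothetical.

        #include <linux/fs.h>
        #include <linux/uio.h>
        #include <linux/kernel.h>

        static char my_buf[4096];

        static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        {
                loff_t pos = iocb->ki_pos;
                size_t len;

                if (pos >= sizeof(my_buf))
                        return 0;

                /* copy at most what the caller asked for and what we have left */
                len = min_t(size_t, iov_iter_count(iter), sizeof(my_buf) - pos);
                len = copy_to_iter(my_buf + pos, len, iter);

                iocb->ki_pos += len;
                return len;
        }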
iter              409 mm/internal.h  static inline struct page *mem_map_next(struct page *iter,
iter              418 mm/internal.h  	return iter + 1;
iter              239 mm/memcontrol.c #define for_each_mem_cgroup_tree(iter, root)		\
iter              240 mm/memcontrol.c 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
iter              241 mm/memcontrol.c 	     iter != NULL;				\
iter              242 mm/memcontrol.c 	     iter = mem_cgroup_iter(root, iter, NULL))
iter              244 mm/memcontrol.c #define for_each_mem_cgroup(iter)			\
iter              245 mm/memcontrol.c 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
iter              246 mm/memcontrol.c 	     iter != NULL;				\
iter              247 mm/memcontrol.c 	     iter = mem_cgroup_iter(NULL, iter, NULL))
iter             1042 mm/memcontrol.c 	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
iter             1068 mm/memcontrol.c 		iter = &mz->iter[reclaim->priority];
iter             1070 mm/memcontrol.c 		if (prev && reclaim->generation != iter->generation)
iter             1074 mm/memcontrol.c 			pos = READ_ONCE(iter->position);
iter             1085 mm/memcontrol.c 			(void)cmpxchg(&iter->position, pos, NULL);
iter             1128 mm/memcontrol.c 		(void)cmpxchg(&iter->position, pos, memcg);
iter             1134 mm/memcontrol.c 			iter->generation++;
iter             1136 mm/memcontrol.c 			reclaim->generation = iter->generation;
iter             1165 mm/memcontrol.c 	struct mem_cgroup_reclaim_iter *iter;
iter             1173 mm/memcontrol.c 			iter = &mz->iter[i];
iter             1174 mm/memcontrol.c 			cmpxchg(&iter->position,
iter             1217 mm/memcontrol.c 	struct mem_cgroup *iter;
iter             1222 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg) {
iter             1226 mm/memcontrol.c 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
iter             1231 mm/memcontrol.c 			mem_cgroup_iter_break(memcg, iter);
iter             1774 mm/memcontrol.c 	struct mem_cgroup *iter, *failed = NULL;
iter             1778 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg) {
iter             1779 mm/memcontrol.c 		if (iter->oom_lock) {
iter             1784 mm/memcontrol.c 			failed = iter;
iter             1785 mm/memcontrol.c 			mem_cgroup_iter_break(memcg, iter);
iter             1788 mm/memcontrol.c 			iter->oom_lock = true;
iter             1796 mm/memcontrol.c 		for_each_mem_cgroup_tree(iter, memcg) {
iter             1797 mm/memcontrol.c 			if (iter == failed) {
iter             1798 mm/memcontrol.c 				mem_cgroup_iter_break(memcg, iter);
iter             1801 mm/memcontrol.c 			iter->oom_lock = false;
iter             1813 mm/memcontrol.c 	struct mem_cgroup *iter;
iter             1817 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg)
iter             1818 mm/memcontrol.c 		iter->oom_lock = false;
iter             1824 mm/memcontrol.c 	struct mem_cgroup *iter;
iter             1827 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg)
iter             1828 mm/memcontrol.c 		iter->under_oom++;
iter             1834 mm/memcontrol.c 	struct mem_cgroup *iter;
iter             1841 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg)
iter             1842 mm/memcontrol.c 		if (iter->under_oom > 0)
iter             1843 mm/memcontrol.c 			iter->under_oom--;
iter             3858 mm/memcontrol.c 		struct mem_cgroup *iter;
iter             3861 mm/memcontrol.c 		for_each_mem_cgroup_tree(iter, memcg)
iter             3862 mm/memcontrol.c 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
iter             3866 mm/memcontrol.c 			for_each_mem_cgroup_tree(iter, memcg)
iter             3868 mm/memcontrol.c 					iter, nid, stat->lru_mask);
iter             4113 mm/memcontrol.c 	struct mem_cgroup *iter;
iter             4115 mm/memcontrol.c 	for_each_mem_cgroup_tree(iter, memcg)
iter             4116 mm/memcontrol.c 		mem_cgroup_oom_notify_cb(iter);
iter             8195 mm/page_alloc.c 	unsigned long iter = 0;
iter             8220 mm/page_alloc.c 	for (found = 0; iter < pageblock_nr_pages; iter++) {
iter             8221 mm/page_alloc.c 		unsigned long check = pfn + iter;
iter             8252 mm/page_alloc.c 			iter += skip_pages - 1;
iter             8264 mm/page_alloc.c 				iter += (1 << page_order(page)) - 1;
iter             8300 mm/page_alloc.c 		dump_page(pfn_to_page(pfn + iter), reason);
iter               33 mm/process_vm_access.c 			       struct iov_iter *iter,
iter               37 mm/process_vm_access.c 	while (len && iov_iter_count(iter)) {
iter               46 mm/process_vm_access.c 			copied = copy_page_from_iter(page, offset, copy, iter);
iter               49 mm/process_vm_access.c 			copied = copy_page_to_iter(page, offset, copy, iter);
iter               52 mm/process_vm_access.c 		if (copied < copy && iov_iter_count(iter))
iter               76 mm/process_vm_access.c 				    struct iov_iter *iter,
iter               98 mm/process_vm_access.c 	while (!rc && nr_pages && iov_iter_count(iter)) {
iter              121 mm/process_vm_access.c 					 start_offset, bytes, iter,
iter              151 mm/process_vm_access.c static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
iter              165 mm/process_vm_access.c 	size_t total_len = iov_iter_count(iter);
iter              215 mm/process_vm_access.c 	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
iter              218 mm/process_vm_access.c 			iter, process_pages, mm, task, vm_write);
iter              221 mm/process_vm_access.c 	total_len -= iov_iter_count(iter);
iter              265 mm/process_vm_access.c 	struct iov_iter iter;
iter              273 mm/process_vm_access.c 	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
iter              276 mm/process_vm_access.c 	if (!iov_iter_count(&iter))
iter              284 mm/process_vm_access.c 	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
iter              323 mm/process_vm_access.c 	struct iov_iter iter;
iter              330 mm/process_vm_access.c 	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
iter              333 mm/process_vm_access.c 	if (!iov_iter_count(&iter))
iter              341 mm/process_vm_access.c 	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
iter              288 mm/slab.h      #define for_each_memcg_cache(iter, root) \
iter              289 mm/slab.h      	list_for_each_entry(iter, &(root)->memcg_params.children, \
iter              417 mm/slab.h      #define for_each_memcg_cache(iter, root) \
iter              418 mm/slab.h      	for ((void)(iter), (void)(root); 0; )
iter             1308 mm/slub.c      	char *iter;
iter             1316 mm/slub.c      	iter = slub_debug_slabs;
iter             1317 mm/slub.c      	while (*iter) {
iter             1321 mm/slub.c      		end = strchrnul(iter, ',');
iter             1323 mm/slub.c      		glob = strnchr(iter, end - iter, '*');
iter             1325 mm/slub.c      			cmplen = glob - iter;
iter             1327 mm/slub.c      			cmplen = max_t(size_t, len, (end - iter));
iter             1329 mm/slub.c      		if (!strncmp(name, iter, cmplen)) {
iter             1336 mm/slub.c      		iter = end + 1;
iter              919 net/appletalk/aarp.c static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
iter              921 net/appletalk/aarp.c 	int ct = iter->bucket;
iter              922 net/appletalk/aarp.c 	struct aarp_entry **table = iter->table;
iter              930 net/appletalk/aarp.c 				iter->table = table;
iter              931 net/appletalk/aarp.c 				iter->bucket = ct;
iter              954 net/appletalk/aarp.c 	struct aarp_iter_state *iter = seq->private;
iter              957 net/appletalk/aarp.c 	iter->table     = resolved;
iter              958 net/appletalk/aarp.c 	iter->bucket    = 0;
iter              960 net/appletalk/aarp.c 	return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN;
iter              966 net/appletalk/aarp.c 	struct aarp_iter_state *iter = seq->private;
iter              972 net/appletalk/aarp.c 		entry = iter_next(iter, NULL);
iter              980 net/appletalk/aarp.c 		++iter->bucket;
iter              981 net/appletalk/aarp.c 		entry = iter_next(iter, NULL);
iter             1003 net/appletalk/aarp.c 	struct aarp_iter_state *iter = seq->private;
iter             1019 net/appletalk/aarp.c 		if (iter->table == unresolved)
iter             1026 net/appletalk/aarp.c 			   (iter->table == resolved) ? "resolved"
iter             1027 net/appletalk/aarp.c 			   : (iter->table == unresolved) ? "unresolved"
iter             1028 net/appletalk/aarp.c 			   : (iter->table == proxies) ? "proxies"
iter              376 net/bridge/br_mdb.c 	struct list_head *iter;
iter              378 net/bridge/br_mdb.c 	netdev_for_each_lower_dev(dev, lower_dev, iter)
iter               59 net/bridge/netfilter/nf_conntrack_bridge.c 		struct ip_fraglist_iter iter;
iter               78 net/bridge/netfilter/nf_conntrack_bridge.c 		ip_fraglist_init(skb, iph, hlen, &iter);
iter               81 net/bridge/netfilter/nf_conntrack_bridge.c 			if (iter.frag)
iter               82 net/bridge/netfilter/nf_conntrack_bridge.c 				ip_fraglist_prepare(skb, &iter);
iter               86 net/bridge/netfilter/nf_conntrack_bridge.c 			if (err || !iter.frag)
iter               89 net/bridge/netfilter/nf_conntrack_bridge.c 			skb = ip_fraglist_next(&iter);
iter              822 net/ceph/messenger.c 	if (cursor->resid < it->iter.bi_size)
iter              823 net/ceph/messenger.c 		it->iter.bi_size = cursor->resid;
iter              825 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
iter              826 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
iter              834 net/ceph/messenger.c 					   cursor->bio_iter.iter);
iter              845 net/ceph/messenger.c 	struct page *page = bio_iter_page(it->bio, it->iter);
iter              848 net/ceph/messenger.c 	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
iter              850 net/ceph/messenger.c 	bio_advance_iter(it->bio, &it->iter, bytes);
iter              857 net/ceph/messenger.c 	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
iter              858 net/ceph/messenger.c 		       page == bio_iter_page(it->bio, it->iter)))
iter              861 net/ceph/messenger.c 	if (!it->iter.bi_size) {
iter              863 net/ceph/messenger.c 		it->iter = it->bio->bi_iter;
iter              864 net/ceph/messenger.c 		if (cursor->resid < it->iter.bi_size)
iter              865 net/ceph/messenger.c 			it->iter.bi_size = cursor->resid;
iter              869 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
iter              870 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
iter              881 net/ceph/messenger.c 	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
iter              882 net/ceph/messenger.c 	cursor->bvec_iter = data->bvec_pos.iter;
iter             3322 net/ceph/messenger.c 	msg->data_length += bvec_pos->iter.bi_size;
iter              248 net/ceph/osd_client.c 		.iter = { .bi_size = bytes },
iter              312 net/ceph/osd_client.c 		.iter = { .bi_size = bytes },
iter              348 net/ceph/osd_client.c 		return osd_data->bvec_pos.iter.bi_size;
iter             1476 net/core/dev.c 	struct list_head *iter;
iter             1484 net/core/dev.c 	netdev_for_each_lower_dev(dev, lower_dev, iter)
iter             6564 net/core/dev.c 						 struct list_head **iter)
iter             6570 net/core/dev.c 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
iter             6575 net/core/dev.c 	*iter = &upper->list;
iter             6582 net/core/dev.c 						  struct list_head **iter,
iter             6587 net/core/dev.c 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
iter             6592 net/core/dev.c 	*iter = &upper->list;
iter             6599 net/core/dev.c 						    struct list_head **iter)
iter             6605 net/core/dev.c 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
iter             6610 net/core/dev.c 	*iter = &upper->list;
iter             6621 net/core/dev.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             6626 net/core/dev.c 	iter = &dev->adj_list.upper;
iter             6637 net/core/dev.c 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
iter             6646 net/core/dev.c 			iter_stack[cur++] = iter;
iter             6658 net/core/dev.c 		iter = niter;
iter             6670 net/core/dev.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             6674 net/core/dev.c 	iter = &dev->adj_list.upper;
iter             6685 net/core/dev.c 			udev = netdev_next_upper_dev_rcu(now, &iter);
iter             6692 net/core/dev.c 			iter_stack[cur++] = iter;
iter             6704 net/core/dev.c 		iter = niter;
iter             6732 net/core/dev.c 				    struct list_head **iter)
iter             6736 net/core/dev.c 	lower = list_entry(*iter, struct netdev_adjacent, list);
iter             6741 net/core/dev.c 	*iter = lower->list.next;
iter             6758 net/core/dev.c 					struct list_head **iter)
iter             6764 net/core/dev.c 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
iter             6769 net/core/dev.c 	*iter = &lower->list;
iter             6786 net/core/dev.c void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
iter             6790 net/core/dev.c 	lower = list_entry(*iter, struct netdev_adjacent, list);
iter             6795 net/core/dev.c 	*iter = lower->list.next;
iter             6802 net/core/dev.c 						struct list_head **iter)
iter             6806 net/core/dev.c 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
iter             6811 net/core/dev.c 	*iter = &lower->list;
iter             6817 net/core/dev.c 						  struct list_head **iter,
iter             6822 net/core/dev.c 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
iter             6827 net/core/dev.c 	*iter = &lower->list;
iter             6839 net/core/dev.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             6843 net/core/dev.c 	iter = &dev->adj_list.lower;
iter             6854 net/core/dev.c 			ldev = netdev_next_lower_dev(now, &iter);
iter             6861 net/core/dev.c 			iter_stack[cur++] = iter;
iter             6873 net/core/dev.c 		iter = niter;
iter             6886 net/core/dev.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             6891 net/core/dev.c 	iter = &dev->adj_list.lower;
iter             6902 net/core/dev.c 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
iter             6911 net/core/dev.c 			iter_stack[cur++] = iter;
iter             6923 net/core/dev.c 		iter = niter;
iter             6930 net/core/dev.c 					     struct list_head **iter)
iter             6934 net/core/dev.c 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
iter             6938 net/core/dev.c 	*iter = &lower->list;
iter             6947 net/core/dev.c 	struct list_head *iter;
iter             6951 net/core/dev.c 	for (iter = &dev->adj_list.upper,
iter             6952 net/core/dev.c 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
iter             6954 net/core/dev.c 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
iter             6967 net/core/dev.c 	struct list_head *iter;
iter             6971 net/core/dev.c 	for (iter = &dev->adj_list.lower,
iter             6972 net/core/dev.c 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
iter             6974 net/core/dev.c 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
iter             7002 net/core/dev.c 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
iter             7006 net/core/dev.c 	iter = &dev->adj_list.lower;
iter             7017 net/core/dev.c 			ldev = netdev_next_lower_dev_rcu(now, &iter);
iter             7024 net/core/dev.c 			iter_stack[cur++] = iter;
iter             7036 net/core/dev.c 		iter = niter;
iter             7526 net/core/dev.c 	struct netdev_adjacent *iter;
iter             7530 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
iter             7531 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7533 net/core/dev.c 		netdev_adjacent_sysfs_add(iter->dev, dev,
iter             7534 net/core/dev.c 					  &iter->dev->adj_list.lower);
iter             7535 net/core/dev.c 		netdev_adjacent_sysfs_add(dev, iter->dev,
iter             7539 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
iter             7540 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7542 net/core/dev.c 		netdev_adjacent_sysfs_add(iter->dev, dev,
iter             7543 net/core/dev.c 					  &iter->dev->adj_list.upper);
iter             7544 net/core/dev.c 		netdev_adjacent_sysfs_add(dev, iter->dev,
iter             7551 net/core/dev.c 	struct netdev_adjacent *iter;
iter             7555 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
iter             7556 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7558 net/core/dev.c 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
iter             7559 net/core/dev.c 					  &iter->dev->adj_list.lower);
iter             7560 net/core/dev.c 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
iter             7564 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
iter             7565 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7567 net/core/dev.c 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
iter             7568 net/core/dev.c 					  &iter->dev->adj_list.upper);
iter             7569 net/core/dev.c 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
iter             7576 net/core/dev.c 	struct netdev_adjacent *iter;
iter             7580 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
iter             7581 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7583 net/core/dev.c 		netdev_adjacent_sysfs_del(iter->dev, oldname,
iter             7584 net/core/dev.c 					  &iter->dev->adj_list.lower);
iter             7585 net/core/dev.c 		netdev_adjacent_sysfs_add(iter->dev, dev,
iter             7586 net/core/dev.c 					  &iter->dev->adj_list.lower);
iter             7589 net/core/dev.c 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
iter             7590 net/core/dev.c 		if (!net_eq(net, dev_net(iter->dev)))
iter             7592 net/core/dev.c 		netdev_adjacent_sysfs_del(iter->dev, oldname,
iter             7593 net/core/dev.c 					  &iter->dev->adj_list.upper);
iter             7594 net/core/dev.c 		netdev_adjacent_sysfs_add(iter->dev, dev,
iter             7595 net/core/dev.c 					  &iter->dev->adj_list.upper);
iter             8221 net/core/dev.c 	struct list_head *iter;
iter             8237 net/core/dev.c 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
iter             8702 net/core/dev.c 	struct list_head *iter;
iter             8716 net/core/dev.c 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
iter             8744 net/core/dev.c 	netdev_for_each_lower_dev(dev, lower, iter)
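Note: the net/core/dev.c entries above are the internals behind the public adjacency iterators. A hedged caller-side sketch: netdev_for_each_lower_dev() runs under RTNL, the _rcu upper walk under rcu_read_lock(); "my_dump_adjacent" is hypothetical.

        #include <linux/netdevice.h>
        #include <linux/rtnetlink.h>

        static void my_dump_adjacent(struct net_device *dev)
        {
                struct net_device *lower, *upper;
                struct list_head *iter;

                ASSERT_RTNL();
                netdev_for_each_lower_dev(dev, lower, iter)
                        netdev_info(dev, "lower: %s\n", lower->name);

                rcu_read_lock();
                netdev_for_each_upper_dev_rcu(dev, upper, iter)
                        netdev_info(dev, "upper: %s\n", upper->name);
                rcu_read_unlock();
        }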
iter             2378 net/core/skbuff.c 	struct sk_buff *iter;
iter             2405 net/core/skbuff.c 	skb_walk_frags(skb, iter) {
iter             2406 net/core/skbuff.c 		if (*offset >= iter->len) {
iter             2407 net/core/skbuff.c 			*offset -= iter->len;
iter             2414 net/core/skbuff.c 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
iter             3702 net/core/skbuff.c 			struct sk_buff *iter;
iter             3718 net/core/skbuff.c 			skb_walk_frags(head_skb, iter) {
iter             3719 net/core/skbuff.c 				if (frag_len != iter->len && iter->next)
iter             3721 net/core/skbuff.c 				if (skb_headlen(iter) && !iter->head_frag)
iter             3724 net/core/skbuff.c 				len -= iter->len;
iter             3945 net/core/skbuff.c 		struct sk_buff *iter;
iter             3956 net/core/skbuff.c 		for (iter = segs; iter; iter = iter->next) {
iter             3957 net/core/skbuff.c 			skb_shinfo(iter)->gso_size = gso_size;
iter             3958 net/core/skbuff.c 			skb_shinfo(iter)->gso_segs = partial_segs;
iter             3959 net/core/skbuff.c 			skb_shinfo(iter)->gso_type = type;
iter             3960 net/core/skbuff.c 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
iter             5234 net/core/skbuff.c 	const struct sk_buff *iter;
iter             5242 net/core/skbuff.c 	skb_walk_frags(skb, iter) {
iter             5243 net/core/skbuff.c 		if (seg_len + skb_headlen(iter) > max_len)
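Note: skb_walk_frags(), used throughout the net/core/skbuff.c entries above, simply walks the frag_list chain of a non-linear skb. A hedged sketch; "my_fraglist_len" is hypothetical.

        #include <linux/skbuff.h>

        static unsigned int my_fraglist_len(const struct sk_buff *skb)
        {
                const struct sk_buff *iter;
                unsigned int len = 0;

                skb_walk_frags(skb, iter)
                        len += iter->len;

                return len;
        }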
iter               95 net/core/xdp.c 	struct rhashtable_iter iter;
iter               99 net/core/xdp.c 	rhashtable_walk_enter(mem_id_ht, &iter);
iter              101 net/core/xdp.c 		rhashtable_walk_start(&iter);
iter              103 net/core/xdp.c 		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
iter              108 net/core/xdp.c 		rhashtable_walk_stop(&iter);
iter              111 net/core/xdp.c 	rhashtable_walk_exit(&iter);
iter              169 net/ipv4/cipso_ipv4.c 	u32 iter;
iter              177 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
iter              178 net/ipv4/cipso_ipv4.c 		spin_lock_init(&cipso_v4_cache[iter].lock);
iter              179 net/ipv4/cipso_ipv4.c 		cipso_v4_cache[iter].size = 0;
iter              180 net/ipv4/cipso_ipv4.c 		INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
iter              197 net/ipv4/cipso_ipv4.c 	u32 iter;
iter              199 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
iter              200 net/ipv4/cipso_ipv4.c 		spin_lock_bh(&cipso_v4_cache[iter].lock);
iter              203 net/ipv4/cipso_ipv4.c 					 &cipso_v4_cache[iter].list, list) {
iter              207 net/ipv4/cipso_ipv4.c 		cipso_v4_cache[iter].size = 0;
iter              208 net/ipv4/cipso_ipv4.c 		spin_unlock_bh(&cipso_v4_cache[iter].lock);
iter              361 net/ipv4/cipso_ipv4.c 	struct cipso_v4_doi *iter;
iter              363 net/ipv4/cipso_ipv4.c 	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
iter              364 net/ipv4/cipso_ipv4.c 		if (iter->doi == doi && refcount_read(&iter->refcount))
iter              365 net/ipv4/cipso_ipv4.c 			return iter;
iter              386 net/ipv4/cipso_ipv4.c 	u32 iter;
iter              396 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
iter              397 net/ipv4/cipso_ipv4.c 		switch (doi_def->tags[iter]) {
iter              410 net/ipv4/cipso_ipv4.c 			if (iter == 0)
iter              921 net/ipv4/cipso_ipv4.c 	u32 iter;
iter              926 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < enumcat_len; iter += 2) {
iter              927 net/ipv4/cipso_ipv4.c 		cat = get_unaligned_be16(&enumcat[iter]);
iter              991 net/ipv4/cipso_ipv4.c 	u32 iter;
iter              993 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < net_cat_len; iter += 2) {
iter              995 net/ipv4/cipso_ipv4.c 					     get_unaligned_be16(&net_cat[iter]),
iter             1023 net/ipv4/cipso_ipv4.c 	u32 iter;
iter             1028 net/ipv4/cipso_ipv4.c 	for (iter = 0; iter < rngcat_len; iter += 4) {
iter             1029 net/ipv4/cipso_ipv4.c 		cat_high = get_unaligned_be16(&rngcat[iter]);
iter             1030 net/ipv4/cipso_ipv4.c 		if ((iter + 4) <= rngcat_len)
iter             1031 net/ipv4/cipso_ipv4.c 			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
iter             1063 net/ipv4/cipso_ipv4.c 	int iter = -1;
iter             1074 net/ipv4/cipso_ipv4.c 		iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1);
iter             1075 net/ipv4/cipso_ipv4.c 		if (iter < 0)
iter             1077 net/ipv4/cipso_ipv4.c 		cat_size += (iter == 0 ? 0 : sizeof(u16));
iter             1080 net/ipv4/cipso_ipv4.c 		array[array_cnt++] = iter;
iter             1082 net/ipv4/cipso_ipv4.c 		iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter);
iter             1083 net/ipv4/cipso_ipv4.c 		if (iter < 0)
iter             1088 net/ipv4/cipso_ipv4.c 		array[array_cnt++] = iter;
iter             1091 net/ipv4/cipso_ipv4.c 	for (iter = 0; array_cnt > 0;) {
iter             1092 net/ipv4/cipso_ipv4.c 		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
iter             1093 net/ipv4/cipso_ipv4.c 		iter += 2;
iter             1096 net/ipv4/cipso_ipv4.c 			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
iter             1097 net/ipv4/cipso_ipv4.c 			iter += 2;
iter             1772 net/ipv4/cipso_ipv4.c 	u32 iter;
iter             1780 net/ipv4/cipso_ipv4.c 	iter = 0;
iter             1783 net/ipv4/cipso_ipv4.c 		switch (doi_def->tags[iter]) {
iter             1812 net/ipv4/cipso_ipv4.c 		iter++;
iter             1814 net/ipv4/cipso_ipv4.c 		 iter < CIPSO_V4_TAG_MAXCNT &&
iter             1815 net/ipv4/cipso_ipv4.c 		 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
iter             1997 net/ipv4/cipso_ipv4.c 		int iter;
iter             2022 net/ipv4/cipso_ipv4.c 		iter = 0;
iter             2024 net/ipv4/cipso_ipv4.c 		while (iter < opt->opt.optlen)
iter             2025 net/ipv4/cipso_ipv4.c 			if (opt->opt.__data[iter] != IPOPT_NOP) {
iter             2026 net/ipv4/cipso_ipv4.c 				iter += opt->opt.__data[iter + 1];
iter             2027 net/ipv4/cipso_ipv4.c 				optlen_new = iter;
iter             2029 net/ipv4/cipso_ipv4.c 				iter++;
iter             2266 net/ipv4/fib_trie.c static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
iter             2268 net/ipv4/fib_trie.c 	unsigned long cindex = iter->index;
iter             2269 net/ipv4/fib_trie.c 	struct key_vector *pn = iter->tnode;
iter             2273 net/ipv4/fib_trie.c 		 iter->tnode, iter->index, iter->depth);
iter             2283 net/ipv4/fib_trie.c 				iter->tnode = pn;
iter             2284 net/ipv4/fib_trie.c 				iter->index = cindex;
iter             2287 net/ipv4/fib_trie.c 				iter->tnode = n;
iter             2288 net/ipv4/fib_trie.c 				iter->index = 0;
iter             2289 net/ipv4/fib_trie.c 				++iter->depth;
iter             2299 net/ipv4/fib_trie.c 		--iter->depth;
iter             2303 net/ipv4/fib_trie.c 	iter->tnode = pn;
iter             2304 net/ipv4/fib_trie.c 	iter->index = 0;
iter             2309 net/ipv4/fib_trie.c static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
iter             2323 net/ipv4/fib_trie.c 		iter->tnode = n;
iter             2324 net/ipv4/fib_trie.c 		iter->index = 0;
iter             2325 net/ipv4/fib_trie.c 		iter->depth = 1;
iter             2327 net/ipv4/fib_trie.c 		iter->tnode = pn;
iter             2328 net/ipv4/fib_trie.c 		iter->index = 0;
iter             2329 net/ipv4/fib_trie.c 		iter->depth = 0;
iter             2338 net/ipv4/fib_trie.c 	struct fib_trie_iter iter;
iter             2343 net/ipv4/fib_trie.c 	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
iter             2348 net/ipv4/fib_trie.c 			s->totdepth += iter.depth;
iter             2349 net/ipv4/fib_trie.c 			if (iter.depth > s->maxdepth)
iter             2350 net/ipv4/fib_trie.c 				s->maxdepth = iter.depth;
iter             2487 net/ipv4/fib_trie.c 	struct fib_trie_iter *iter = seq->private;
iter             2499 net/ipv4/fib_trie.c 			for (n = fib_trie_get_first(iter,
iter             2501 net/ipv4/fib_trie.c 			     n; n = fib_trie_get_next(iter))
iter             2503 net/ipv4/fib_trie.c 					iter->tb = tb;
iter             2521 net/ipv4/fib_trie.c 	struct fib_trie_iter *iter = seq->private;
iter             2523 net/ipv4/fib_trie.c 	struct fib_table *tb = iter->tb;
iter             2530 net/ipv4/fib_trie.c 	n = fib_trie_get_next(iter);
iter             2538 net/ipv4/fib_trie.c 		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
iter             2547 net/ipv4/fib_trie.c 			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
iter             2555 net/ipv4/fib_trie.c 	iter->tb = tb;
iter             2611 net/ipv4/fib_trie.c 	const struct fib_trie_iter *iter = seq->private;
iter             2615 net/ipv4/fib_trie.c 		fib_table_print(seq, iter->tb);
iter             2620 net/ipv4/fib_trie.c 		seq_indent(seq, iter->depth-1);
iter             2629 net/ipv4/fib_trie.c 		seq_indent(seq, iter->depth);
iter             2635 net/ipv4/fib_trie.c 			seq_indent(seq, iter->depth + 1);
iter             2666 net/ipv4/fib_trie.c static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
iter             2669 net/ipv4/fib_trie.c 	struct key_vector *l, **tp = &iter->tnode;
iter             2673 net/ipv4/fib_trie.c 	if (iter->pos > 0 && pos >= iter->pos) {
iter             2674 net/ipv4/fib_trie.c 		key = iter->key;
iter             2676 net/ipv4/fib_trie.c 		iter->pos = 1;
iter             2680 net/ipv4/fib_trie.c 	pos -= iter->pos;
iter             2684 net/ipv4/fib_trie.c 		iter->pos++;
iter             2693 net/ipv4/fib_trie.c 		iter->key = l->key;	/* remember it */
iter             2695 net/ipv4/fib_trie.c 		iter->pos = 0;		/* forget it */
iter             2703 net/ipv4/fib_trie.c 	struct fib_route_iter *iter = seq->private;
iter             2713 net/ipv4/fib_trie.c 	iter->main_tb = tb;
iter             2715 net/ipv4/fib_trie.c 	iter->tnode = t->kv;
iter             2718 net/ipv4/fib_trie.c 		return fib_route_get_idx(iter, *pos);
iter             2720 net/ipv4/fib_trie.c 	iter->pos = 0;
iter             2721 net/ipv4/fib_trie.c 	iter->key = KEY_MAX;
iter             2728 net/ipv4/fib_trie.c 	struct fib_route_iter *iter = seq->private;
iter             2730 net/ipv4/fib_trie.c 	t_key key = iter->key + 1;
iter             2736 net/ipv4/fib_trie.c 		l = leaf_walk_rcu(&iter->tnode, key);
iter             2739 net/ipv4/fib_trie.c 		iter->key = l->key;
iter             2740 net/ipv4/fib_trie.c 		iter->pos++;
iter             2742 net/ipv4/fib_trie.c 		iter->pos = 0;
iter             2780 net/ipv4/fib_trie.c 	struct fib_route_iter *iter = seq->private;
iter             2781 net/ipv4/fib_trie.c 	struct fib_table *tb = iter->main_tb;
iter              591 net/ipv4/ip_output.c 		      unsigned int hlen, struct ip_fraglist_iter *iter)
iter              595 net/ipv4/ip_output.c 	iter->frag = skb_shinfo(skb)->frag_list;
iter              598 net/ipv4/ip_output.c 	iter->offset = 0;
iter              599 net/ipv4/ip_output.c 	iter->iph = iph;
iter              600 net/ipv4/ip_output.c 	iter->hlen = hlen;
iter              611 net/ipv4/ip_output.c 				     struct ip_fraglist_iter *iter)
iter              613 net/ipv4/ip_output.c 	struct sk_buff *to = iter->frag;
iter              618 net/ipv4/ip_output.c 	if (iter->offset == 0)
iter              622 net/ipv4/ip_output.c void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
iter              624 net/ipv4/ip_output.c 	unsigned int hlen = iter->hlen;
iter              625 net/ipv4/ip_output.c 	struct iphdr *iph = iter->iph;
iter              628 net/ipv4/ip_output.c 	frag = iter->frag;
iter              634 net/ipv4/ip_output.c 	iter->iph = ip_hdr(frag);
iter              635 net/ipv4/ip_output.c 	iph = iter->iph;
iter              638 net/ipv4/ip_output.c 	iter->offset += skb->len - hlen;
iter              639 net/ipv4/ip_output.c 	iph->frag_off = htons(iter->offset >> 3);
iter              773 net/ipv4/ip_output.c 	struct ip_fraglist_iter iter;
iter              840 net/ipv4/ip_output.c 		ip_fraglist_init(skb, iph, hlen, &iter);
iter              845 net/ipv4/ip_output.c 			if (iter.frag) {
iter              846 net/ipv4/ip_output.c 				ip_fraglist_ipcb_prepare(skb, &iter);
iter              847 net/ipv4/ip_output.c 				ip_fraglist_prepare(skb, &iter);
iter              855 net/ipv4/ip_output.c 			if (err || !iter.frag)
iter              858 net/ipv4/ip_output.c 			skb = ip_fraglist_next(&iter);
iter              866 net/ipv4/ip_output.c 		kfree_skb_list(iter.frag);
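Note: the net/ipv4/ip_output.c entries above both define the ip_fraglist iterator and use it in the fast path; the bridge conntrack entries earlier in this listing follow the same loop. A hedged sketch of that caller shape with error handling trimmed; "my_fraglist_xmit" and the output callback parameter are hypothetical.

        #include <net/ip.h>

        static int my_fraglist_xmit(struct net *net, struct sock *sk, struct sk_buff *skb,
                                    struct iphdr *iph, unsigned int hlen,
                                    int (*output)(struct net *, struct sock *, struct sk_buff *))
        {
                struct ip_fraglist_iter iter;
                int err;

                ip_fraglist_init(skb, iph, hlen, &iter);

                for (;;) {
                        if (iter.frag)
                                ip_fraglist_prepare(skb, &iter); /* fix up the next frag's header */

                        err = output(net, sk, skb);
                        if (err || !iter.frag)
                                break;

                        skb = ip_fraglist_next(&iter);           /* detach and move to the next frag */
                }
                return err;
        }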
iter             2919 net/ipv4/ipmr.c 	struct mr_vif_iter *iter = seq->private;
iter             2927 net/ipv4/ipmr.c 	iter->mrt = mrt;
iter             2941 net/ipv4/ipmr.c 	struct mr_vif_iter *iter = seq->private;
iter             2942 net/ipv4/ipmr.c 	struct mr_table *mrt = iter->mrt;
iter              114 net/ipv4/ipmr_base.c void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
iter              116 net/ipv4/ipmr_base.c 	struct mr_table *mrt = iter->mrt;
iter              118 net/ipv4/ipmr_base.c 	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
iter              119 net/ipv4/ipmr_base.c 		if (!VIF_EXISTS(mrt, iter->ct))
iter              122 net/ipv4/ipmr_base.c 			return &mrt->vif_table[iter->ct];
iter              130 net/ipv4/ipmr_base.c 	struct mr_vif_iter *iter = seq->private;
iter              132 net/ipv4/ipmr_base.c 	struct mr_table *mrt = iter->mrt;
iter              136 net/ipv4/ipmr_base.c 		return mr_vif_seq_idx(net, iter, 0);
iter              138 net/ipv4/ipmr_base.c 	while (++iter->ct < mrt->maxvif) {
iter              139 net/ipv4/ipmr_base.c 		if (!VIF_EXISTS(mrt, iter->ct))
iter              141 net/ipv4/ipmr_base.c 		return &mrt->vif_table[iter->ct];
iter              345 net/ipv4/ipmr_base.c 		     struct mr_table *(*iter)(struct net *net,
iter              368 net/ipv4/ipmr_base.c 	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
iter              524 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              544 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              545 net/ipv4/netfilter/arp_tables.c 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
iter              553 net/ipv4/netfilter/arp_tables.c 			offsets[i] = (void *)iter - entry0;
iter              555 net/ipv4/netfilter/arp_tables.c 		if (strcmp(arpt_get_target(iter)->u.user.name,
iter              576 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              577 net/ipv4/netfilter/arp_tables.c 		ret = find_check_entry(iter, net, repl->name, repl->size,
iter              585 net/ipv4/netfilter/arp_tables.c 		xt_entry_foreach(iter, entry0, newinfo->size) {
iter              588 net/ipv4/netfilter/arp_tables.c 			cleanup_entry(iter, net);
iter              602 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              610 net/ipv4/netfilter/arp_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              615 net/ipv4/netfilter/arp_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              632 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              637 net/ipv4/netfilter/arp_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              640 net/ipv4/netfilter/arp_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              767 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              781 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
iter              782 net/ipv4/netfilter/arp_tables.c 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
iter              891 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              930 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
iter              931 net/ipv4/netfilter/arp_tables.c 		cleanup_entry(iter, net);
iter              958 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter              993 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter              994 net/ipv4/netfilter/arp_tables.c 		cleanup_entry(iter, net);
iter             1009 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter             1032 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter,  private->entries, private->size) {
iter             1035 net/ipv4/netfilter/arp_tables.c 		tmp = xt_get_this_cpu_counter(&iter->counters);
iter             1256 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter             1290 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter             1291 net/ipv4/netfilter/arp_tables.c 		cleanup_entry(iter, net);
iter             1365 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter             1373 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, private->entries, total_size) {
iter             1374 net/ipv4/netfilter/arp_tables.c 		ret = compat_copy_entry_to_user(iter, &pos,
iter             1523 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *iter;
iter             1529 net/ipv4/netfilter/arp_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
iter             1530 net/ipv4/netfilter/arp_tables.c 		cleanup_entry(iter, net);
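
net/ipv4/netfilter/arp_tables.c (and the ip_tables.c and ip6_tables.c blocks further down) traverse a flat blob of variable-length rules with xt_entry_foreach(); when a check fails partway through, the blob is walked a second time so only the entries that were already accepted get cleaned up. A hedged sketch, where demo_check() and demo_cleanup() are hypothetical stand-ins for find_check_entry()/cleanup_entry():

#include <linux/netfilter_ipv4/ip_tables.h>

struct net;
int demo_check(struct ipt_entry *e, struct net *net);	/* hypothetical */
void demo_cleanup(struct ipt_entry *e, struct net *net);/* hypothetical */

/* Sketch: check every variable-sized rule in the blob at 'entry0'; on a
 * failure, walk the blob again and undo only the 'i' entries that had
 * already passed, mirroring the translate_table() shape in the listing.
 */
static int demo_check_all(struct net *net, void *entry0, unsigned int size)
{
	struct ipt_entry *iter;
	unsigned int i = 0;
	int ret = 0;

	xt_entry_foreach(iter, entry0, size) {
		ret = demo_check(iter, net);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, size) {
			if (i-- == 0)
				break;
			demo_cleanup(iter, net);
		}
	}
	return ret;
}
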
iter              198 net/ipv4/netfilter/ip_tables.c 	const struct ipt_entry *iter;
iter              206 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
iter              207 net/ipv4/netfilter/ip_tables.c 		if (get_chainname_rulenum(iter, e, hookname,
iter              665 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter              684 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              685 net/ipv4/netfilter/ip_tables.c 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
iter              693 net/ipv4/netfilter/ip_tables.c 			offsets[i] = (void *)iter - entry0;
iter              695 net/ipv4/netfilter/ip_tables.c 		if (strcmp(ipt_get_target(iter)->u.user.name,
iter              716 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              717 net/ipv4/netfilter/ip_tables.c 		ret = find_check_entry(iter, net, repl->name, repl->size,
iter              725 net/ipv4/netfilter/ip_tables.c 		xt_entry_foreach(iter, entry0, newinfo->size) {
iter              728 net/ipv4/netfilter/ip_tables.c 			cleanup_entry(iter, net);
iter              743 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter              751 net/ipv4/netfilter/ip_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              756 net/ipv4/netfilter/ip_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              773 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter              778 net/ipv4/netfilter/ip_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              781 net/ipv4/netfilter/ip_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              924 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter              938 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
iter              939 net/ipv4/netfilter/ip_tables.c 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
iter             1046 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1084 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
iter             1085 net/ipv4/netfilter/ip_tables.c 		cleanup_entry(iter, net);
iter             1112 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1147 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter             1148 net/ipv4/netfilter/ip_tables.c 		cleanup_entry(iter, net);
iter             1164 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1186 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, private->entries, private->size) {
iter             1189 net/ipv4/netfilter/ip_tables.c 		tmp = xt_get_this_cpu_counter(&iter->counters);
iter             1495 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1530 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter             1531 net/ipv4/netfilter/ip_tables.c 		cleanup_entry(iter, net);
iter             1578 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1586 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, private->entries, total_size) {
iter             1587 net/ipv4/netfilter/ip_tables.c 		ret = compat_copy_entry_to_user(iter, &pos,
iter             1742 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *iter;
iter             1748 net/ipv4/netfilter/ip_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
iter             1749 net/ipv4/netfilter/ip_tables.c 		cleanup_entry(iter, net);
iter               48 net/ipv4/tcp_bpf.c 	struct iov_iter *iter = &msg->msg_iter;
iter               72 net/ipv4/tcp_bpf.c 			ret = copy_page_to_iter(page, sge->offset, copy, iter);
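
The net/ipv4/tcp_bpf.c lines copy received payload pages straight into the user buffer described by msg->msg_iter. A minimal sketch of that step; demo_copy_to_msg() is hypothetical, copy_page_to_iter() is the real iov_iter helper:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Sketch (hypothetical helper): copy 'bytes' bytes living in 'page' at
 * 'offset' into the user buffer described by msg->msg_iter.  A short copy
 * from copy_page_to_iter() is reported as -EFAULT here.
 */
static int demo_copy_to_msg(struct msghdr *msg, struct page *page,
			    unsigned int offset, unsigned int bytes)
{
	struct iov_iter *iter = &msg->msg_iter;
	size_t copied;

	copied = copy_page_to_iter(page, offset, bytes, iter);
	return copied == bytes ? 0 : -EFAULT;
}
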
iter              131 net/ipv6/calipso.c 	u32 iter;
iter              139 net/ipv6/calipso.c 	for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) {
iter              140 net/ipv6/calipso.c 		spin_lock_init(&calipso_cache[iter].lock);
iter              141 net/ipv6/calipso.c 		calipso_cache[iter].size = 0;
iter              142 net/ipv6/calipso.c 		INIT_LIST_HEAD(&calipso_cache[iter].list);
iter              159 net/ipv6/calipso.c 	u32 iter;
iter              161 net/ipv6/calipso.c 	for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) {
iter              162 net/ipv6/calipso.c 		spin_lock_bh(&calipso_cache[iter].lock);
iter              165 net/ipv6/calipso.c 					 &calipso_cache[iter].list, list) {
iter              169 net/ipv6/calipso.c 		calipso_cache[iter].size = 0;
iter              170 net/ipv6/calipso.c 		spin_unlock_bh(&calipso_cache[iter].lock);
iter              324 net/ipv6/calipso.c 	struct calipso_doi *iter;
iter              326 net/ipv6/calipso.c 	list_for_each_entry_rcu(iter, &calipso_doi_list, list)
iter              327 net/ipv6/calipso.c 		if (iter->doi == doi && refcount_read(&iter->refcount))
iter              328 net/ipv6/calipso.c 			return iter;
iter              383 net/ipv6/ila/ila_xlat.c 	struct rhashtable_iter iter;
iter              388 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
iter              389 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_start(&iter);
iter              392 net/ipv6/ila/ila_xlat.c 		ila = rhashtable_walk_next(&iter);
iter              419 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_stop(&iter);
iter              420 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_exit(&iter);
iter              509 net/ipv6/ila/ila_xlat.c 	struct ila_dump_iter *iter;
iter              511 net/ipv6/ila/ila_xlat.c 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter              512 net/ipv6/ila/ila_xlat.c 	if (!iter)
iter              515 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);
iter              517 net/ipv6/ila/ila_xlat.c 	iter->skip = 0;
iter              518 net/ipv6/ila/ila_xlat.c 	cb->args[0] = (long)iter;
iter              525 net/ipv6/ila/ila_xlat.c 	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
iter              527 net/ipv6/ila/ila_xlat.c 	rhashtable_walk_exit(&iter->rhiter);
iter              529 net/ipv6/ila/ila_xlat.c 	kfree(iter);
iter              536 net/ipv6/ila/ila_xlat.c 	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
iter              537 net/ipv6/ila/ila_xlat.c 	struct rhashtable_iter *rhiter = &iter->rhiter;
iter              538 net/ipv6/ila/ila_xlat.c 	int skip = iter->skip;
iter              596 net/ipv6/ila/ila_xlat.c 	iter->skip = skip;
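
net/ipv6/ila/ila_xlat.c keeps a struct rhashtable_iter across netlink dump calls (parked in cb->args[0]) and resumes it with rhashtable_walk_start()/next()/stop(). The core of such a walk, assuming a hypothetical per-entry handler; ERR_PTR(-EAGAIN) from rhashtable_walk_next() only signals a concurrent resize, so the walk keeps going:

#include <linux/err.h>
#include <linux/rhashtable.h>

/* Sketch: visit every entry of 'ht'.  demo_handle() is a hypothetical
 * per-entry callback.
 */
static void demo_walk_all(struct rhashtable *ht,
			  void (*demo_handle)(void *entry))
{
	struct rhashtable_iter iter;
	void *entry;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(entry)) {
			if (PTR_ERR(entry) == -EAGAIN)
				continue;	/* resize in progress */
			break;
		}
		demo_handle(entry);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
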
iter             1015 net/ipv6/ip6_fib.c 	struct fib6_info *iter = NULL;
iter             1032 net/ipv6/ip6_fib.c 	for (iter = leaf; iter;
iter             1033 net/ipv6/ip6_fib.c 	     iter = rcu_dereference_protected(iter->fib6_next,
iter             1039 net/ipv6/ip6_fib.c 		if (iter->fib6_metric == rt->fib6_metric) {
iter             1049 net/ipv6/ip6_fib.c 				if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
iter             1057 net/ipv6/ip6_fib.c 			if (rt6_duplicate_nexthop(iter, rt)) {
iter             1060 net/ipv6/ip6_fib.c 				if (!(iter->fib6_flags & RTF_EXPIRES))
iter             1063 net/ipv6/ip6_fib.c 					fib6_clean_expires(iter);
iter             1065 net/ipv6/ip6_fib.c 					fib6_set_expires(iter, rt->expires);
iter             1068 net/ipv6/ip6_fib.c 					fib6_metric_set(iter, RTAX_MTU,
iter             1084 net/ipv6/ip6_fib.c 			    rt6_qualify_for_ecmp(iter))
iter             1088 net/ipv6/ip6_fib.c 		if (iter->fib6_metric > rt->fib6_metric)
iter             1092 net/ipv6/ip6_fib.c 		ins = &iter->fib6_next;
iter             1100 net/ipv6/ip6_fib.c 		iter = rcu_dereference_protected(*ins,
iter             1175 net/ipv6/ip6_fib.c 		rcu_assign_pointer(rt->fib6_next, iter);
iter             1208 net/ipv6/ip6_fib.c 		rt->fib6_next = iter->fib6_next;
iter             1216 net/ipv6/ip6_fib.c 		nsiblings = iter->fib6_nsiblings;
iter             1217 net/ipv6/ip6_fib.c 		iter->fib6_node = NULL;
iter             1218 net/ipv6/ip6_fib.c 		fib6_purge_rt(iter, fn, info->nl_net);
iter             1219 net/ipv6/ip6_fib.c 		if (rcu_access_pointer(fn->rr_ptr) == iter)
iter             1221 net/ipv6/ip6_fib.c 		fib6_info_release(iter);
iter             1226 net/ipv6/ip6_fib.c 			iter = rcu_dereference_protected(*ins,
iter             1228 net/ipv6/ip6_fib.c 			while (iter) {
iter             1229 net/ipv6/ip6_fib.c 				if (iter->fib6_metric > rt->fib6_metric)
iter             1231 net/ipv6/ip6_fib.c 				if (rt6_qualify_for_ecmp(iter)) {
iter             1232 net/ipv6/ip6_fib.c 					*ins = iter->fib6_next;
iter             1233 net/ipv6/ip6_fib.c 					iter->fib6_node = NULL;
iter             1234 net/ipv6/ip6_fib.c 					fib6_purge_rt(iter, fn, info->nl_net);
iter             1235 net/ipv6/ip6_fib.c 					if (rcu_access_pointer(fn->rr_ptr) == iter)
iter             1237 net/ipv6/ip6_fib.c 					fib6_info_release(iter);
iter             1241 net/ipv6/ip6_fib.c 					ins = &iter->fib6_next;
iter             1243 net/ipv6/ip6_fib.c 				iter = rcu_dereference_protected(*ins,
iter             1713 net/ipv6/ip6_fib.c 	int iter = 0;
iter             1738 net/ipv6/ip6_fib.c 		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
iter             1739 net/ipv6/ip6_fib.c 		iter++;
iter             2375 net/ipv6/ip6_fib.c 	struct ipv6_route_iter *iter = seq->private;
iter             2401 net/ipv6/ip6_fib.c 	iter->w.leaf = NULL;
iter             2407 net/ipv6/ip6_fib.c 	struct ipv6_route_iter *iter = w->args;
iter             2409 net/ipv6/ip6_fib.c 	if (!iter->skip)
iter             2413 net/ipv6/ip6_fib.c 		iter->w.leaf = rcu_dereference_protected(
iter             2414 net/ipv6/ip6_fib.c 				iter->w.leaf->fib6_next,
iter             2415 net/ipv6/ip6_fib.c 				lockdep_is_held(&iter->tbl->tb6_lock));
iter             2416 net/ipv6/ip6_fib.c 		iter->skip--;
iter             2417 net/ipv6/ip6_fib.c 		if (!iter->skip && iter->w.leaf)
iter             2419 net/ipv6/ip6_fib.c 	} while (iter->w.leaf);
iter             2424 net/ipv6/ip6_fib.c static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
iter             2427 net/ipv6/ip6_fib.c 	memset(&iter->w, 0, sizeof(iter->w));
iter             2428 net/ipv6/ip6_fib.c 	iter->w.func = ipv6_route_yield;
iter             2429 net/ipv6/ip6_fib.c 	iter->w.root = &iter->tbl->tb6_root;
iter             2430 net/ipv6/ip6_fib.c 	iter->w.state = FWS_INIT;
iter             2431 net/ipv6/ip6_fib.c 	iter->w.node = iter->w.root;
iter             2432 net/ipv6/ip6_fib.c 	iter->w.args = iter;
iter             2433 net/ipv6/ip6_fib.c 	iter->sernum = iter->w.root->fn_sernum;
iter             2434 net/ipv6/ip6_fib.c 	INIT_LIST_HEAD(&iter->w.lh);
iter             2435 net/ipv6/ip6_fib.c 	fib6_walker_link(net, &iter->w);
iter             2459 net/ipv6/ip6_fib.c static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
iter             2461 net/ipv6/ip6_fib.c 	if (iter->sernum != iter->w.root->fn_sernum) {
iter             2462 net/ipv6/ip6_fib.c 		iter->sernum = iter->w.root->fn_sernum;
iter             2463 net/ipv6/ip6_fib.c 		iter->w.state = FWS_INIT;
iter             2464 net/ipv6/ip6_fib.c 		iter->w.node = iter->w.root;
iter             2465 net/ipv6/ip6_fib.c 		WARN_ON(iter->w.skip);
iter             2466 net/ipv6/ip6_fib.c 		iter->w.skip = iter->w.count;
iter             2475 net/ipv6/ip6_fib.c 	struct ipv6_route_iter *iter = seq->private;
iter             2487 net/ipv6/ip6_fib.c 	ipv6_route_check_sernum(iter);
iter             2488 net/ipv6/ip6_fib.c 	spin_lock_bh(&iter->tbl->tb6_lock);
iter             2489 net/ipv6/ip6_fib.c 	r = fib6_walk_continue(&iter->w);
iter             2490 net/ipv6/ip6_fib.c 	spin_unlock_bh(&iter->tbl->tb6_lock);
iter             2494 net/ipv6/ip6_fib.c 		return iter->w.leaf;
iter             2496 net/ipv6/ip6_fib.c 		fib6_walker_unlink(net, &iter->w);
iter             2499 net/ipv6/ip6_fib.c 	fib6_walker_unlink(net, &iter->w);
iter             2501 net/ipv6/ip6_fib.c 	iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
iter             2502 net/ipv6/ip6_fib.c 	if (!iter->tbl)
iter             2505 net/ipv6/ip6_fib.c 	ipv6_route_seq_setup_walk(iter, net);
iter             2513 net/ipv6/ip6_fib.c 	struct ipv6_route_iter *iter = seq->private;
iter             2516 net/ipv6/ip6_fib.c 	iter->tbl = ipv6_route_seq_next_table(NULL, net);
iter             2517 net/ipv6/ip6_fib.c 	iter->skip = *pos;
iter             2519 net/ipv6/ip6_fib.c 	if (iter->tbl) {
iter             2520 net/ipv6/ip6_fib.c 		ipv6_route_seq_setup_walk(iter, net);
iter             2527 net/ipv6/ip6_fib.c static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
iter             2529 net/ipv6/ip6_fib.c 	struct fib6_walker *w = &iter->w;
iter             2537 net/ipv6/ip6_fib.c 	struct ipv6_route_iter *iter = seq->private;
iter             2539 net/ipv6/ip6_fib.c 	if (ipv6_route_iter_active(iter))
iter             2540 net/ipv6/ip6_fib.c 		fib6_walker_unlink(net, &iter->w);
iter              308 net/ipv6/ip6_gre.c 	struct ip6_tnl *iter;
iter              311 net/ipv6/ip6_gre.c 	     (iter = rtnl_dereference(*tp)) != NULL;
iter              312 net/ipv6/ip6_gre.c 	     tp = &iter->next) {
iter              313 net/ipv6/ip6_gre.c 		if (t == iter) {
iter              601 net/ipv6/ip6_output.c 		      struct ip6_fraglist_iter *iter)
iter              608 net/ipv6/ip6_output.c 	iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
iter              609 net/ipv6/ip6_output.c 	if (!iter->tmp_hdr)
iter              612 net/ipv6/ip6_output.c 	iter->frag = skb_shinfo(skb)->frag_list;
iter              615 net/ipv6/ip6_output.c 	iter->offset = 0;
iter              616 net/ipv6/ip6_output.c 	iter->hlen = hlen;
iter              617 net/ipv6/ip6_output.c 	iter->frag_id = frag_id;
iter              618 net/ipv6/ip6_output.c 	iter->nexthdr = nexthdr;
iter              624 net/ipv6/ip6_output.c 	memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
iter              641 net/ipv6/ip6_output.c 			  struct ip6_fraglist_iter *iter)
iter              643 net/ipv6/ip6_output.c 	struct sk_buff *frag = iter->frag;
iter              644 net/ipv6/ip6_output.c 	unsigned int hlen = iter->hlen;
iter              652 net/ipv6/ip6_output.c 	memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
iter              653 net/ipv6/ip6_output.c 	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
iter              654 net/ipv6/ip6_output.c 	fh->nexthdr = iter->nexthdr;
iter              656 net/ipv6/ip6_output.c 	fh->frag_off = htons(iter->offset);
iter              659 net/ipv6/ip6_output.c 	fh->identification = iter->frag_id;
iter              820 net/ipv6/ip6_output.c 		struct ip6_fraglist_iter iter;
iter              849 net/ipv6/ip6_output.c 					&iter);
iter              856 net/ipv6/ip6_output.c 			if (iter.frag)
iter              857 net/ipv6/ip6_output.c 				ip6_fraglist_prepare(skb, &iter);
iter              865 net/ipv6/ip6_output.c 			if (err || !iter.frag)
iter              868 net/ipv6/ip6_output.c 			skb = ip6_fraglist_next(&iter);
iter              871 net/ipv6/ip6_output.c 		kfree(iter.tmp_hdr);
iter              879 net/ipv6/ip6_output.c 		kfree_skb_list(iter.frag);
iter              230 net/ipv6/ip6_tunnel.c 	struct ip6_tnl *iter;
iter              236 net/ipv6/ip6_tunnel.c 	     (iter = rtnl_dereference(*tp)) != NULL;
iter              237 net/ipv6/ip6_tunnel.c 	     tp = &iter->next) {
iter              238 net/ipv6/ip6_tunnel.c 		if (t == iter) {
iter              164 net/ipv6/ip6_vti.c 	struct ip6_tnl *iter;
iter              167 net/ipv6/ip6_vti.c 	     (iter = rtnl_dereference(*tp)) != NULL;
iter              168 net/ipv6/ip6_vti.c 	     tp = &iter->next) {
iter              169 net/ipv6/ip6_vti.c 		if (t == iter) {
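
ip6_gre.c, ip6_tunnel.c and ip6_vti.c above (and sit.c further down) unlink a tunnel from a singly linked RCU list by walking a pointer-to-pointer: when the matching node is found, *tp is simply repointed at its successor, so no prev pointer is needed. The same pattern with a hypothetical node type:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_node {				/* hypothetical RCU-list node */
	struct demo_node __rcu *next;
};

/* Sketch: remove 't' from the singly linked list anchored at 'head' with
 * no prev pointer, under RTNL so rtnl_dereference() is legal; readers keep
 * seeing a consistent list because only *tp is repointed.
 */
static void demo_unlink(struct demo_node __rcu **head, struct demo_node *t)
{
	struct demo_node __rcu **tp;
	struct demo_node *iter;

	for (tp = head;
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (iter == t) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
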
iter              404 net/ipv6/ip6mr.c 	struct mr_vif_iter *iter = seq->private;
iter              412 net/ipv6/ip6mr.c 	iter->mrt = mrt;
iter              426 net/ipv6/ip6mr.c 	struct mr_vif_iter *iter = seq->private;
iter              427 net/ipv6/ip6mr.c 	struct mr_table *mrt = iter->mrt;
iter              155 net/ipv6/netfilter.c 		struct ip6_fraglist_iter iter;
iter              176 net/ipv6/netfilter.c 					&iter);
iter              184 net/ipv6/netfilter.c 			if (iter.frag)
iter              185 net/ipv6/netfilter.c 				ip6_fraglist_prepare(skb, &iter);
iter              189 net/ipv6/netfilter.c 			if (err || !iter.frag)
iter              192 net/ipv6/netfilter.c 			skb = ip6_fraglist_next(&iter);
iter              195 net/ipv6/netfilter.c 		kfree(iter.tmp_hdr);
iter              199 net/ipv6/netfilter.c 		kfree_skb_list(iter.frag);
iter              223 net/ipv6/netfilter/ip6_tables.c 	const struct ip6t_entry *iter;
iter              231 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
iter              232 net/ipv6/netfilter/ip6_tables.c 		if (get_chainname_rulenum(iter, e, hookname,
iter              682 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter              701 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              702 net/ipv6/netfilter/ip6_tables.c 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
iter              710 net/ipv6/netfilter/ip6_tables.c 			offsets[i] = (void *)iter - entry0;
iter              712 net/ipv6/netfilter/ip6_tables.c 		if (strcmp(ip6t_get_target(iter)->u.user.name,
iter              733 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, entry0, newinfo->size) {
iter              734 net/ipv6/netfilter/ip6_tables.c 		ret = find_check_entry(iter, net, repl->name, repl->size,
iter              742 net/ipv6/netfilter/ip6_tables.c 		xt_entry_foreach(iter, entry0, newinfo->size) {
iter              745 net/ipv6/netfilter/ip6_tables.c 			cleanup_entry(iter, net);
iter              760 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter              768 net/ipv6/netfilter/ip6_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              773 net/ipv6/netfilter/ip6_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              790 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter              795 net/ipv6/netfilter/ip6_tables.c 		xt_entry_foreach(iter, t->entries, t->size) {
iter              798 net/ipv6/netfilter/ip6_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
iter              940 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter              954 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
iter              955 net/ipv6/netfilter/ip6_tables.c 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
iter             1063 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1101 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
iter             1102 net/ipv6/netfilter/ip6_tables.c 		cleanup_entry(iter, net);
iter             1129 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1164 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter             1165 net/ipv6/netfilter/ip6_tables.c 		cleanup_entry(iter, net);
iter             1181 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1202 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, private->entries, private->size) {
iter             1205 net/ipv6/netfilter/ip6_tables.c 		tmp = xt_get_this_cpu_counter(&iter->counters);
iter             1504 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1539 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
iter             1540 net/ipv6/netfilter/ip6_tables.c 		cleanup_entry(iter, net);
iter             1587 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1595 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, private->entries, total_size) {
iter             1596 net/ipv6/netfilter/ip6_tables.c 		ret = compat_copy_entry_to_user(iter, &pos,
iter             1751 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *iter;
iter             1757 net/ipv6/netfilter/ip6_tables.c 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
iter             1758 net/ipv6/netfilter/ip6_tables.c 		cleanup_entry(iter, net);
iter             4531 net/ipv6/route.c 	struct fib6_info *iter;
iter             4536 net/ipv6/route.c 	iter = rcu_dereference_protected(fn->leaf,
iter             4538 net/ipv6/route.c 	while (iter) {
iter             4539 net/ipv6/route.c 		if (iter->fib6_metric == rt->fib6_metric &&
iter             4540 net/ipv6/route.c 		    rt6_qualify_for_ecmp(iter))
iter             4541 net/ipv6/route.c 			return iter;
iter             4542 net/ipv6/route.c 		iter = rcu_dereference_protected(iter->fib6_next,
iter             4562 net/ipv6/route.c 	struct fib6_info *iter;
iter             4568 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
iter             4569 net/ipv6/route.c 		if (!rt6_is_dead(iter))
iter             4570 net/ipv6/route.c 			total += iter->fib6_nh->fib_nh_weight;
iter             4590 net/ipv6/route.c 	struct fib6_info *iter;
iter             4595 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter             4596 net/ipv6/route.c 		rt6_upper_bound_set(iter, &weight, total);
iter             4657 net/ipv6/route.c 	struct fib6_info *iter;
iter             4661 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter             4662 net/ipv6/route.c 		if (iter->fib6_nh->fib_nh_dev == dev)
iter             4670 net/ipv6/route.c 	struct fib6_info *iter;
iter             4673 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter             4674 net/ipv6/route.c 		iter->should_flush = 1;
iter             4680 net/ipv6/route.c 	struct fib6_info *iter;
iter             4686 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter             4687 net/ipv6/route.c 		if (iter->fib6_nh->fib_nh_dev == down_dev ||
iter             4688 net/ipv6/route.c 		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
iter             4698 net/ipv6/route.c 	struct fib6_info *iter;
iter             4702 net/ipv6/route.c 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter             4703 net/ipv6/route.c 		if (iter->fib6_nh->fib_nh_dev == dev)
iter             4704 net/ipv6/route.c 			iter->fib6_nh->fib_nh_flags |= nh_flags;
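
The net/ipv6/route.c entries walk rt->fib6_siblings with list_for_each_entry() to aggregate or propagate per-nexthop state across a multipath route. The aggregate step in isolation, with a hypothetical sibling structure standing in for fib6_info:

#include <linux/list.h>
#include <linux/types.h>

struct demo_path {				/* hypothetical multipath leg */
	struct list_head siblings;
	int weight;
	bool dead;
};

/* Sketch: total the weight of the live legs of a multipath route, the same
 * walk rt6_multipath_total_weight() does over rt->fib6_siblings above.
 */
static int demo_total_weight(struct demo_path *rt)
{
	struct demo_path *iter;
	int total = rt->dead ? 0 : rt->weight;

	list_for_each_entry(iter, &rt->siblings, siblings)
		if (!iter->dead)
			total += iter->weight;

	return total;
}
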
iter              267 net/ipv6/seg6.c 	struct rhashtable_iter *iter;
iter              270 net/ipv6/seg6.c 	iter = (struct rhashtable_iter *)cb->args[0];
iter              272 net/ipv6/seg6.c 	if (!iter) {
iter              273 net/ipv6/seg6.c 		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter              274 net/ipv6/seg6.c 		if (!iter)
iter              277 net/ipv6/seg6.c 		cb->args[0] = (long)iter;
iter              280 net/ipv6/seg6.c 	rhashtable_walk_enter(&sdata->hmac_infos, iter);
iter              287 net/ipv6/seg6.c 	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
iter              289 net/ipv6/seg6.c 	rhashtable_walk_exit(iter);
iter              291 net/ipv6/seg6.c 	kfree(iter);
iter              298 net/ipv6/seg6.c 	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
iter              302 net/ipv6/seg6.c 	rhashtable_walk_start(iter);
iter              305 net/ipv6/seg6.c 		hinfo = rhashtable_walk_next(iter);
iter              328 net/ipv6/seg6.c 	rhashtable_walk_stop(iter);
iter              156 net/ipv6/sit.c 	struct ip_tunnel *iter;
iter              159 net/ipv6/sit.c 	     (iter = rtnl_dereference(*tp)) != NULL;
iter              160 net/ipv6/sit.c 	     tp = &iter->next) {
iter              161 net/ipv6/sit.c 		if (t == iter) {
iter             1783 net/mac80211/chan.c 	void (*iter)(struct ieee80211_hw *hw,
iter             1794 net/mac80211/chan.c 			iter(hw, &ctx->conf, iter_data);
iter              156 net/mac80211/iface.c 	struct ieee80211_sub_if_data *iter;
iter              178 net/mac80211/iface.c 	list_for_each_entry(iter, &local->interfaces, list) {
iter              179 net/mac80211/iface.c 		if (iter == sdata)
iter              182 net/mac80211/iface.c 		if (iter->vif.type == NL80211_IFTYPE_MONITOR &&
iter              183 net/mac80211/iface.c 		    !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE))
iter              186 net/mac80211/iface.c 		m = iter->vif.addr;
iter              881 net/mac80211/key.c 			 void (*iter)(struct ieee80211_hw *hw,
iter              898 net/mac80211/key.c 			iter(hw, &sdata->vif,
iter              905 net/mac80211/key.c 				iter(hw, &sdata->vif,
iter              916 net/mac80211/key.c 			 void (*iter)(struct ieee80211_hw *hw,
iter              932 net/mac80211/key.c 		iter(hw, &sdata->vif,
iter              940 net/mac80211/key.c 			     void (*iter)(struct ieee80211_hw *hw,
iter              952 net/mac80211/key.c 		_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
iter              955 net/mac80211/key.c 			_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
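
net/mac80211/key.c implements ieee80211_iter_keys(), which calls a driver-supplied function for every key mac80211 tracks. A hedged driver-side sketch; the demo_* names are hypothetical, the callback signature is the one the listing shows, and the caller is assumed to satisfy whatever locking mac80211 requires for this helper:

#include <net/mac80211.h>

/* Sketch: count the keys mac80211 currently tracks for one vif. */
static void demo_key_counter(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key,
			     void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int demo_count_keys(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	unsigned int count = 0;

	ieee80211_iter_keys(hw, vif, demo_key_counter, &count);
	return count;
}
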
iter             3783 net/mac80211/tx.c 	struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
iter             3795 net/mac80211/tx.c 	list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
iter             3797 net/mac80211/tx.c 		if (iter == txqi)
iter             3800 net/mac80211/tx.c 		if (!iter->txq.sta) {
iter             3801 net/mac80211/tx.c 			list_move_tail(&iter->schedule_order,
iter             3805 net/mac80211/tx.c 		sta = container_of(iter->txq.sta, struct sta_info, sta);
iter             3808 net/mac80211/tx.c 		list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
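
The net/mac80211/tx.c scheduler walks local->active_txqs with list_for_each_entry_safe() because it moves entries to the tail of the same list mid-walk. A sketch of that shape with hypothetical types; the stop element bounds the walk, which is what keeps the rotation from revisiting nodes it has already moved:

#include <linux/list.h>
#include <linux/types.h>

struct demo_txq {				/* hypothetical scheduled queue */
	struct list_head schedule_order;
	bool background;
};

/* Sketch: push every "background" queue found before 'stop' to the tail of
 * the schedule.  'stop' must be on the list; breaking there keeps the walk
 * from chasing nodes it just moved behind it.  The _safe variant is needed
 * because list_move_tail() relinks the node the cursor is standing on.
 */
static void demo_demote_background(struct list_head *schedule,
				   struct demo_txq *stop)
{
	struct demo_txq *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, schedule, schedule_order) {
		if (iter == stop)
			break;
		if (iter->background)
			list_move_tail(&iter->schedule_order, schedule);
	}
}
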
iter             1016 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_iter_state *iter = seq->private;
iter             1024 net/netfilter/ipvs/ip_vs_conn.c 				iter->l = &ip_vs_conn_tab[idx];
iter             1037 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_iter_state *iter = seq->private;
iter             1039 net/netfilter/ipvs/ip_vs_conn.c 	iter->l = NULL;
iter             1047 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_iter_state *iter = seq->private;
iter             1049 net/netfilter/ipvs/ip_vs_conn.c 	struct hlist_head *l = iter->l;
iter             1064 net/netfilter/ipvs/ip_vs_conn.c 			iter->l = &ip_vs_conn_tab[idx];
iter             1069 net/netfilter/ipvs/ip_vs_conn.c 	iter->l = NULL;
iter             2023 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_iter *iter = seq->private;
iter             2031 net/netfilter/ipvs/ip_vs_ctl.c 				iter->table = ip_vs_svc_table;
iter             2032 net/netfilter/ipvs/ip_vs_ctl.c 				iter->bucket = idx;
iter             2043 net/netfilter/ipvs/ip_vs_ctl.c 				iter->table = ip_vs_svc_fwm_table;
iter             2044 net/netfilter/ipvs/ip_vs_ctl.c 				iter->bucket = idx;
iter             2064 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_iter *iter;
iter             2072 net/netfilter/ipvs/ip_vs_ctl.c 	iter = seq->private;
iter             2074 net/netfilter/ipvs/ip_vs_ctl.c 	if (iter->table == ip_vs_svc_table) {
iter             2080 net/netfilter/ipvs/ip_vs_ctl.c 		while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
iter             2082 net/netfilter/ipvs/ip_vs_ctl.c 						 &ip_vs_svc_table[iter->bucket],
iter             2088 net/netfilter/ipvs/ip_vs_ctl.c 		iter->table = ip_vs_svc_fwm_table;
iter             2089 net/netfilter/ipvs/ip_vs_ctl.c 		iter->bucket = -1;
iter             2099 net/netfilter/ipvs/ip_vs_ctl.c 	while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
iter             2101 net/netfilter/ipvs/ip_vs_ctl.c 					 &ip_vs_svc_fwm_table[iter->bucket],
iter             2130 net/netfilter/ipvs/ip_vs_ctl.c 		const struct ip_vs_iter *iter = seq->private;
iter             2137 net/netfilter/ipvs/ip_vs_ctl.c 		if (iter->table == ip_vs_svc_table) {
iter             2057 net/netfilter/nf_conntrack_core.c get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
iter             2074 net/netfilter/nf_conntrack_core.c 				if (iter(ct, data))
iter             2091 net/netfilter/nf_conntrack_core.c static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
iter             2102 net/netfilter/nf_conntrack_core.c 		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
iter             2117 net/netfilter/nf_conntrack_core.c 	int (*iter)(struct nf_conn *i, void *data);
iter             2129 net/netfilter/nf_conntrack_core.c 	return d->iter(i, d->data);
iter             2173 net/netfilter/nf_conntrack_core.c 			       int (*iter)(struct nf_conn *i, void *data),
iter             2183 net/netfilter/nf_conntrack_core.c 	d.iter = iter;
iter             2203 net/netfilter/nf_conntrack_core.c nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
iter             2229 net/netfilter/nf_conntrack_core.c 	nf_ct_iterate_cleanup(iter, data, 0, 0);
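
net/netfilter/nf_conntrack_core.c exposes callback-driven iteration: the caller hands nf_ct_iterate_cleanup()/nf_ct_iterate_destroy() an iter() function, and every conntrack entry for which it returns nonzero is deleted. A small usage sketch; the demo_* names are hypothetical:

#include <linux/types.h>
#include <net/netfilter/nf_conntrack.h>

/* Sketch: delete every conntrack entry whose layer-4 protocol matches.
 * A nonzero return from the callback asks the core to remove the entry.
 */
static int demo_kill_by_l4proto(struct nf_conn *ct, void *data)
{
	u8 l4proto = *(u8 *)data;

	return nf_ct_protonum(ct) == l4proto;
}

static void demo_flush_l4proto(u8 l4proto)
{
	nf_ct_iterate_destroy(demo_kill_by_l4proto, &l4proto);
}
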
iter              490 net/netfilter/nf_conntrack_expect.c void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
iter              503 net/netfilter/nf_conntrack_expect.c 			if (iter(exp, data) && del_timer(&exp->timeout)) {
iter              515 net/netfilter/nf_conntrack_expect.c 			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
iter              533 net/netfilter/nf_conntrack_expect.c 			if (iter(exp, data) && del_timer(&exp->timeout)) {
iter              295 net/netfilter/nf_flow_table_core.c 		      void (*iter)(struct flow_offload *flow, void *data),
iter              319 net/netfilter/nf_flow_table_core.c 		iter(flow, data);
iter             3831 net/netfilter/nf_tables_api.c 					const struct nft_set_iter *iter,
iter             3848 net/netfilter/nf_tables_api.c 	struct nft_set_iter iter;
iter             3866 net/netfilter/nf_tables_api.c 		iter.genmask	= nft_genmask_next(ctx->net);
iter             3867 net/netfilter/nf_tables_api.c 		iter.skip 	= 0;
iter             3868 net/netfilter/nf_tables_api.c 		iter.count	= 0;
iter             3869 net/netfilter/nf_tables_api.c 		iter.err	= 0;
iter             3870 net/netfilter/nf_tables_api.c 		iter.fn		= nf_tables_bind_check_setelem;
iter             3872 net/netfilter/nf_tables_api.c 		set->ops->walk(ctx, set, &iter);
iter             3873 net/netfilter/nf_tables_api.c 		if (iter.err < 0)
iter             3874 net/netfilter/nf_tables_api.c 			return iter.err;
iter             4082 net/netfilter/nf_tables_api.c 	struct nft_set_iter		iter;
iter             4088 net/netfilter/nf_tables_api.c 				  const struct nft_set_iter *iter,
iter             4093 net/netfilter/nf_tables_api.c 	args = container_of(iter, struct nft_set_dump_args, iter);
iter             4164 net/netfilter/nf_tables_api.c 	args.iter.genmask	= nft_genmask_cur(net);
iter             4165 net/netfilter/nf_tables_api.c 	args.iter.skip		= cb->args[0];
iter             4166 net/netfilter/nf_tables_api.c 	args.iter.count		= 0;
iter             4167 net/netfilter/nf_tables_api.c 	args.iter.err		= 0;
iter             4168 net/netfilter/nf_tables_api.c 	args.iter.fn		= nf_tables_dump_setelem;
iter             4169 net/netfilter/nf_tables_api.c 	set->ops->walk(&dump_ctx->ctx, set, &args.iter);
iter             4175 net/netfilter/nf_tables_api.c 	if (args.iter.err && args.iter.err != -EMSGSIZE)
iter             4176 net/netfilter/nf_tables_api.c 		return args.iter.err;
iter             4177 net/netfilter/nf_tables_api.c 	if (args.iter.count == cb->args[0])
iter             4180 net/netfilter/nf_tables_api.c 	cb->args[0] = args.iter.count;
iter             4892 net/netfilter/nf_tables_api.c 			 const struct nft_set_iter *iter,
iter             4943 net/netfilter/nf_tables_api.c 		struct nft_set_iter iter = {
iter             4947 net/netfilter/nf_tables_api.c 		set->ops->walk(&ctx, set, &iter);
iter             4949 net/netfilter/nf_tables_api.c 		return iter.err;
iter             7230 net/netfilter/nf_tables_api.c 					const struct nft_set_iter *iter,
iter             7257 net/netfilter/nf_tables_api.c 	struct nft_set_iter iter;
iter             7301 net/netfilter/nf_tables_api.c 			iter.genmask	= nft_genmask_next(ctx->net);
iter             7302 net/netfilter/nf_tables_api.c 			iter.skip 	= 0;
iter             7303 net/netfilter/nf_tables_api.c 			iter.count	= 0;
iter             7304 net/netfilter/nf_tables_api.c 			iter.err	= 0;
iter             7305 net/netfilter/nf_tables_api.c 			iter.fn		= nf_tables_loop_check_setelem;
iter             7307 net/netfilter/nf_tables_api.c 			set->ops->walk(ctx, set, &iter);
iter             7308 net/netfilter/nf_tables_api.c 			if (iter.err < 0)
iter             7309 net/netfilter/nf_tables_api.c 				return iter.err;
iter              165 net/netfilter/nft_lookup.c 				       const struct nft_set_iter *iter,
iter              199 net/netfilter/nft_lookup.c 	struct nft_set_iter iter;
iter              205 net/netfilter/nft_lookup.c 	iter.genmask	= nft_genmask_next(ctx->net);
iter              206 net/netfilter/nft_lookup.c 	iter.skip	= 0;
iter              207 net/netfilter/nft_lookup.c 	iter.count	= 0;
iter              208 net/netfilter/nft_lookup.c 	iter.err	= 0;
iter              209 net/netfilter/nft_lookup.c 	iter.fn		= nft_lookup_validate_setelem;
iter              211 net/netfilter/nft_lookup.c 	priv->set->ops->walk(ctx, priv->set, &iter);
iter              212 net/netfilter/nft_lookup.c 	if (iter.err < 0)
iter              213 net/netfilter/nft_lookup.c 		return iter.err;
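
nf_tables_api.c and nft_lookup.c fill a struct nft_set_iter by hand (genmask, skip, count, err, fn) and pass it to set->ops->walk(); the per-element callback's verdict comes back in iter.err. A sketch of the consumer side, with a hypothetical element check:

#include <net/netfilter/nf_tables.h>

/* Hypothetical per-element check: return 0 to keep walking, a negative
 * errno to stop; the backend copies the verdict into iter->err.
 */
static int demo_check_elem(const struct nft_ctx *ctx, struct nft_set *set,
			   const struct nft_set_iter *iter,
			   struct nft_set_elem *elem)
{
	return 0;
}

/* Sketch of the consumer side, mirroring nft_lookup_validate() above. */
static int demo_walk_set(const struct nft_ctx *ctx, struct nft_set *set)
{
	struct nft_set_iter iter = {
		.genmask = nft_genmask_next(ctx->net),
		.fn	 = demo_check_elem,
	};				/* skip, count and err start at 0 */

	set->ops->walk(ctx, set, &iter);
	return iter.err;
}
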
iter              216 net/netfilter/nft_set_bitmap.c 			    struct nft_set_iter *iter)
iter              223 net/netfilter/nft_set_bitmap.c 		if (iter->count < iter->skip)
iter              225 net/netfilter/nft_set_bitmap.c 		if (!nft_set_elem_active(&be->ext, iter->genmask))
iter              230 net/netfilter/nft_set_bitmap.c 		iter->err = iter->fn(ctx, set, iter, &elem);
iter              232 net/netfilter/nft_set_bitmap.c 		if (iter->err < 0)
iter              235 net/netfilter/nft_set_bitmap.c 		iter->count++;
iter              256 net/netfilter/nft_set_hash.c 			   struct nft_set_iter *iter)
iter              269 net/netfilter/nft_set_hash.c 				iter->err = PTR_ERR(he);
iter              276 net/netfilter/nft_set_hash.c 		if (iter->count < iter->skip)
iter              280 net/netfilter/nft_set_hash.c 		if (!nft_set_elem_active(&he->ext, iter->genmask))
iter              285 net/netfilter/nft_set_hash.c 		iter->err = iter->fn(ctx, set, iter, &elem);
iter              286 net/netfilter/nft_set_hash.c 		if (iter->err < 0)
iter              290 net/netfilter/nft_set_hash.c 		iter->count++;
iter              570 net/netfilter/nft_set_hash.c 			  struct nft_set_iter *iter)
iter              579 net/netfilter/nft_set_hash.c 			if (iter->count < iter->skip)
iter              581 net/netfilter/nft_set_hash.c 			if (!nft_set_elem_active(&he->ext, iter->genmask))
iter              586 net/netfilter/nft_set_hash.c 			iter->err = iter->fn(ctx, set, iter, &elem);
iter              587 net/netfilter/nft_set_hash.c 			if (iter->err < 0)
iter              590 net/netfilter/nft_set_hash.c 			iter->count++;
iter              354 net/netfilter/nft_set_rbtree.c 			    struct nft_set_iter *iter)
iter              365 net/netfilter/nft_set_rbtree.c 		if (iter->count < iter->skip)
iter              369 net/netfilter/nft_set_rbtree.c 		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
iter              374 net/netfilter/nft_set_rbtree.c 		iter->err = iter->fn(ctx, set, iter, &elem);
iter              375 net/netfilter/nft_set_rbtree.c 		if (iter->err < 0) {
iter              380 net/netfilter/nft_set_rbtree.c 		iter->count++;
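
nft_set_bitmap.c, nft_set_hash.c and nft_set_rbtree.c show the backend half of that contract: each ->walk() honours iter->skip, filters on the generation mask, and stops once iter->fn() reports an error. A rough sketch for a hypothetical list-backed backend; the demo_* names and the list layout are assumptions:

#include <linux/list.h>
#include <net/netfilter/nf_tables.h>

struct demo_elem {				/* hypothetical backend element */
	struct list_head head;
	struct nft_set_ext ext;
};

/* Rough sketch of a backend ->walk(), shaped like the walks above. */
static void demo_set_walk(const struct nft_ctx *ctx, struct nft_set *set,
			  struct nft_set_iter *iter)
{
	struct list_head *list = nft_set_priv(set);	/* assumed list-backed */
	struct nft_set_elem elem;
	struct demo_elem *e;

	list_for_each_entry(e, list, head) {
		if (iter->count < iter->skip)
			goto cont;			/* already dumped last time */
		if (!nft_set_elem_active(&e->ext, iter->genmask))
			goto cont;			/* not live in this generation */

		elem.priv = e;
		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0)
			return;
cont:
		iter->count++;
	}
}
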
iter               49 net/netlabel/netlabel_addrlist.c 	struct netlbl_af4list *iter;
iter               51 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter               52 net/netlabel/netlabel_addrlist.c 		if (iter->valid && (addr & iter->mask) == iter->addr)
iter               53 net/netlabel/netlabel_addrlist.c 			return iter;
iter               74 net/netlabel/netlabel_addrlist.c 	struct netlbl_af4list *iter;
iter               76 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter               77 net/netlabel/netlabel_addrlist.c 		if (iter->valid && iter->addr == addr && iter->mask == mask)
iter               78 net/netlabel/netlabel_addrlist.c 			return iter;
iter               99 net/netlabel/netlabel_addrlist.c 	struct netlbl_af6list *iter;
iter              101 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter              102 net/netlabel/netlabel_addrlist.c 		if (iter->valid &&
iter              103 net/netlabel/netlabel_addrlist.c 		    ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0)
iter              104 net/netlabel/netlabel_addrlist.c 			return iter;
iter              125 net/netlabel/netlabel_addrlist.c 	struct netlbl_af6list *iter;
iter              127 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter              128 net/netlabel/netlabel_addrlist.c 		if (iter->valid &&
iter              129 net/netlabel/netlabel_addrlist.c 		    ipv6_addr_equal(&iter->addr, addr) &&
iter              130 net/netlabel/netlabel_addrlist.c 		    ipv6_addr_equal(&iter->mask, mask))
iter              131 net/netlabel/netlabel_addrlist.c 			return iter;
iter              150 net/netlabel/netlabel_addrlist.c 	struct netlbl_af4list *iter;
iter              152 net/netlabel/netlabel_addrlist.c 	iter = netlbl_af4list_search(entry->addr, head);
iter              153 net/netlabel/netlabel_addrlist.c 	if (iter != NULL &&
iter              154 net/netlabel/netlabel_addrlist.c 	    iter->addr == entry->addr && iter->mask == entry->mask)
iter              161 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter              162 net/netlabel/netlabel_addrlist.c 		if (iter->valid &&
iter              163 net/netlabel/netlabel_addrlist.c 		    ntohl(entry->mask) > ntohl(iter->mask)) {
iter              165 net/netlabel/netlabel_addrlist.c 				       iter->list.prev,
iter              166 net/netlabel/netlabel_addrlist.c 				       &iter->list);
iter              187 net/netlabel/netlabel_addrlist.c 	struct netlbl_af6list *iter;
iter              189 net/netlabel/netlabel_addrlist.c 	iter = netlbl_af6list_search(&entry->addr, head);
iter              190 net/netlabel/netlabel_addrlist.c 	if (iter != NULL &&
iter              191 net/netlabel/netlabel_addrlist.c 	    ipv6_addr_equal(&iter->addr, &entry->addr) &&
iter              192 net/netlabel/netlabel_addrlist.c 	    ipv6_addr_equal(&iter->mask, &entry->mask))
iter              199 net/netlabel/netlabel_addrlist.c 	list_for_each_entry_rcu(iter, head, list)
iter              200 net/netlabel/netlabel_addrlist.c 		if (iter->valid &&
iter              201 net/netlabel/netlabel_addrlist.c 		    ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) {
iter              203 net/netlabel/netlabel_addrlist.c 				       iter->list.prev,
iter              204 net/netlabel/netlabel_addrlist.c 				       &iter->list);
iter              357 net/netlabel/netlabel_addrlist.c 		int iter = -1;
iter              358 net/netlabel/netlabel_addrlist.c 		while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff)
iter              360 net/netlabel/netlabel_addrlist.c 		mask_val = ntohl(mask->s6_addr32[iter]);
iter               82 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach(iter, head)				\
iter               83 net/netlabel/netlabel_addrlist.h 	for (iter = __af4list_valid((head)->next, head);		\
iter               84 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter               85 net/netlabel/netlabel_addrlist.h 	     iter = __af4list_valid(iter->list.next, head))
iter               87 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach_rcu(iter, head)				\
iter               88 net/netlabel/netlabel_addrlist.h 	for (iter = __af4list_valid_rcu((head)->next, head);		\
iter               89 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter               90 net/netlabel/netlabel_addrlist.h 	     iter = __af4list_valid_rcu(iter->list.next, head))
iter               92 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach_safe(iter, tmp, head)			\
iter               93 net/netlabel/netlabel_addrlist.h 	for (iter = __af4list_valid((head)->next, head),		\
iter               94 net/netlabel/netlabel_addrlist.h 		     tmp = __af4list_valid(iter->list.next, head);	\
iter               95 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter               96 net/netlabel/netlabel_addrlist.h 	     iter = tmp, tmp = __af4list_valid(iter->list.next, head))
iter              149 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach(iter, head)				\
iter              150 net/netlabel/netlabel_addrlist.h 	for (iter = __af6list_valid((head)->next, head);		\
iter              151 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter              152 net/netlabel/netlabel_addrlist.h 	     iter = __af6list_valid(iter->list.next, head))
iter              154 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach_rcu(iter, head)				\
iter              155 net/netlabel/netlabel_addrlist.h 	for (iter = __af6list_valid_rcu((head)->next, head);		\
iter              156 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter              157 net/netlabel/netlabel_addrlist.h 	     iter = __af6list_valid_rcu(iter->list.next, head))
iter              159 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach_safe(iter, tmp, head)			\
iter              160 net/netlabel/netlabel_addrlist.h 	for (iter = __af6list_valid((head)->next, head),		\
iter              161 net/netlabel/netlabel_addrlist.h 		     tmp = __af6list_valid(iter->list.next, head);	\
iter              162 net/netlabel/netlabel_addrlist.h 	     &iter->list != (head);					\
iter              163 net/netlabel/netlabel_addrlist.h 	     iter = tmp, tmp = __af6list_valid(iter->list.next, head))
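
The netlabel_addrlist.h macros above iterate an address list while transparently skipping entries whose ->valid flag has been cleared. A usage sketch, roughly what netlbl_af4list_search() earlier in the listing does, written against the local netlabel header:

#include <linux/types.h>
#include "netlabel_addrlist.h"		/* local net/netlabel/ header */

/* Sketch: look up the entry covering 'addr' on an address list; the
 * foreach_rcu macro already steps over entries whose ->valid flag is
 * clear.  Caller holds rcu_read_lock().
 */
static struct netlbl_af4list *demo_af4_lookup(__be32 addr,
					      struct list_head *head)
{
	struct netlbl_af4list *iter;

	netlbl_af4list_foreach_rcu(iter, head)
		if ((addr & iter->mask) == iter->addr)
			return iter;

	return NULL;
}
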
iter               84 net/netlabel/netlabel_cipso_v4.c 	u32 iter = 0;
iter               96 net/netlabel/netlabel_cipso_v4.c 			if (iter >= CIPSO_V4_TAG_MAXCNT)
iter               98 net/netlabel/netlabel_cipso_v4.c 			doi_def->tags[iter++] = nla_get_u8(nla);
iter              100 net/netlabel/netlabel_cipso_v4.c 	while (iter < CIPSO_V4_TAG_MAXCNT)
iter              101 net/netlabel/netlabel_cipso_v4.c 		doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID;
iter              130 net/netlabel/netlabel_cipso_v4.c 	u32 iter;
iter              202 net/netlabel/netlabel_cipso_v4.c 	for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++)
iter              203 net/netlabel/netlabel_cipso_v4.c 		doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL;
iter              204 net/netlabel/netlabel_cipso_v4.c 	for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++)
iter              205 net/netlabel/netlabel_cipso_v4.c 		doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL;
iter              279 net/netlabel/netlabel_cipso_v4.c 		for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++)
iter              280 net/netlabel/netlabel_cipso_v4.c 			doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT;
iter              281 net/netlabel/netlabel_cipso_v4.c 		for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++)
iter              282 net/netlabel/netlabel_cipso_v4.c 			doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT;
iter              458 net/netlabel/netlabel_cipso_v4.c 	u32 iter;
iter              496 net/netlabel/netlabel_cipso_v4.c 	for (iter = 0;
iter              497 net/netlabel/netlabel_cipso_v4.c 	     iter < CIPSO_V4_TAG_MAXCNT &&
iter              498 net/netlabel/netlabel_cipso_v4.c 	       doi_def->tags[iter] != CIPSO_V4_TAG_INVALID;
iter              499 net/netlabel/netlabel_cipso_v4.c 	     iter++) {
iter              502 net/netlabel/netlabel_cipso_v4.c 				     doi_def->tags[iter]);
iter              516 net/netlabel/netlabel_cipso_v4.c 		for (iter = 0;
iter              517 net/netlabel/netlabel_cipso_v4.c 		     iter < doi_def->map.std->lvl.local_size;
iter              518 net/netlabel/netlabel_cipso_v4.c 		     iter++) {
iter              519 net/netlabel/netlabel_cipso_v4.c 			if (doi_def->map.std->lvl.local[iter] ==
iter              531 net/netlabel/netlabel_cipso_v4.c 					      iter);
iter              536 net/netlabel/netlabel_cipso_v4.c 					    doi_def->map.std->lvl.local[iter]);
iter              549 net/netlabel/netlabel_cipso_v4.c 		for (iter = 0;
iter              550 net/netlabel/netlabel_cipso_v4.c 		     iter < doi_def->map.std->cat.local_size;
iter              551 net/netlabel/netlabel_cipso_v4.c 		     iter++) {
iter              552 net/netlabel/netlabel_cipso_v4.c 			if (doi_def->map.std->cat.local[iter] ==
iter              564 net/netlabel/netlabel_cipso_v4.c 					      iter);
iter              569 net/netlabel/netlabel_cipso_v4.c 					    doi_def->map.std->cat.local[iter]);
iter              106 net/netlabel/netlabel_domainhash.c 	u32 iter;
iter              113 net/netlabel/netlabel_domainhash.c 	for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
iter              114 net/netlabel/netlabel_domainhash.c 		val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
iter              141 net/netlabel/netlabel_domainhash.c 	struct netlbl_dom_map *iter;
iter              146 net/netlabel/netlabel_domainhash.c 		list_for_each_entry_rcu(iter, bkt_list, list)
iter              147 net/netlabel/netlabel_domainhash.c 			if (iter->valid &&
iter              148 net/netlabel/netlabel_domainhash.c 			    netlbl_family_match(iter->family, family) &&
iter              149 net/netlabel/netlabel_domainhash.c 			    strcmp(iter->domain, domain) == 0)
iter              150 net/netlabel/netlabel_domainhash.c 				return iter;
iter              362 net/netlabel/netlabel_domainhash.c 	u32 iter;
iter              379 net/netlabel/netlabel_domainhash.c 	for (iter = 0; iter < hsh_tbl->size; iter++)
iter              380 net/netlabel/netlabel_domainhash.c 		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
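
netlabel_domainhash.c picks a bucket by feeding the domain string through a rotate-left-by-4/XOR hash and masking with the power-of-two table size, then walks only that bucket's list. The hash step on its own:

#include <linux/string.h>
#include <linux/types.h>

/* The rotate-left-by-4, XOR-a-byte string hash used above; 'table_size'
 * must be a power of two so the final mask picks a valid bucket.
 */
static u32 demo_domain_hash(const char *key, u32 table_size)
{
	u32 val = 0;
	u32 len = strlen(key);
	u32 i;

	for (i = 0; i < len; i++)
		val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[i];

	return val & (table_size - 1);
}
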
iter               43 net/netlabel/netlabel_domainhash.h #define netlbl_domhsh_addr4_entry(iter) \
iter               44 net/netlabel/netlabel_domainhash.h 	container_of(iter, struct netlbl_domaddr4_map, list)
iter               50 net/netlabel/netlabel_domainhash.h #define netlbl_domhsh_addr6_entry(iter) \
iter               51 net/netlabel/netlabel_domainhash.h 	container_of(iter, struct netlbl_domaddr6_map, list)
iter              559 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter = *catmap;
iter              562 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              564 net/netlabel/netlabel_kapi.c 	if (offset < iter->startbit)
iter              566 net/netlabel/netlabel_kapi.c 	while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
iter              567 net/netlabel/netlabel_kapi.c 		prev = iter;
iter              568 net/netlabel/netlabel_kapi.c 		iter = iter->next;
iter              570 net/netlabel/netlabel_kapi.c 	if (iter == NULL || offset < iter->startbit)
iter              573 net/netlabel/netlabel_kapi.c 	return iter;
iter              577 net/netlabel/netlabel_kapi.c 		return iter;
iter              582 net/netlabel/netlabel_kapi.c 	iter = netlbl_catmap_alloc(gfp_flags);
iter              583 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              585 net/netlabel/netlabel_kapi.c 	iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);
iter              588 net/netlabel/netlabel_kapi.c 		iter->next = *catmap;
iter              589 net/netlabel/netlabel_kapi.c 		*catmap = iter;
iter              591 net/netlabel/netlabel_kapi.c 		iter->next = prev->next;
iter              592 net/netlabel/netlabel_kapi.c 		prev->next = iter;
iter              595 net/netlabel/netlabel_kapi.c 	return iter;
iter              610 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter;
iter              615 net/netlabel/netlabel_kapi.c 	iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
iter              616 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              618 net/netlabel/netlabel_kapi.c 	if (offset > iter->startbit) {
iter              619 net/netlabel/netlabel_kapi.c 		offset -= iter->startbit;
iter              626 net/netlabel/netlabel_kapi.c 	bitmap = iter->bitmap[idx] >> bit;
iter              634 net/netlabel/netlabel_kapi.c 			return iter->startbit +
iter              638 net/netlabel/netlabel_kapi.c 			if (iter->next != NULL) {
iter              639 net/netlabel/netlabel_kapi.c 				iter = iter->next;
iter              644 net/netlabel/netlabel_kapi.c 		bitmap = iter->bitmap[idx];
iter              665 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter;
iter              672 net/netlabel/netlabel_kapi.c 	iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
iter              673 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              675 net/netlabel/netlabel_kapi.c 	if (offset > iter->startbit) {
iter              676 net/netlabel/netlabel_kapi.c 		offset -= iter->startbit;
iter              686 net/netlabel/netlabel_kapi.c 		bitmap = iter->bitmap[idx];
iter              695 net/netlabel/netlabel_kapi.c 			return iter->startbit +
iter              698 net/netlabel/netlabel_kapi.c 			if (iter->next == NULL)
iter              699 net/netlabel/netlabel_kapi.c 				return iter->startbit + NETLBL_CATMAP_SIZE - 1;
iter              700 net/netlabel/netlabel_kapi.c 			prev = iter;
iter              701 net/netlabel/netlabel_kapi.c 			iter = iter->next;
iter              729 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter;
iter              747 net/netlabel/netlabel_kapi.c 	iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0);
iter              748 net/netlabel/netlabel_kapi.c 	if (iter == NULL) {
iter              753 net/netlabel/netlabel_kapi.c 	if (off < iter->startbit) {
iter              754 net/netlabel/netlabel_kapi.c 		*offset = iter->startbit;
iter              757 net/netlabel/netlabel_kapi.c 		off -= iter->startbit;
iter              759 net/netlabel/netlabel_kapi.c 	*bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE);
iter              779 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter;
iter              782 net/netlabel/netlabel_kapi.c 	iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags);
iter              783 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              786 net/netlabel/netlabel_kapi.c 	bit -= iter->startbit;
iter              788 net/netlabel/netlabel_kapi.c 	iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE);
iter              847 net/netlabel/netlabel_kapi.c 	struct netlbl_lsm_catmap *iter;
iter              854 net/netlabel/netlabel_kapi.c 	iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags);
iter              855 net/netlabel/netlabel_kapi.c 	if (iter == NULL)
iter              858 net/netlabel/netlabel_kapi.c 	offset -= iter->startbit;
iter              860 net/netlabel/netlabel_kapi.c 	iter->bitmap[idx] |= bitmap << (offset % NETLBL_CATMAP_MAPSIZE);
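
The netlabel_kapi.c entries walk a chain of fixed-width bitmap nodes sorted by startbit, allocating a node on demand before setting a bit (the _netlbl_catmap_getnode()/netlbl_catmap_setbit() pair). The same sparse chained-bitmap technique with a hypothetical node type and sizes:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_NODE_BITS	256		/* bits covered by one node (power of two) */

struct demo_catmap {			/* hypothetical sparse bitmap node */
	u32 startbit;			/* first bit this node covers */
	unsigned long bitmap[DEMO_NODE_BITS / BITS_PER_LONG];
	struct demo_catmap *next;	/* next node, strictly higher startbit */
};

/* Sketch: find the node covering 'bit', splicing a new node into the
 * sorted chain if none exists, then set the bit.
 */
static int demo_catmap_setbit(struct demo_catmap **catmap, u32 bit, gfp_t flags)
{
	struct demo_catmap *prev = NULL;
	struct demo_catmap *iter = *catmap;

	while (iter && bit >= iter->startbit + DEMO_NODE_BITS) {
		prev = iter;
		iter = iter->next;
	}

	if (!iter || bit < iter->startbit) {
		struct demo_catmap *new = kzalloc(sizeof(*new), flags);

		if (!new)
			return -ENOMEM;
		new->startbit = bit & ~(DEMO_NODE_BITS - 1);
		new->next = iter;
		if (prev)
			prev->next = new;
		else
			*catmap = new;
		iter = new;
	}

	__set_bit(bit - iter->startbit, iter->bitmap);
	return 0;
}
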
iter               66 net/netlabel/netlabel_unlabeled.c #define netlbl_unlhsh_addr4_entry(iter) \
iter               67 net/netlabel/netlabel_unlabeled.c 	container_of(iter, struct netlbl_unlhsh_addr4, list)
iter               74 net/netlabel/netlabel_unlabeled.c #define netlbl_unlhsh_addr6_entry(iter) \
iter               75 net/netlabel/netlabel_unlabeled.c 	container_of(iter, struct netlbl_unlhsh_addr6, list)
iter              206 net/netlabel/netlabel_unlabeled.c 	struct netlbl_unlhsh_iface *iter;
iter              210 net/netlabel/netlabel_unlabeled.c 	list_for_each_entry_rcu(iter, bkt_list, list)
iter              211 net/netlabel/netlabel_unlabeled.c 		if (iter->valid && iter->ifindex == ifindex)
iter              212 net/netlabel/netlabel_unlabeled.c 			return iter;
iter             1411 net/netlabel/netlabel_unlabeled.c 	u32 iter;
iter             1428 net/netlabel/netlabel_unlabeled.c 	for (iter = 0; iter < hsh_tbl->size; iter++)
iter             1429 net/netlabel/netlabel_unlabeled.c 		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
iter             2543 net/netlink/af_netlink.c static void netlink_walk_start(struct nl_seq_iter *iter)
iter             2545 net/netlink/af_netlink.c 	rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
iter             2546 net/netlink/af_netlink.c 	rhashtable_walk_start(&iter->hti);
iter             2549 net/netlink/af_netlink.c static void netlink_walk_stop(struct nl_seq_iter *iter)
iter             2551 net/netlink/af_netlink.c 	rhashtable_walk_stop(&iter->hti);
iter             2552 net/netlink/af_netlink.c 	rhashtable_walk_exit(&iter->hti);
iter             2557 net/netlink/af_netlink.c 	struct nl_seq_iter *iter = seq->private;
iter             2562 net/netlink/af_netlink.c 			nlk = rhashtable_walk_next(&iter->hti);
iter             2574 net/netlink/af_netlink.c 			netlink_walk_stop(iter);
iter             2575 net/netlink/af_netlink.c 			if (++iter->link >= MAX_LINKS)
iter             2578 net/netlink/af_netlink.c 			netlink_walk_start(iter);
iter             2587 net/netlink/af_netlink.c 	struct nl_seq_iter *iter = seq->private;
iter             2591 net/netlink/af_netlink.c 	iter->link = 0;
iter             2593 net/netlink/af_netlink.c 	netlink_walk_start(iter);
iter             2609 net/netlink/af_netlink.c 	struct nl_seq_iter *iter = seq->private;
iter             2611 net/netlink/af_netlink.c 	if (iter->link >= MAX_LINKS)
iter             2614 net/netlink/af_netlink.c 	netlink_walk_stop(iter);
iter              604 net/nfc/netlink.c 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
iter              608 net/nfc/netlink.c 	if (!iter) {
iter              610 net/nfc/netlink.c 		iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
iter              611 net/nfc/netlink.c 		if (!iter)
iter              613 net/nfc/netlink.c 		cb->args[0] = (long) iter;
iter              621 net/nfc/netlink.c 		nfc_device_iter_init(iter);
iter              622 net/nfc/netlink.c 		dev = nfc_device_iter_next(iter);
iter              633 net/nfc/netlink.c 		dev = nfc_device_iter_next(iter);
iter              645 net/nfc/netlink.c 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
iter              647 net/nfc/netlink.c 	nfc_device_iter_exit(iter);
iter              648 net/nfc/netlink.c 	kfree(iter);
iter             1359 net/nfc/netlink.c 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
iter             1363 net/nfc/netlink.c 	if (!iter) {
iter             1365 net/nfc/netlink.c 		iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
iter             1366 net/nfc/netlink.c 		if (!iter)
iter             1368 net/nfc/netlink.c 		cb->args[0] = (long) iter;
iter             1376 net/nfc/netlink.c 		nfc_device_iter_init(iter);
iter             1377 net/nfc/netlink.c 		dev = nfc_device_iter_next(iter);
iter             1388 net/nfc/netlink.c 		dev = nfc_device_iter_next(iter);
iter             1400 net/nfc/netlink.c 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
iter             1402 net/nfc/netlink.c 	nfc_device_iter_exit(iter);
iter             1403 net/nfc/netlink.c 	kfree(iter);
iter             1786 net/nfc/netlink.c 	struct class_dev_iter iter;
iter             1793 net/nfc/netlink.c 	nfc_device_iter_init(&iter);
iter             1794 net/nfc/netlink.c 	dev = nfc_device_iter_next(&iter);
iter             1806 net/nfc/netlink.c 		dev = nfc_device_iter_next(&iter);
iter             1809 net/nfc/netlink.c 	nfc_device_iter_exit(&iter);
iter              105 net/nfc/nfc.h  static inline void nfc_device_iter_init(struct class_dev_iter *iter)
iter              107 net/nfc/nfc.h  	class_dev_iter_init(iter, &nfc_class, NULL, NULL);
iter              110 net/nfc/nfc.h  static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter)
iter              112 net/nfc/nfc.h  	struct device *d = class_dev_iter_next(iter);
iter              119 net/nfc/nfc.h  static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
iter              121 net/nfc/nfc.h  	class_dev_iter_exit(iter);
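The nfc.h helpers above are thin wrappers around the driver-core class iterator, and the netlink dump callbacks stash a kmalloc'd copy of it in cb->args[0] between invocations. A minimal sketch of the underlying class_dev_iter usage; demo_class is a placeholder and the container_of() step stands in for to_nfc_dev():

	#include <linux/device.h>

	static void demo_walk_class(struct class *demo_class)
	{
		struct class_dev_iter iter;
		struct device *d;

		class_dev_iter_init(&iter, demo_class, NULL, NULL);
		while ((d = class_dev_iter_next(&iter)) != NULL) {
			/* container_of(d, <wrapper type>, dev) recovers the owning object */
		}
		class_dev_iter_exit(&iter);
	}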
iter              732 net/rds/af_rds.c 			      struct rds_info_iterator *iter,
iter              754 net/rds/af_rds.c 				rds_inc_info_copy(inc, iter,
iter              771 net/rds/af_rds.c 			       struct rds_info_iterator *iter,
iter              788 net/rds/af_rds.c 				rds6_inc_info_copy(inc, iter, &inc->i_saddr,
iter              803 net/rds/af_rds.c 			  struct rds_info_iterator *iter,
iter              831 net/rds/af_rds.c 		rds_info_copy(iter, &sinfo, sizeof(sinfo));
iter              844 net/rds/af_rds.c 			   struct rds_info_iterator *iter,
iter              866 net/rds/af_rds.c 		rds_info_copy(iter, &sinfo6, sizeof(sinfo6));
iter              510 net/rds/connection.c 			     struct rds_info_iterator *iter,
iter              515 net/rds/connection.c 		rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
iter              518 net/rds/connection.c 		rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
iter              523 net/rds/connection.c 				      struct rds_info_iterator *iter,
iter              569 net/rds/connection.c 								 iter,
iter              589 net/rds/connection.c 				  struct rds_info_iterator *iter,
iter              593 net/rds/connection.c 	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
iter              598 net/rds/connection.c 				   struct rds_info_iterator *iter,
iter              602 net/rds/connection.c 	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
iter              607 net/rds/connection.c 				       struct rds_info_iterator *iter,
iter              610 net/rds/connection.c 	rds_conn_message_info(sock, len, iter, lens, 1);
iter              615 net/rds/connection.c 					struct rds_info_iterator *iter,
iter              618 net/rds/connection.c 	rds6_conn_message_info(sock, len, iter, lens, 1);
iter              624 net/rds/connection.c 					  struct rds_info_iterator *iter,
iter              627 net/rds/connection.c 	rds_conn_message_info(sock, len, iter, lens, 0);
iter              633 net/rds/connection.c 					   struct rds_info_iterator *iter,
iter              636 net/rds/connection.c 	rds6_conn_message_info(sock, len, iter, lens, 0);
iter              641 net/rds/connection.c 			  struct rds_info_iterator *iter,
iter              668 net/rds/connection.c 				rds_info_copy(iter, buffer, item_len);
iter              679 net/rds/connection.c 				    struct rds_info_iterator *iter,
iter              718 net/rds/connection.c 				rds_info_copy(iter, buffer, item_len);
iter              787 net/rds/connection.c 			  struct rds_info_iterator *iter,
iter              792 net/rds/connection.c 	rds_walk_conn_path_info(sock, len, iter, lens,
iter              800 net/rds/connection.c 			   struct rds_info_iterator *iter,
iter              805 net/rds/connection.c 	rds_walk_conn_path_info(sock, len, iter, lens,
iter              366 net/rds/ib.c   			   struct rds_info_iterator *iter,
iter              371 net/rds/ib.c   	rds_for_each_conn_info(sock, len, iter, lens,
iter              380 net/rds/ib.c   			    struct rds_info_iterator *iter,
iter              385 net/rds/ib.c   	rds_for_each_conn_info(sock, len, iter, lens,
iter              448 net/rds/ib.h   unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
iter              436 net/rds/ib_rdma.c 	int iter = 0;
iter              448 net/rds/ib_rdma.c 		if (++iter > 2) {
iter               84 net/rds/ib_stats.c unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
iter              103 net/rds/ib_stats.c 	rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names,
iter              104 net/rds/info.c void rds_info_iter_unmap(struct rds_info_iterator *iter)
iter              106 net/rds/info.c 	if (iter->addr) {
iter              107 net/rds/info.c 		kunmap_atomic(iter->addr);
iter              108 net/rds/info.c 		iter->addr = NULL;
iter              115 net/rds/info.c void rds_info_copy(struct rds_info_iterator *iter, void *data,
iter              121 net/rds/info.c 		if (!iter->addr)
iter              122 net/rds/info.c 			iter->addr = kmap_atomic(*iter->pages);
iter              124 net/rds/info.c 		this = min(bytes, PAGE_SIZE - iter->offset);
iter              127 net/rds/info.c 			  "bytes %lu\n", *iter->pages, iter->addr,
iter              128 net/rds/info.c 			  iter->offset, this, data, bytes);
iter              130 net/rds/info.c 		memcpy(iter->addr + iter->offset, data, this);
iter              134 net/rds/info.c 		iter->offset += this;
iter              136 net/rds/info.c 		if (iter->offset == PAGE_SIZE) {
iter              137 net/rds/info.c 			kunmap_atomic(iter->addr);
iter              138 net/rds/info.c 			iter->addr = NULL;
iter              139 net/rds/info.c 			iter->offset = 0;
iter              140 net/rds/info.c 			iter->pages++;
iter              161 net/rds/info.c 	struct rds_info_iterator iter;
iter              215 net/rds/info.c 	iter.pages = pages;
iter              216 net/rds/info.c 	iter.addr = NULL;
iter              217 net/rds/info.c 	iter.offset = start & (PAGE_SIZE - 1);
iter              219 net/rds/info.c 	func(sock, len, &iter, &lens);
iter              224 net/rds/info.c 	rds_info_iter_unmap(&iter);
iter               19 net/rds/info.h 			      struct rds_info_iterator *iter,
iter               26 net/rds/info.h void rds_info_copy(struct rds_info_iterator *iter, void *data,
iter               28 net/rds/info.h void rds_info_iter_unmap(struct rds_info_iterator *iter);
iter              571 net/rds/rds.h  	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
iter              787 net/rds/rds.h  			  struct rds_info_iterator *iter,
iter              902 net/rds/rds.h  		       struct rds_info_iterator *iter,
iter              905 net/rds/rds.h  			struct rds_info_iterator *iter,
iter              972 net/rds/rds.h  void rds_stats_info_copy(struct rds_info_iterator *iter,
iter             1010 net/rds/rds.h  unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
iter              780 net/rds/recv.c 		       struct rds_info_iterator *iter,
iter              803 net/rds/recv.c 	rds_info_copy(iter, &minfo, sizeof(minfo));
iter              808 net/rds/recv.c 			struct rds_info_iterator *iter,
iter              832 net/rds/recv.c 	rds_info_copy(iter, &minfo6, sizeof(minfo6));
iter               84 net/rds/stats.c void rds_stats_info_copy(struct rds_info_iterator *iter,
iter               96 net/rds/stats.c 		rds_info_copy(iter, &ctr, sizeof(ctr));
iter              112 net/rds/stats.c 			   struct rds_info_iterator *iter,
iter              136 net/rds/stats.c 	rds_stats_info_copy(iter, (uint64_t *)&stats, rds_stat_names,
iter              142 net/rds/stats.c 	lens->nr = rds_trans_stats_info_copy(iter, avail) +
iter              242 net/rds/tcp.c  			    struct rds_info_iterator *iter,
iter              272 net/rds/tcp.c  		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
iter              288 net/rds/tcp.c  			     struct rds_info_iterator *iter,
iter              315 net/rds/tcp.c  		rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
iter               96 net/rds/tcp.h  unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
iter               51 net/rds/tcp_stats.c unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
iter               70 net/rds/tcp_stats.c 	rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
iter              136 net/rds/transport.c unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
iter              145 net/rds/transport.c 	rds_info_iter_unmap(iter);
iter              153 net/rds/transport.c 		part = trans->stats_info_copy(iter, avail);
iter              309 net/rxrpc/recvmsg.c 			      struct msghdr *msg, struct iov_iter *iter,
iter              385 net/rxrpc/recvmsg.c 			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
iter              642 net/rxrpc/recvmsg.c 			   struct iov_iter *iter,
iter              650 net/rxrpc/recvmsg.c 	       iov_iter_count(iter), want_more);
iter              660 net/rxrpc/recvmsg.c 		ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
iter              661 net/rxrpc/recvmsg.c 					 iov_iter_count(iter), 0,
iter              671 net/rxrpc/recvmsg.c 			if (iov_iter_count(iter) > 0)
iter              708 net/rxrpc/recvmsg.c 	_leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
iter              724 net/rxrpc/recvmsg.c 		if (iov_iter_count(iter) > 0)
iter               80 net/sched/cls_api.c 	struct tcf_proto *iter;
iter               84 net/sched/cls_api.c 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
iter               86 net/sched/cls_api.c 		if (tcf_proto_cmp(tp, iter)) {
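The cls_api lines above walk one bucket of a hashtable.h table under RCU. A hedged sketch with placeholder names (demo_ht, struct demo_proto); the real code compares full tcf_proto keys rather than a single integer:

	#include <linux/types.h>
	#include <linux/hashtable.h>
	#include <linux/rculist.h>

	struct demo_proto {
		u32 key;
		struct hlist_node hnode;
	};

	static DEFINE_HASHTABLE(demo_ht, 7);	/* 128 buckets */

	/* caller holds rcu_read_lock() */
	static struct demo_proto *demo_lookup(u32 key)
	{
		struct demo_proto *iter;

		hash_for_each_possible_rcu(demo_ht, iter, hnode, key)
			if (iter->key == key)
				return iter;
		return NULL;
	}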
iter              207 net/sctp/proc.c 	struct sctp_ht_iter *iter = seq->private;
iter              209 net/sctp/proc.c 	sctp_transport_walk_start(&iter->hti);
iter              211 net/sctp/proc.c 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
iter              216 net/sctp/proc.c 	struct sctp_ht_iter *iter = seq->private;
iter              218 net/sctp/proc.c 	sctp_transport_walk_stop(&iter->hti);
iter              223 net/sctp/proc.c 	struct sctp_ht_iter *iter = seq->private;
iter              227 net/sctp/proc.c 	return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
iter             5297 net/sctp/socket.c void sctp_transport_walk_start(struct rhashtable_iter *iter)
iter             5299 net/sctp/socket.c 	rhltable_walk_enter(&sctp_transport_hashtable, iter);
iter             5301 net/sctp/socket.c 	rhashtable_walk_start(iter);
iter             5304 net/sctp/socket.c void sctp_transport_walk_stop(struct rhashtable_iter *iter)
iter             5306 net/sctp/socket.c 	rhashtable_walk_stop(iter);
iter             5307 net/sctp/socket.c 	rhashtable_walk_exit(iter);
iter             5311 net/sctp/socket.c 					       struct rhashtable_iter *iter)
iter             5315 net/sctp/socket.c 	t = rhashtable_walk_next(iter);
iter             5316 net/sctp/socket.c 	for (; t; t = rhashtable_walk_next(iter)) {
iter             5337 net/sctp/socket.c 					      struct rhashtable_iter *iter,
iter             5345 net/sctp/socket.c 	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
iter              143 net/sctp/tsnmap.c 				  struct sctp_tsnmap_iter *iter)
iter              146 net/sctp/tsnmap.c 	iter->start = map->cumulative_tsn_ack_point + 1;
iter              153 net/sctp/tsnmap.c 				    struct sctp_tsnmap_iter *iter,
iter              160 net/sctp/tsnmap.c 	if (TSN_lte(map->max_tsn_seen, iter->start))
iter              163 net/sctp/tsnmap.c 	offset = iter->start - map->base_tsn;
iter              182 net/sctp/tsnmap.c 		iter->start = map->cumulative_tsn_ack_point + *end + 1;
iter              320 net/sctp/tsnmap.c 	struct sctp_tsnmap_iter iter;
iter              326 net/sctp/tsnmap.c 		sctp_tsnmap_iter_init(map, &iter);
iter              327 net/sctp/tsnmap.c 		while (sctp_tsnmap_next_gap_ack(map, &iter,
iter              458 net/switchdev/switchdev.c 	struct list_head *iter;
iter              477 net/switchdev/switchdev.c 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
iter              512 net/switchdev/switchdev.c 	struct list_head *iter;
iter              528 net/switchdev/switchdev.c 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
iter              562 net/switchdev/switchdev.c 	struct list_head *iter;
iter              578 net/switchdev/switchdev.c 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
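switchdev recurses over an upper device's lower devices with netdev_for_each_lower_dev(), where iter is the list cursor the macro advances. A minimal sketch that just counts lowers (demo name only); the walk assumes RTNL is held:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int demo_count_lowers(struct net_device *dev)
	{
		struct net_device *lower_dev;
		struct list_head *iter;
		int n = 0;

		ASSERT_RTNL();
		netdev_for_each_lower_dev(dev, lower_dev, iter)
			n++;

		return n;
	}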
iter             2781 net/tipc/socket.c 	struct rhashtable_iter iter;
iter             2785 net/tipc/socket.c 	rhashtable_walk_enter(&tn->sk_rht, &iter);
iter             2788 net/tipc/socket.c 		rhashtable_walk_start(&iter);
iter             2790 net/tipc/socket.c 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
iter             2792 net/tipc/socket.c 			rhashtable_walk_stop(&iter);
iter             2798 net/tipc/socket.c 			rhashtable_walk_start(&iter);
iter             2802 net/tipc/socket.c 		rhashtable_walk_stop(&iter);
iter             2805 net/tipc/socket.c 	rhashtable_walk_exit(&iter);
iter             3366 net/tipc/socket.c 	struct rhashtable_iter *iter = (void *)cb->args[4];
iter             3370 net/tipc/socket.c 	rhashtable_walk_start(iter);
iter             3371 net/tipc/socket.c 	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
iter             3382 net/tipc/socket.c 		rhashtable_walk_stop(iter);
iter             3391 net/tipc/socket.c 		rhashtable_walk_start(iter);
iter             3394 net/tipc/socket.c 	rhashtable_walk_stop(iter);
iter             3409 net/tipc/socket.c 	struct rhashtable_iter *iter = (void *)cb->args[4];
iter             3412 net/tipc/socket.c 	if (!iter) {
iter             3413 net/tipc/socket.c 		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter             3414 net/tipc/socket.c 		if (!iter)
iter             3417 net/tipc/socket.c 		cb->args[4] = (long)iter;
iter             3420 net/tipc/socket.c 	rhashtable_walk_enter(&tn->sk_rht, iter);
iter             1993 net/wireless/scan.c 		       void (*iter)(struct wiphy *wiphy,
iter             2005 net/wireless/scan.c 			iter(wiphy, &bss->pub, iter_data);
iter             1719 net/wireless/util.c 			       void (*iter)(const struct ieee80211_iface_combination *c,
iter             1817 net/wireless/util.c 		(*iter)(c, data);
iter              116 net/xfrm/xfrm_interface.c 	struct xfrm_if *iter;
iter              119 net/xfrm/xfrm_interface.c 	     (iter = rtnl_dereference(*xip)) != NULL;
iter              120 net/xfrm/xfrm_interface.c 	     xip = &iter->next) {
iter              121 net/xfrm/xfrm_interface.c 		if (xi == iter) {
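The xfrm_interface lines walk a singly linked list through a pointer-to-pointer so the matching node can be unlinked without keeping a prev pointer. A simplified sketch of the same shape (plain pointers; the real code goes through rtnl_dereference() and RCU-safe assignment):

	struct demo_if {
		struct demo_if *next;
	};

	static void demo_unlink(struct demo_if **head, struct demo_if *victim)
	{
		struct demo_if **xip;
		struct demo_if *iter;

		for (xip = head; (iter = *xip) != NULL; xip = &iter->next) {
			if (iter == victim) {
				*xip = victim->next;	/* splice the node out */
				break;
			}
		}
	}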
iter              783 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter              788 scripts/kconfig/gconf.c 	if (!gtk_tree_model_get_iter(model2, &iter, path))
iter              791 scripts/kconfig/gconf.c 	gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
iter              794 scripts/kconfig/gconf.c 	gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1);
iter              885 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter              902 scripts/kconfig/gconf.c 	if (!gtk_tree_model_get_iter(model2, &iter, path))
iter              904 scripts/kconfig/gconf.c 	gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
iter              942 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter              962 scripts/kconfig/gconf.c 	gtk_tree_model_get_iter(model2, &iter, path);
iter              963 scripts/kconfig/gconf.c 	gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
iter              984 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter              988 scripts/kconfig/gconf.c 	if (gtk_tree_selection_get_selected(selection, &model2, &iter)) {
iter              989 scripts/kconfig/gconf.c 		gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
iter             1003 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter             1015 scripts/kconfig/gconf.c 	gtk_tree_model_get_iter(model1, &iter, path);
iter             1016 scripts/kconfig/gconf.c 	gtk_tree_model_get(model1, &iter, COL_MENU, &menu, -1);
iter             1216 scripts/kconfig/gconf.c 	GtkTreeIter iter;
iter             1217 scripts/kconfig/gconf.c 	GtkTreeIter *child = &iter;
iter             1250 scripts/kconfig/gconf.c 	GtkTreeIter iter, tmp;
iter             1251 scripts/kconfig/gconf.c 	GtkTreeIter *child2 = &iter;
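gconf.c resolves a GtkTreePath from a click into a GtkTreeIter and then reads model columns through it. A hedged GTK sketch of that two-step lookup; demo_value_at and the column index are placeholders:

	#include <gtk/gtk.h>

	/* Returns the string stored in `col` at `path`, or NULL; caller g_free()s it. */
	static gchar *demo_value_at(GtkTreeModel *model, GtkTreePath *path, gint col)
	{
		GtkTreeIter iter;
		gchar *val = NULL;

		if (!gtk_tree_model_get_iter(model, &iter, path))
			return NULL;		/* path no longer valid */

		gtk_tree_model_get(model, &iter, col, &val, -1);
		return val;
	}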
iter              223 security/selinux/ibpkey.c 	int iter;
iter              228 security/selinux/ibpkey.c 	for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
iter              229 security/selinux/ibpkey.c 		INIT_LIST_HEAD(&sel_ib_pkey_hash[iter].list);
iter              230 security/selinux/ibpkey.c 		sel_ib_pkey_hash[iter].size = 0;
iter              292 security/selinux/netnode.c 	int iter;
iter              297 security/selinux/netnode.c 	for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
iter              298 security/selinux/netnode.c 		INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
iter              299 security/selinux/netnode.c 		sel_netnode_hash[iter].size = 0;
iter              226 security/selinux/netport.c 	int iter;
iter              231 security/selinux/netport.c 	for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
iter              232 security/selinux/netport.c 		INIT_LIST_HEAD(&sel_netport_hash[iter].list);
iter              233 security/selinux/netport.c 		sel_netport_hash[iter].size = 0;
iter             1836 security/selinux/selinuxfs.c 	unsigned int iter;
iter             1842 security/selinux/selinuxfs.c 	for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
iter             1843 security/selinux/selinuxfs.c 		if (iter < ARRAY_SIZE(selinux_policycap_names))
iter             1845 security/selinux/selinuxfs.c 					      selinux_policycap_names[iter]);
iter             1859 security/selinux/selinuxfs.c 		inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
iter               97 security/selinux/ss/ebitmap.c 	unsigned int iter;
iter              111 security/selinux/ss/ebitmap.c 		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
iter              112 security/selinux/ss/ebitmap.c 			e_map = e_iter->maps[iter];
iter              457 security/selinux/xfrm.c 		struct dst_entry *iter;
iter              459 security/selinux/xfrm.c 		for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
iter              460 security/selinux/xfrm.c 			struct xfrm_state *x = iter->xfrm;
iter              320 sound/core/control.c 	unsigned int iter = 100000;
iter              323 sound/core/control.c 		if (--iter == 0) {
iter              246 sound/soc/sof/topology.c 	int i, iter;
iter              256 sound/soc/sof/topology.c 		iter = exp * -1;
iter              258 sound/soc/sof/topology.c 		iter = exp;
iter              261 sound/soc/sof/topology.c 	for (i = 0; i < iter; i++) {
iter              144 tools/bpf/bpftool/cgroup.c 	__u32 prog_cnt, iter;
iter              173 tools/bpf/bpftool/cgroup.c 	for (iter = 0; iter < prog_cnt; iter++)
iter              174 tools/bpf/bpftool/cgroup.c 		show_bpf_prog(prog_ids[iter], attach_type_strings[type],
iter              371 tools/perf/bench/epoll-wait.c 	size_t i, j, iter;
iter              380 tools/perf/bench/epoll-wait.c 	for (iter = 0; !wdone; iter++) {
iter              402 tools/perf/bench/epoll-wait.c 	printinfo("exiting writer-thread (total full-loops: %zd)\n", iter);
iter               88 tools/perf/builtin-annotate.c 	struct block_range_iter iter;
iter               97 tools/perf/builtin-annotate.c 	iter = block_range__create(start->addr, end->addr);
iter               98 tools/perf/builtin-annotate.c 	if (!block_range_iter__valid(&iter))
iter              104 tools/perf/builtin-annotate.c 	entry = block_range_iter(&iter);
iter              109 tools/perf/builtin-annotate.c 		entry = block_range_iter(&iter);
iter              117 tools/perf/builtin-annotate.c 	} while (block_range_iter__next(&iter));
iter              122 tools/perf/builtin-annotate.c 	entry = block_range_iter(&iter);
iter              155 tools/perf/builtin-annotate.c static int hist_iter__branch_callback(struct hist_entry_iter *iter,
iter              160 tools/perf/builtin-annotate.c 	struct hist_entry *he = iter->he;
iter              162 tools/perf/builtin-annotate.c 	struct perf_sample *sample = iter->sample;
iter              163 tools/perf/builtin-annotate.c 	struct evsel *evsel = iter->evsel;
iter              184 tools/perf/builtin-annotate.c 	struct hist_entry_iter iter = {
iter              206 tools/perf/builtin-annotate.c 	ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
iter              135 tools/perf/builtin-report.c static int hist_iter__report_callback(struct hist_entry_iter *iter,
iter              141 tools/perf/builtin-report.c 	struct hist_entry *he = iter->he;
iter              142 tools/perf/builtin-report.c 	struct evsel *evsel = iter->evsel;
iter              143 tools/perf/builtin-report.c 	struct perf_sample *sample = iter->sample;
iter              177 tools/perf/builtin-report.c static int hist_iter__branch_callback(struct hist_entry_iter *iter,
iter              182 tools/perf/builtin-report.c 	struct hist_entry *he = iter->he;
iter              185 tools/perf/builtin-report.c 	struct perf_sample *sample = iter->sample;
iter              186 tools/perf/builtin-report.c 	struct evsel *evsel = iter->evsel;
iter              243 tools/perf/builtin-report.c 	struct hist_entry_iter iter = {
iter              279 tools/perf/builtin-report.c 		iter.add_entry_cb = hist_iter__branch_callback;
iter              280 tools/perf/builtin-report.c 		iter.ops = &hist_iter_branch;
iter              282 tools/perf/builtin-report.c 		iter.ops = &hist_iter_mem;
iter              284 tools/perf/builtin-report.c 		iter.ops = &hist_iter_cumulative;
iter              286 tools/perf/builtin-report.c 		iter.ops = &hist_iter_normal;
iter              297 tools/perf/builtin-report.c 	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
iter              715 tools/perf/builtin-top.c static int hist_iter__top_callback(struct hist_entry_iter *iter,
iter              720 tools/perf/builtin-top.c 	struct hist_entry *he = iter->he;
iter              721 tools/perf/builtin-top.c 	struct evsel *evsel = iter->evsel;
iter              724 tools/perf/builtin-top.c 		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
iter              726 tools/perf/builtin-top.c 	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
iter              816 tools/perf/builtin-top.c 		struct hist_entry_iter iter = {
iter              823 tools/perf/builtin-top.c 			iter.ops = &hist_iter_cumulative;
iter              825 tools/perf/builtin-top.c 			iter.ops = &hist_iter_normal;
iter              829 tools/perf/builtin-top.c 		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
iter               88 tools/perf/tests/hists_cumulate.c 		struct hist_entry_iter iter = {
iter               95 tools/perf/tests/hists_cumulate.c 			iter.ops = &hist_iter_cumulative;
iter               97 tools/perf/tests/hists_cumulate.c 			iter.ops = &hist_iter_normal;
iter              108 tools/perf/tests/hists_cumulate.c 		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
iter               63 tools/perf/tests/hists_filter.c 			struct hist_entry_iter iter = {
iter               85 tools/perf/tests/hists_filter.c 			if (hist_entry_iter__add(&iter, &al,
iter               58 tools/perf/tests/hists_output.c 		struct hist_entry_iter iter = {
iter               74 tools/perf/tests/hists_output.c 		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
iter              127 tools/perf/ui/gtk/annotate.c 		GtkTreeIter iter;
iter              130 tools/perf/ui/gtk/annotate.c 		gtk_list_store_append(store, &iter);
iter              146 tools/perf/ui/gtk/annotate.c 			gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
iter              148 tools/perf/ui/gtk/annotate.c 			gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
iter              150 tools/perf/ui/gtk/annotate.c 			gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
iter              106 tools/perf/ui/gtk/hists.c 		GtkTreeIter iter, new_parent;
iter              119 tools/perf/ui/gtk/hists.c 			gtk_tree_store_append(store, &iter, &new_parent);
iter              122 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, 0, buf, -1);
iter              125 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, col, buf, -1);
iter              132 tools/perf/ui/gtk/hists.c 				new_parent = iter;
iter              140 tools/perf/ui/gtk/hists.c 			gtk_tree_store_append(store, &iter, &new_parent);
iter              143 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, 0, buf, -1);
iter              146 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, col, buf, -1);
iter              153 tools/perf/ui/gtk/hists.c 				new_parent = iter;
iter              168 tools/perf/ui/gtk/hists.c 		GtkTreeIter iter;
iter              209 tools/perf/ui/gtk/hists.c 		gtk_tree_store_append(store, &iter, parent);
iter              212 tools/perf/ui/gtk/hists.c 		gtk_tree_store_set(store, &iter, 0, buf, -1);
iter              214 tools/perf/ui/gtk/hists.c 		gtk_tree_store_set(store, &iter, col, str, -1);
iter              229 tools/perf/ui/gtk/hists.c 		GtkTreeIter iter, new_parent;
iter              241 tools/perf/ui/gtk/hists.c 			gtk_tree_store_append(store, &iter, &new_parent);
iter              244 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, 0, buf, -1);
iter              247 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, col, buf, -1);
iter              254 tools/perf/ui/gtk/hists.c 				new_parent = iter;
iter              265 tools/perf/ui/gtk/hists.c 		perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
iter              360 tools/perf/ui/gtk/hists.c 		GtkTreeIter iter;
iter              371 tools/perf/ui/gtk/hists.c 		gtk_tree_store_append(store, &iter, NULL);
iter              384 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, col_idx++, s, -1);
iter              393 tools/perf/ui/gtk/hists.c 			perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
iter              421 tools/perf/ui/gtk/hists.c 		GtkTreeIter iter;
iter              433 tools/perf/ui/gtk/hists.c 		gtk_tree_store_append(store, &iter, parent);
iter              446 tools/perf/ui/gtk/hists.c 			gtk_tree_store_set(store, &iter, col_idx++, hpp->buf, -1);
iter              463 tools/perf/ui/gtk/hists.c 		gtk_tree_store_set(store, &iter, col_idx, strim(bf), -1);
iter              470 tools/perf/ui/gtk/hists.c 							store, &iter, hpp,
iter              480 tools/perf/ui/gtk/hists.c 				gtk_tree_store_append(store, &child, &iter);
iter              490 tools/perf/ui/gtk/hists.c 			perf_gtk__add_callchain(&he->sorted_chain, store, &iter,
iter             2118 tools/perf/util/annotate.c 	struct annotation_line *iter;
iter             2125 tools/perf/util/annotate.c 		iter = rb_entry(parent, struct annotation_line, rb_node);
iter             2127 tools/perf/util/annotate.c 		ret = strcmp(iter->path, al->path);
iter             2130 tools/perf/util/annotate.c 				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
iter             2166 tools/perf/util/annotate.c 	struct annotation_line *iter;
iter             2172 tools/perf/util/annotate.c 		iter = rb_entry(parent, struct annotation_line, rb_node);
iter             2174 tools/perf/util/annotate.c 		if (cmp_source_line(al, iter))
iter               86 tools/perf/util/block-range.c 	struct block_range_iter iter = { NULL, NULL };
iter              122 tools/perf/util/block-range.c 				return iter;
iter              135 tools/perf/util/block-range.c 			iter.start = head;
iter              145 tools/perf/util/block-range.c 			return iter;
iter              158 tools/perf/util/block-range.c 		iter.start = entry;
iter              159 tools/perf/util/block-range.c 		iter.end   = entry;
iter              169 tools/perf/util/block-range.c 			return iter;
iter              192 tools/perf/util/block-range.c 	iter.start = entry;
iter              199 tools/perf/util/block-range.c 	entry = iter.start;
iter              207 tools/perf/util/block-range.c 				return iter;
iter              229 tools/perf/util/block-range.c 			iter.end = entry;
iter              238 tools/perf/util/block-range.c 			iter.end = entry;
iter              254 tools/perf/util/block-range.c 				return iter;
iter              267 tools/perf/util/block-range.c 			iter.end = tail;
iter              277 tools/perf/util/block-range.c 				return iter;
iter              295 tools/perf/util/block-range.c 	assert(iter.start->start == start && iter.start->is_target);
iter              296 tools/perf/util/block-range.c 	assert(iter.end->end == end && iter.end->is_branch);
iter              300 tools/perf/util/block-range.c 	return iter;
iter               51 tools/perf/util/block-range.h static inline struct block_range *block_range_iter(struct block_range_iter *iter)
iter               53 tools/perf/util/block-range.h 	return iter->start;
iter               56 tools/perf/util/block-range.h static inline bool block_range_iter__next(struct block_range_iter *iter)
iter               58 tools/perf/util/block-range.h 	if (iter->start == iter->end)
iter               61 tools/perf/util/block-range.h 	iter->start = block_range__next(iter->start);
iter               65 tools/perf/util/block-range.h static inline bool block_range_iter__valid(struct block_range_iter *iter)
iter               67 tools/perf/util/block-range.h 	if (!iter->start || !iter->end)
iter               65 tools/perf/util/comm.c 	struct comm_str *iter, *new;
iter               70 tools/perf/util/comm.c 		iter = rb_entry(parent, struct comm_str, rb_node);
iter               77 tools/perf/util/comm.c 		cmp = strcmp(str, iter->str);
iter               78 tools/perf/util/comm.c 		if (!cmp && comm_str__get(iter))
iter               79 tools/perf/util/comm.c 			return iter;
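comm.c interns command strings in an rbtree ordered by strcmp(). A minimal lookup-only sketch of that walk (the insertion path additionally keeps the parent/link pointers for rb_link_node()/rb_insert_color()); struct demo_node is a placeholder:

	#include <string.h>
	#include <linux/rbtree.h>	/* perf uses the tools/include copy */

	struct demo_node {
		struct rb_node rb_node;
		const char *str;
	};

	static struct demo_node *demo_find(struct rb_root *root, const char *str)
	{
		struct rb_node *p = root->rb_node;

		while (p) {
			struct demo_node *iter = rb_entry(p, struct demo_node, rb_node);
			int cmp = strcmp(str, iter->str);

			if (!cmp)
				return iter;
			p = cmp < 0 ? p->rb_left : p->rb_right;
		}
		return NULL;
	}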
iter              764 tools/perf/util/hist.c iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
iter              771 tools/perf/util/hist.c iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
iter              778 tools/perf/util/hist.c iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              780 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              787 tools/perf/util/hist.c 	iter->priv = mi;
iter              792 tools/perf/util/hist.c iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              795 tools/perf/util/hist.c 	struct mem_info *mi = iter->priv;
iter              796 tools/perf/util/hist.c 	struct hists *hists = evsel__hists(iter->evsel);
iter              797 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              816 tools/perf/util/hist.c 	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
iter              821 tools/perf/util/hist.c 	iter->he = he;
iter              826 tools/perf/util/hist.c iter_finish_mem_entry(struct hist_entry_iter *iter,
iter              829 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter              831 tools/perf/util/hist.c 	struct hist_entry *he = iter->he;
iter              839 tools/perf/util/hist.c 	err = hist_entry__append_callchain(he, iter->sample);
iter              847 tools/perf/util/hist.c 	iter->priv = NULL;
iter              849 tools/perf/util/hist.c 	iter->he = NULL;
iter              854 tools/perf/util/hist.c iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              857 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              863 tools/perf/util/hist.c 	iter->curr = 0;
iter              864 tools/perf/util/hist.c 	iter->total = sample->branch_stack->nr;
iter              866 tools/perf/util/hist.c 	iter->priv = bi;
iter              871 tools/perf/util/hist.c iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
iter              878 tools/perf/util/hist.c iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              880 tools/perf/util/hist.c 	struct branch_info *bi = iter->priv;
iter              881 tools/perf/util/hist.c 	int i = iter->curr;
iter              886 tools/perf/util/hist.c 	if (iter->curr >= iter->total)
iter              896 tools/perf/util/hist.c iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              899 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter              901 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              903 tools/perf/util/hist.c 	int i = iter->curr;
iter              906 tools/perf/util/hist.c 	bi = iter->priv;
iter              908 tools/perf/util/hist.c 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
iter              918 tools/perf/util/hist.c 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
iter              926 tools/perf/util/hist.c 	iter->he = he;
iter              927 tools/perf/util/hist.c 	iter->curr++;
iter              932 tools/perf/util/hist.c iter_finish_branch_entry(struct hist_entry_iter *iter,
iter              935 tools/perf/util/hist.c 	zfree(&iter->priv);
iter              936 tools/perf/util/hist.c 	iter->he = NULL;
iter              938 tools/perf/util/hist.c 	return iter->curr >= iter->total ? 0 : -1;
iter              942 tools/perf/util/hist.c iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
iter              949 tools/perf/util/hist.c iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
iter              951 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter              952 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              955 tools/perf/util/hist.c 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
iter              960 tools/perf/util/hist.c 	iter->he = he;
iter              965 tools/perf/util/hist.c iter_finish_normal_entry(struct hist_entry_iter *iter,
iter              968 tools/perf/util/hist.c 	struct hist_entry *he = iter->he;
iter              969 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter              970 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter              975 tools/perf/util/hist.c 	iter->he = NULL;
iter              983 tools/perf/util/hist.c iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
iter              999 tools/perf/util/hist.c 	iter->priv = he_cache;
iter             1000 tools/perf/util/hist.c 	iter->curr = 0;
iter             1006 tools/perf/util/hist.c iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
iter             1009 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter             1011 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter             1012 tools/perf/util/hist.c 	struct hist_entry **he_cache = iter->priv;
iter             1016 tools/perf/util/hist.c 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
iter             1021 tools/perf/util/hist.c 	iter->he = he;
iter             1022 tools/perf/util/hist.c 	he_cache[iter->curr++] = he;
iter             1038 tools/perf/util/hist.c iter_next_cumulative_entry(struct hist_entry_iter *iter,
iter             1047 tools/perf/util/hist.c 	return fill_callchain_info(al, node, iter->hide_unresolved);
iter             1051 tools/perf/util/hist.c iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
iter             1054 tools/perf/util/hist.c 	struct evsel *evsel = iter->evsel;
iter             1055 tools/perf/util/hist.c 	struct perf_sample *sample = iter->sample;
iter             1056 tools/perf/util/hist.c 	struct hist_entry **he_cache = iter->priv;
iter             1069 tools/perf/util/hist.c 		.parent = iter->parent,
iter             1084 tools/perf/util/hist.c 	for (i = 0; i < iter->curr; i++) {
iter             1087 tools/perf/util/hist.c 			iter->he = NULL;
iter             1092 tools/perf/util/hist.c 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
iter             1097 tools/perf/util/hist.c 	iter->he = he;
iter             1098 tools/perf/util/hist.c 	he_cache[iter->curr++] = he;
iter             1106 tools/perf/util/hist.c iter_finish_cumulative_entry(struct hist_entry_iter *iter,
iter             1109 tools/perf/util/hist.c 	zfree(&iter->priv);
iter             1110 tools/perf/util/hist.c 	iter->he = NULL;
iter             1147 tools/perf/util/hist.c int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
iter             1156 tools/perf/util/hist.c 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter             1157 tools/perf/util/hist.c 					iter->evsel, al, max_stack_depth);
iter             1163 tools/perf/util/hist.c 	err = iter->ops->prepare_entry(iter, al);
iter             1167 tools/perf/util/hist.c 	err = iter->ops->add_single_entry(iter, al);
iter             1171 tools/perf/util/hist.c 	if (iter->he && iter->add_entry_cb) {
iter             1172 tools/perf/util/hist.c 		err = iter->add_entry_cb(iter, al, true, arg);
iter             1177 tools/perf/util/hist.c 	while (iter->ops->next_entry(iter, al)) {
iter             1178 tools/perf/util/hist.c 		err = iter->ops->add_next_entry(iter, al);
iter             1182 tools/perf/util/hist.c 		if (iter->he && iter->add_entry_cb) {
iter             1183 tools/perf/util/hist.c 			err = iter->add_entry_cb(iter, al, false, arg);
iter             1190 tools/perf/util/hist.c 	err2 = iter->ops->finish_entry(iter, al);
iter             1402 tools/perf/util/hist.c 	struct hist_entry *iter, *new;
iter             1409 tools/perf/util/hist.c 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
iter             1413 tools/perf/util/hist.c 			cmp = fmt->collapse(fmt, iter, he);
iter             1419 tools/perf/util/hist.c 			he_stat__add_stat(&iter->stat, &he->stat);
iter             1420 tools/perf/util/hist.c 			return iter;
iter             1519 tools/perf/util/hist.c 	struct hist_entry *iter;
iter             1528 tools/perf/util/hist.c 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
iter             1530 tools/perf/util/hist.c 		cmp = hist_entry__collapse(iter, he);
iter             1535 tools/perf/util/hist.c 			he_stat__add_stat(&iter->stat, &he->stat);
iter             1537 tools/perf/util/hist.c 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
iter             1542 tools/perf/util/hist.c 						    iter->callchain,
iter             1705 tools/perf/util/hist.c 	struct hist_entry *iter;
iter             1711 tools/perf/util/hist.c 		iter = rb_entry(parent, struct hist_entry, rb_node);
iter             1713 tools/perf/util/hist.c 		if (hist_entry__sort(he, iter) > 0)
iter             1792 tools/perf/util/hist.c 	struct hist_entry *iter;
iter             1811 tools/perf/util/hist.c 		iter = rb_entry(parent, struct hist_entry, rb_node);
iter             1813 tools/perf/util/hist.c 		if (hist_entry__sort(he, iter) > 0)
iter             2115 tools/perf/util/hist.c 	struct hist_entry *iter;
iter             2122 tools/perf/util/hist.c 		iter = rb_entry(parent, struct hist_entry, rb_node);
iter             2124 tools/perf/util/hist.c 		if (hist_entry__sort(he, iter) > 0)
iter             2383 tools/perf/util/hist.c 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
iter             2384 tools/perf/util/hist.c 		int64_t cmp = hist_entry__collapse(iter, he);
iter             2391 tools/perf/util/hist.c 			return iter;
iter             2403 tools/perf/util/hist.c 		struct hist_entry *iter;
iter             2407 tools/perf/util/hist.c 		iter = rb_entry(n, struct hist_entry, rb_node_in);
iter             2409 tools/perf/util/hist.c 			cmp = fmt->collapse(fmt, iter, he);
iter             2419 tools/perf/util/hist.c 			return iter;
iter              128 tools/perf/util/hist.h 	int (*add_entry_cb)(struct hist_entry_iter *iter,
iter              158 tools/perf/util/hist.h int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
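hist_entry_iter__add() above drives a small ops table: prepare the iterator, add the first entry, keep calling add_next while next_entry reports more, then always run finish. A generic, hedged sketch of that control flow only (the struct names are illustrative, not perf's types):

	struct demo_iter;

	struct demo_iter_ops {
		int (*prepare)(struct demo_iter *it);
		int (*add_single)(struct demo_iter *it);
		int (*next)(struct demo_iter *it);	/* non-zero while entries remain */
		int (*add_next)(struct demo_iter *it);
		int (*finish)(struct demo_iter *it);
	};

	struct demo_iter {
		const struct demo_iter_ops *ops;
		void *priv;				/* per-walk scratch state */
	};

	static int demo_iter_add(struct demo_iter *it)
	{
		int err, err2;

		err = it->ops->prepare(it);
		if (err)
			return err;

		err = it->ops->add_single(it);
		if (!err) {
			while (it->ops->next(it)) {
				err = it->ops->add_next(it);
				if (err)
					break;
			}
		}

		/* finish() always runs so priv can be freed, mirroring the err2 handling above */
		err2 = it->ops->finish(it);
		if (!err)
			err = err2;

		return err;
	}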
iter             2013 tools/perf/util/machine.c 			    struct iterations *iter,
iter             2068 tools/perf/util/machine.c 	if (iter) {
iter             2069 tools/perf/util/machine.c 		nr_loop_iter = iter->nr_loop_iter;
iter             2070 tools/perf/util/machine.c 		iter_cycles = iter->cycles;
iter             2097 tools/perf/util/machine.c static void save_iterations(struct iterations *iter,
iter             2102 tools/perf/util/machine.c 	iter->nr_loop_iter++;
iter             2103 tools/perf/util/machine.c 	iter->cycles = 0;
iter             2106 tools/perf/util/machine.c 		iter->cycles += be[i].flags.cycles;
iter             2117 tools/perf/util/machine.c 			struct iterations *iter)
iter             2144 tools/perf/util/machine.c 					save_iterations(iter + i + off,
iter             2147 tools/perf/util/machine.c 					memmove(iter + i, iter + i + off,
iter             2148 tools/perf/util/machine.c 						j * sizeof(*iter));
iter             2322 tools/perf/util/machine.c 		struct iterations iter[nr];
iter             2353 tools/perf/util/machine.c 		memset(iter, 0, sizeof(struct iterations) * nr);
iter             2354 tools/perf/util/machine.c 		nr = remove_loops(be, nr, iter);
iter             2367 tools/perf/util/machine.c 						       &iter[i], 0);
iter              226 tools/perf/util/ordered-events.c 	struct ordered_event *tmp, *iter;
iter              238 tools/perf/util/ordered-events.c 	list_for_each_entry_safe(iter, tmp, head, list) {
iter              242 tools/perf/util/ordered-events.c 		if (iter->timestamp > limit)
iter              244 tools/perf/util/ordered-events.c 		ret = oe->deliver(oe, iter);
iter              248 tools/perf/util/ordered-events.c 		ordered_events__delete(oe, iter);
iter              249 tools/perf/util/ordered-events.c 		oe->last_flush = iter->timestamp;
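ordered_events flushing deletes nodes while walking, which is why it uses the _safe list variant that caches the next pointer up front. A user-space sketch with placeholder types (perf carries its own tools/include list.h):

	#include <stdlib.h>
	#include <linux/list.h>		/* tools/include copy */
	#include <linux/types.h>

	struct demo_event {
		u64 timestamp;
		struct list_head list;
	};

	static void demo_flush(struct list_head *head, u64 limit)
	{
		struct demo_event *iter, *tmp;

		list_for_each_entry_safe(iter, tmp, head, list) {
			if (iter->timestamp > limit)
				break;			/* list is time ordered */
			list_del(&iter->list);		/* safe: tmp already points past iter */
			free(iter);
		}
	}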
iter               17 tools/testing/radix-tree/benchmark.c 	struct radix_tree_iter iter;
iter               29 tools/testing/radix-tree/benchmark.c 			radix_tree_for_each_tagged(slot, root, &iter, 0, 0)
iter               32 tools/testing/radix-tree/benchmark.c 			radix_tree_for_each_slot(slot, root, &iter, 0)
iter               34 tools/testing/radix-tree/regression3.c 	struct radix_tree_iter iter;
iter               44 tools/testing/radix-tree/regression3.c 	radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
iter               45 tools/testing/radix-tree/regression3.c 		printv(2, "tagged %ld %p\n", iter.index, *slot);
iter               52 tools/testing/radix-tree/regression3.c 			printv(2, "retry at %ld\n", iter.index);
iter               53 tools/testing/radix-tree/regression3.c 			slot = radix_tree_iter_retry(&iter);
iter               60 tools/testing/radix-tree/regression3.c 	radix_tree_for_each_slot(slot, &root, &iter, 0) {
iter               61 tools/testing/radix-tree/regression3.c 		printv(2, "slot %ld %p\n", iter.index, *slot);
iter               67 tools/testing/radix-tree/regression3.c 			printv(2, "retry at %ld\n", iter.index);
iter               68 tools/testing/radix-tree/regression3.c 			slot = radix_tree_iter_retry(&iter);
iter               73 tools/testing/radix-tree/regression3.c 	radix_tree_for_each_slot(slot, &root, &iter, 0) {
iter               74 tools/testing/radix-tree/regression3.c 		printv(2, "slot %ld %p\n", iter.index, *slot);
iter               75 tools/testing/radix-tree/regression3.c 		if (!iter.index) {
iter               76 tools/testing/radix-tree/regression3.c 			printv(2, "next at %ld\n", iter.index);
iter               77 tools/testing/radix-tree/regression3.c 			slot = radix_tree_iter_resume(slot, &iter);
iter               83 tools/testing/radix-tree/regression3.c 	radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
iter               84 tools/testing/radix-tree/regression3.c 		printv(2, "tagged %ld %p\n", iter.index, *slot);
iter               85 tools/testing/radix-tree/regression3.c 		if (!iter.index) {
iter               86 tools/testing/radix-tree/regression3.c 			printv(2, "next at %ld\n", iter.index);
iter               87 tools/testing/radix-tree/regression3.c 			slot = radix_tree_iter_resume(slot, &iter);
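regression3.c exercises the slot-iteration helpers that cope with concurrent modification: deref the slot, retry the index if the entry moved, or resume after dropping out of the walk. A condensed sketch of the retry half (demo_walk is a placeholder):

	#include <linux/radix-tree.h>

	static void demo_walk(struct radix_tree_root *root)
	{
		struct radix_tree_iter iter;
		void **slot;

		radix_tree_for_each_slot(slot, root, &iter, 0) {
			void *entry = radix_tree_deref_slot(slot);

			if (radix_tree_deref_retry(entry)) {
				/* entry moved (e.g. node shrink); redo this index */
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/* use entry; iter.index holds the current key */
		}
	}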
iter              694 tools/testing/selftests/bpf/test_lpm_map.c 	int iter;
iter              704 tools/testing/selftests/bpf/test_lpm_map.c 	int i, j, ret, iter, key_size;
iter              710 tools/testing/selftests/bpf/test_lpm_map.c 	for (iter = 0; iter < info->iter; iter++)
iter              715 tools/testing/selftests/bpf/test_lpm_map.c 			j = (iter < (info->iter / 2)) ? i : MAX_TEST_KEYS - i - 1;
iter              742 tools/testing/selftests/bpf/test_lpm_map.c 	info->iter = 2000;
iter               98 tools/testing/selftests/net/nettest.c static int iter = 1;
iter              987 tools/testing/selftests/net/nettest.c 		if (iter != -1) {
iter              988 tools/testing/selftests/net/nettest.c 			--iter;
iter              989 tools/testing/selftests/net/nettest.c 			if (iter == 0)
iter             1637 tools/testing/selftests/net/nettest.c 			iter = atoi(optarg);
iter             1721 tools/testing/selftests/net/nettest.c 	if (iter == 0) {
iter              215 tools/testing/selftests/net/tcp_fastopen_backup_key.c 	static int iter;
iter              221 tools/testing/selftests/net/tcp_fastopen_backup_key.c 	if (iter < N_LISTEN) {
iter              223 tools/testing/selftests/net/tcp_fastopen_backup_key.c 		if (iter == 0) {
iter              238 tools/testing/selftests/net/tcp_fastopen_backup_key.c 	if (++iter >= (N_LISTEN * 2))
iter              239 tools/testing/selftests/net/tcp_fastopen_backup_key.c 		iter = 0;
iter               30 tools/testing/selftests/pidfd/pidfd_poll_test.c 	int iter, nevents;
iter               48 tools/testing/selftests/pidfd/pidfd_poll_test.c 	for (iter = 0; iter < nr_iterations; iter++) {
iter               54 tools/testing/selftests/pidfd/pidfd_poll_test.c 				iter--;
iter               48 tools/testing/selftests/powerpc/security/rfi_flush.c 	int fd, passes = 0, iter, rc = 0;
iter               73 tools/testing/selftests/powerpc/security/rfi_flush.c 	iter = repetitions;
iter               95 tools/testing/selftests/powerpc/security/rfi_flush.c 	while (--iter)
iter              118 tools/testing/selftests/powerpc/security/rfi_flush.c 		iter = repetitions;
iter               36 virt/kvm/arm/vgic/vgic-debug.c static void iter_next(struct vgic_state_iter *iter)
iter               38 virt/kvm/arm/vgic/vgic-debug.c 	if (iter->dist_id == 0) {
iter               39 virt/kvm/arm/vgic/vgic-debug.c 		iter->dist_id++;
iter               43 virt/kvm/arm/vgic/vgic-debug.c 	iter->intid++;
iter               44 virt/kvm/arm/vgic/vgic-debug.c 	if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
iter               45 virt/kvm/arm/vgic/vgic-debug.c 	    ++iter->vcpu_id < iter->nr_cpus)
iter               46 virt/kvm/arm/vgic/vgic-debug.c 		iter->intid = 0;
iter               48 virt/kvm/arm/vgic/vgic-debug.c 	if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS)) {
iter               49 virt/kvm/arm/vgic/vgic-debug.c 		if (iter->lpi_idx < iter->nr_lpis)
iter               50 virt/kvm/arm/vgic/vgic-debug.c 			iter->intid = iter->lpi_array[iter->lpi_idx];
iter               51 virt/kvm/arm/vgic/vgic-debug.c 		iter->lpi_idx++;
iter               55 virt/kvm/arm/vgic/vgic-debug.c static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
iter               60 virt/kvm/arm/vgic/vgic-debug.c 	memset(iter, 0, sizeof(*iter));
iter               62 virt/kvm/arm/vgic/vgic-debug.c 	iter->nr_cpus = nr_cpus;
iter               63 virt/kvm/arm/vgic/vgic-debug.c 	iter->nr_spis = kvm->arch.vgic.nr_spis;
iter               65 virt/kvm/arm/vgic/vgic-debug.c 		iter->nr_lpis = vgic_copy_lpi_list(kvm, NULL, &iter->lpi_array);
iter               66 virt/kvm/arm/vgic/vgic-debug.c 		if (iter->nr_lpis < 0)
iter               67 virt/kvm/arm/vgic/vgic-debug.c 			iter->nr_lpis = 0;
iter               72 virt/kvm/arm/vgic/vgic-debug.c 		iter_next(iter);
iter               75 virt/kvm/arm/vgic/vgic-debug.c static bool end_of_vgic(struct vgic_state_iter *iter)
iter               77 virt/kvm/arm/vgic/vgic-debug.c 	return iter->dist_id > 0 &&
iter               78 virt/kvm/arm/vgic/vgic-debug.c 		iter->vcpu_id == iter->nr_cpus &&
iter               79 virt/kvm/arm/vgic/vgic-debug.c 		iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
iter               80 virt/kvm/arm/vgic/vgic-debug.c 		iter->lpi_idx > iter->nr_lpis;
iter               86 virt/kvm/arm/vgic/vgic-debug.c 	struct vgic_state_iter *iter;
iter               89 virt/kvm/arm/vgic/vgic-debug.c 	iter = kvm->arch.vgic.iter;
iter               90 virt/kvm/arm/vgic/vgic-debug.c 	if (iter) {
iter               91 virt/kvm/arm/vgic/vgic-debug.c 		iter = ERR_PTR(-EBUSY);
iter               95 virt/kvm/arm/vgic/vgic-debug.c 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
iter               96 virt/kvm/arm/vgic/vgic-debug.c 	if (!iter) {
iter               97 virt/kvm/arm/vgic/vgic-debug.c 		iter = ERR_PTR(-ENOMEM);
iter              101 virt/kvm/arm/vgic/vgic-debug.c 	iter_init(kvm, iter, *pos);
iter              102 virt/kvm/arm/vgic/vgic-debug.c 	kvm->arch.vgic.iter = iter;
iter              104 virt/kvm/arm/vgic/vgic-debug.c 	if (end_of_vgic(iter))
iter              105 virt/kvm/arm/vgic/vgic-debug.c 		iter = NULL;
iter              108 virt/kvm/arm/vgic/vgic-debug.c 	return iter;
iter              114 virt/kvm/arm/vgic/vgic-debug.c 	struct vgic_state_iter *iter = kvm->arch.vgic.iter;
iter              117 virt/kvm/arm/vgic/vgic-debug.c 	iter_next(iter);
iter              118 virt/kvm/arm/vgic/vgic-debug.c 	if (end_of_vgic(iter))
iter              119 virt/kvm/arm/vgic/vgic-debug.c 		iter = NULL;
iter              120 virt/kvm/arm/vgic/vgic-debug.c 	return iter;
iter              126 virt/kvm/arm/vgic/vgic-debug.c 	struct vgic_state_iter *iter;
iter              136 virt/kvm/arm/vgic/vgic-debug.c 	iter = kvm->arch.vgic.iter;
iter              137 virt/kvm/arm/vgic/vgic-debug.c 	kfree(iter->lpi_array);
iter              138 virt/kvm/arm/vgic/vgic-debug.c 	kfree(iter);
iter              139 virt/kvm/arm/vgic/vgic-debug.c 	kvm->arch.vgic.iter = NULL;
iter              221 virt/kvm/arm/vgic/vgic-debug.c 	struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
iter              226 virt/kvm/arm/vgic/vgic-debug.c 	if (iter->dist_id == 0) {
iter              234 virt/kvm/arm/vgic/vgic-debug.c 	if (iter->vcpu_id < iter->nr_cpus)
iter              235 virt/kvm/arm/vgic/vgic-debug.c 		vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
iter              237 virt/kvm/arm/vgic/vgic-debug.c 	irq = vgic_get_irq(kvm, vcpu, iter->intid);
iter              239 virt/kvm/arm/vgic/vgic-debug.c 		seq_printf(s, "       LPI %4d freed\n", iter->intid);
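vgic-debug implements the classic seq_file triple: start() allocates per-walk state, next() advances it, stop() frees it, and show() prints the element currently pointed at. A hedged sketch of that shape with a trivial counter iterator (the demo_* names and the bound of 16 are placeholders); like vgic-debug, it keeps its own handle on the state so stop() can free it even when start()/next() returned NULL:

	#include <linux/err.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	struct demo_iter {
		loff_t pos;
		loff_t max;
	};

	static void *demo_start(struct seq_file *s, loff_t *ppos)
	{
		struct demo_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

		if (!it)
			return ERR_PTR(-ENOMEM);

		it->pos = *ppos;
		it->max = 16;
		s->private = it;			/* stop() frees through this handle */
		return it->pos < it->max ? it : NULL;
	}

	static void *demo_next(struct seq_file *s, void *v, loff_t *ppos)
	{
		struct demo_iter *it = v;

		++*ppos;
		return ++it->pos < it->max ? it : NULL;
	}

	static void demo_stop(struct seq_file *s, void *v)
	{
		kfree(s->private);
		s->private = NULL;
	}

	static int demo_show(struct seq_file *s, void *v)
	{
		struct demo_iter *it = v;

		seq_printf(s, "%lld\n", (long long)it->pos);
		return 0;
	}

	static const struct seq_operations demo_sops = {
		.start	= demo_start,
		.next	= demo_next,
		.stop	= demo_stop,
		.show	= demo_show,
	};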