smap              176 arch/x86/kvm/mmu.h 	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
smap              178 arch/x86/kvm/mmu.h 		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
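The two mmu.h hits above are KVM's permission_fault() path: a branchless SMAP-override test. (cpl - 3) underflows to all-ones when CPL < 3 and is zero when CPL == 3, so ANDing it with EFLAGS.AC keeps the AC bit only for genuine supervisor-mode accesses, and the shift then drops that bit next to where PFERR_RSVD lands after pfec is shifted right by one. A minimal self-contained sketch of that arithmetic (the helper name and the main() checks are illustrative, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	#define X86_EFLAGS_AC_BIT   18
	#define X86_EFLAGS_AC       (1UL << X86_EFLAGS_AC_BIT)
	#define PFERR_RSVD_BIT      3

	/*
	 * Branchless "SMAP is overridden when CPL < 3 and EFLAGS.AC is set":
	 * (cpl - 3) underflows to all-ones for cpl < 3 and is zero for
	 * cpl == 3, so the AND keeps the AC bit only in the CPL < 3 case.
	 */
	static unsigned long smap_override(unsigned int cpl, unsigned long rflags)
	{
		unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

		/* Move the AC bit down to where PFERR_RSVD sits after pfec >> 1. */
		return smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1);
	}

	int main(void)
	{
		assert(smap_override(0, X86_EFLAGS_AC) != 0); /* CPL 0, AC=1: overridden */
		assert(smap_override(0, 0) == 0);             /* CPL 0, AC=0: enforced   */
		assert(smap_override(3, X86_EFLAGS_AC) == 0); /* CPL 3: AC is ignored    */
		printf("ok\n");
		return 0;
	}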
smap             7168 arch/x86/kvm/svm.c 	bool smap = cr4 & X86_CR4_SMAP;
smap             7211 arch/x86/kvm/svm.c 	if (smap && (!smep || is_user)) {
smap              242 fs/nilfs2/page.c 			   struct address_space *smap)
smap              251 fs/nilfs2/page.c 	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
smap              296 fs/nilfs2/page.c 			   struct address_space *smap)
smap              304 fs/nilfs2/page.c 	n = pagevec_lookup(&pvec, smap, &index);
smap              325 fs/nilfs2/page.c 			xa_lock_irq(&smap->i_pages);
smap              326 fs/nilfs2/page.c 			p = __xa_erase(&smap->i_pages, offset);
smap              328 fs/nilfs2/page.c 			smap->nrpages--;
smap              329 fs/nilfs2/page.c 			xa_unlock_irq(&smap->i_pages);
smap               51 fs/ocfs2/dlm/dlmdomain.c static inline void byte_copymap(u8 dmap[], unsigned long smap[],
smap               61 fs/ocfs2/dlm/dlmdomain.c 		if (test_bit(nn, smap))
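byte_copymap() in dlmdomain.c walks a source node bitmap with test_bit() and re-encodes it into a byte-addressed destination map whose layout does not depend on the host's unsigned-long size. Only the test_bit() loop is visible in the lines above, so the byte_set_bit() helper and the memset sizing in this hedged userspace sketch are assumptions about the surrounding code:

	#include <string.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	static int test_bit(unsigned int nr, const unsigned long *map)
	{
		return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	/* Assumed helper: set bit nr in a plain byte array, endian-independent. */
	static void byte_set_bit(unsigned int nr, unsigned char *map)
	{
		map[nr >> 3] |= 1U << (nr & 7);
	}

	/* Re-encode an unsigned-long bitmap of sz bits into a byte-granular bitmap. */
	static void byte_copymap(unsigned char dmap[], const unsigned long smap[],
				 unsigned int sz)
	{
		unsigned int nn;

		if (!sz)
			return;

		memset(dmap, 0, (sz + 7) >> 3);
		for (nn = 0; nn < sz; nn++)
			if (test_bit(nn, smap))
				byte_set_bit(nn, dmap);
	}

	int main(void)
	{
		unsigned long src[1] = { 0x25 };	/* nodes 0, 2 and 5 set */
		unsigned char dst[1];

		byte_copymap(dst, src, 8);
		return dst[0] == 0x25 ? 0 : 1;
	}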
smap               61 kernel/bpf/stackmap.c static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
smap               63 kernel/bpf/stackmap.c 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
smap               66 kernel/bpf/stackmap.c 	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
smap               67 kernel/bpf/stackmap.c 					 smap->map.numa_node);
smap               68 kernel/bpf/stackmap.c 	if (!smap->elems)
smap               71 kernel/bpf/stackmap.c 	err = pcpu_freelist_init(&smap->freelist);
smap               75 kernel/bpf/stackmap.c 	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
smap               76 kernel/bpf/stackmap.c 			       smap->map.max_entries);
smap               80 kernel/bpf/stackmap.c 	bpf_map_area_free(smap->elems);
smap               88 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap;
smap              116 kernel/bpf/stackmap.c 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
smap              122 kernel/bpf/stackmap.c 	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
smap              123 kernel/bpf/stackmap.c 	if (!smap) {
smap              128 kernel/bpf/stackmap.c 	bpf_map_init_from_attr(&smap->map, attr);
smap              129 kernel/bpf/stackmap.c 	smap->map.value_size = value_size;
smap              130 kernel/bpf/stackmap.c 	smap->n_buckets = n_buckets;
smap              136 kernel/bpf/stackmap.c 	err = prealloc_elems_and_freelist(smap);
smap              140 kernel/bpf/stackmap.c 	bpf_map_charge_move(&smap->map.memory, &mem);
smap              142 kernel/bpf/stackmap.c 	return &smap->map;
smap              148 kernel/bpf/stackmap.c 	bpf_map_area_free(smap);
smap              349 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
smap              386 kernel/bpf/stackmap.c 	id = hash & (smap->n_buckets - 1);
smap              387 kernel/bpf/stackmap.c 	bucket = READ_ONCE(smap->buckets[id]);
smap              397 kernel/bpf/stackmap.c 			pcpu_freelist_pop(&smap->freelist);
smap              407 kernel/bpf/stackmap.c 			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
smap              411 kernel/bpf/stackmap.c 			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
smap              422 kernel/bpf/stackmap.c 			pcpu_freelist_pop(&smap->freelist);
smap              431 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], new_bucket);
smap              433 kernel/bpf/stackmap.c 		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
smap              522 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
smap              526 kernel/bpf/stackmap.c 	if (unlikely(id >= smap->n_buckets))
smap              529 kernel/bpf/stackmap.c 	bucket = xchg(&smap->buckets[id], NULL);
smap              537 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], bucket);
smap              539 kernel/bpf/stackmap.c 		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
smap              546 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap = container_of(map,
smap              556 kernel/bpf/stackmap.c 		if (id >= smap->n_buckets || !smap->buckets[id])
smap              562 kernel/bpf/stackmap.c 	while (id < smap->n_buckets && !smap->buckets[id])
smap              565 kernel/bpf/stackmap.c 	if (id >= smap->n_buckets)
smap              581 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
smap              585 kernel/bpf/stackmap.c 	if (unlikely(id >= smap->n_buckets))
smap              588 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], NULL);
smap              590 kernel/bpf/stackmap.c 		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
smap              600 kernel/bpf/stackmap.c 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
smap              605 kernel/bpf/stackmap.c 	bpf_map_area_free(smap->elems);
smap              606 kernel/bpf/stackmap.c 	pcpu_freelist_destroy(&smap->freelist);
smap              607 kernel/bpf/stackmap.c 	bpf_map_area_free(smap);
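The stackmap.c hits above show the core update pattern: a stack hash is masked with (n_buckets - 1) to pick a slot, the new bucket is published with xchg(), and whatever was displaced goes back onto a per-cpu freelist. A minimal C11 sketch of that lock-free publish step (the bucket layout, names and the freelist stand-in are illustrative):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define N_BUCKETS 1024			/* must be a power of two */

	struct bucket {
		uint32_t hash;
		/* captured stack ips would follow in the real map */
	};

	static _Atomic(struct bucket *) buckets[N_BUCKETS];

	/*
	 * Publish a freshly filled bucket at hash & (N_BUCKETS - 1) and hand
	 * the displaced one back to the caller; stackmap.c pushes it onto
	 * smap->freelist instead of freeing it.
	 */
	static struct bucket *publish_bucket(struct bucket *new_bucket, uint32_t hash)
	{
		uint32_t id = hash & (N_BUCKETS - 1);

		new_bucket->hash = hash;
		return atomic_exchange(&buckets[id], new_bucket);
	}

	int main(void)
	{
		struct bucket *b = calloc(1, sizeof(*b));
		struct bucket *old = publish_bucket(b, 0xdeadbeef);

		free(old);			/* NULL on the first insert */
		return 0;
	}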
smap               62 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map __rcu *smap;
smap               93 net/core/bpf_sk_storage.c static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
smap               96 net/core/bpf_sk_storage.c 	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
smap              121 net/core/bpf_sk_storage.c static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
smap              127 net/core/bpf_sk_storage.c 	if (charge_omem && omem_charge(sk, smap->elem_size))
smap              130 net/core/bpf_sk_storage.c 	selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
smap              133 net/core/bpf_sk_storage.c 			memcpy(SDATA(selem)->data, value, smap->map.value_size);
smap              138 net/core/bpf_sk_storage.c 		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
smap              151 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              155 net/core/bpf_sk_storage.c 	smap = rcu_dereference(SDATA(selem)->smap);
smap              162 net/core/bpf_sk_storage.c 		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
smap              187 net/core/bpf_sk_storage.c 	if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
smap              189 net/core/bpf_sk_storage.c 		RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
smap              224 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              231 net/core/bpf_sk_storage.c 	smap = rcu_dereference(SDATA(selem)->smap);
smap              232 net/core/bpf_sk_storage.c 	b = select_bucket(smap, selem);
smap              239 net/core/bpf_sk_storage.c static void selem_link_map(struct bpf_sk_storage_map *smap,
smap              242 net/core/bpf_sk_storage.c 	struct bucket *b = select_bucket(smap, selem);
smap              245 net/core/bpf_sk_storage.c 	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
smap              262 net/core/bpf_sk_storage.c 		    struct bpf_sk_storage_map *smap,
smap              269 net/core/bpf_sk_storage.c 	sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
smap              270 net/core/bpf_sk_storage.c 	if (sdata && rcu_access_pointer(sdata->smap) == smap)
smap              275 net/core/bpf_sk_storage.c 		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
smap              290 net/core/bpf_sk_storage.c 			rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
smap              302 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              308 net/core/bpf_sk_storage.c 	smap = (struct bpf_sk_storage_map *)map;
smap              309 net/core/bpf_sk_storage.c 	return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
smap              327 net/core/bpf_sk_storage.c 			    struct bpf_sk_storage_map *smap,
smap              347 net/core/bpf_sk_storage.c 	selem_link_map(smap, first_selem);
smap              395 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              404 net/core/bpf_sk_storage.c 	smap = (struct bpf_sk_storage_map *)map;
smap              412 net/core/bpf_sk_storage.c 		selem = selem_alloc(smap, sk, value, true);
smap              416 net/core/bpf_sk_storage.c 		err = sk_storage_alloc(sk, smap, selem);
smap              419 net/core/bpf_sk_storage.c 			atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
smap              431 net/core/bpf_sk_storage.c 		old_sdata = __sk_storage_lookup(sk_storage, smap, false);
smap              455 net/core/bpf_sk_storage.c 	old_sdata = __sk_storage_lookup(sk_storage, smap, false);
smap              474 net/core/bpf_sk_storage.c 	selem = selem_alloc(smap, sk, value, !old_sdata);
smap              481 net/core/bpf_sk_storage.c 	selem_link_map(smap, selem);
smap              556 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              560 net/core/bpf_sk_storage.c 	smap = (struct bpf_sk_storage_map *)map;
smap              577 net/core/bpf_sk_storage.c 	for (i = 0; i < (1U << smap->bucket_log); i++) {
smap              578 net/core/bpf_sk_storage.c 		b = &smap->buckets[i];
smap              605 net/core/bpf_sk_storage.c 	kvfree(smap->buckets);
smap              635 net/core/bpf_sk_storage.c 	struct bpf_sk_storage_map *smap;
smap              641 net/core/bpf_sk_storage.c 	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
smap              642 net/core/bpf_sk_storage.c 	if (!smap)
smap              644 net/core/bpf_sk_storage.c 	bpf_map_init_from_attr(&smap->map, attr);
smap              649 net/core/bpf_sk_storage.c 	smap->bucket_log = ilog2(nbuckets);
smap              650 net/core/bpf_sk_storage.c 	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
smap              652 net/core/bpf_sk_storage.c 	ret = bpf_map_charge_init(&smap->map.memory, cost);
smap              654 net/core/bpf_sk_storage.c 		kfree(smap);
smap              658 net/core/bpf_sk_storage.c 	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
smap              660 net/core/bpf_sk_storage.c 	if (!smap->buckets) {
smap              661 net/core/bpf_sk_storage.c 		bpf_map_charge_finish(&smap->map.memory);
smap              662 net/core/bpf_sk_storage.c 		kfree(smap);
smap              667 net/core/bpf_sk_storage.c 		INIT_HLIST_HEAD(&smap->buckets[i].list);
smap              668 net/core/bpf_sk_storage.c 		raw_spin_lock_init(&smap->buckets[i].lock);
smap              671 net/core/bpf_sk_storage.c 	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
smap              672 net/core/bpf_sk_storage.c 	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
smap              675 net/core/bpf_sk_storage.c 	return &smap->map;
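In bpf_sk_storage.c the map allocates 1 << bucket_log buckets (bucket_log = ilog2(nbuckets)) and, since storage elements carry no key of their own, select_bucket() spreads them by hashing each element's address. A self-contained sketch of that pointer hash; the struct layout and helper names are stand-ins, and the multiplicative constant is the 64-bit golden ratio that the kernel's hash_ptr() reduces to on 64-bit builds:

	#include <stdint.h>
	#include <stdio.h>

	struct bucket {
		int dummy;	/* hlist head and lock in the real map */
	};

	/* Multiplicative pointer hash into 2^bits slots. */
	static uint32_t hash_ptr_bits(const void *ptr, unsigned int bits)
	{
		return (uint32_t)(((uint64_t)(uintptr_t)ptr * 0x61C8864680B583EBULL)
				  >> (64 - bits));
	}

	/* Sketch of select_bucket(): the element's own address picks the slot. */
	static struct bucket *select_bucket(struct bucket *buckets,
					    unsigned int bucket_log,
					    const void *selem)
	{
		return &buckets[hash_ptr_bits(selem, bucket_log)];
	}

	int main(void)
	{
		struct bucket buckets[16];	/* bucket_log == 4 */
		int some_elem;

		printf("elem %p -> bucket %td\n", (void *)&some_elem,
		       select_bucket(buckets, 4, &some_elem) - buckets);
		return 0;
	}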
smap              754 net/core/bpf_sk_storage.c 			  struct bpf_sk_storage_map *smap,
smap              759 net/core/bpf_sk_storage.c 	copy_selem = selem_alloc(smap, newsk, NULL, true);
smap              763 net/core/bpf_sk_storage.c 	if (map_value_has_spin_lock(&smap->map))
smap              764 net/core/bpf_sk_storage.c 		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
smap              767 net/core/bpf_sk_storage.c 		copy_map_value(&smap->map, SDATA(copy_selem)->data,
smap              790 net/core/bpf_sk_storage.c 		struct bpf_sk_storage_map *smap;
smap              793 net/core/bpf_sk_storage.c 		smap = rcu_dereference(SDATA(selem)->smap);
smap              794 net/core/bpf_sk_storage.c 		if (!(smap->map.map_flags & BPF_F_CLONE))
smap              802 net/core/bpf_sk_storage.c 		map = bpf_map_inc_not_zero(&smap->map, false);
smap              806 net/core/bpf_sk_storage.c 		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
smap              814 net/core/bpf_sk_storage.c 			selem_link_map(smap, copy_selem);
smap              817 net/core/bpf_sk_storage.c 			ret = sk_storage_alloc(newsk, smap, copy_selem);
smap              820 net/core/bpf_sk_storage.c 				atomic_sub(smap->elem_size,
smap               33 tools/testing/selftests/vm/map_populate.c static int parent_f(int sock, unsigned long *smap, int child)
smap               40 tools/testing/selftests/vm/map_populate.c 	*smap = 0x22222BAD;
smap               41 tools/testing/selftests/vm/map_populate.c 	ret = msync(smap, MMAP_SZ, MS_SYNC);
smap               53 tools/testing/selftests/vm/map_populate.c static int child_f(int sock, unsigned long *smap, int fd)
smap               57 tools/testing/selftests/vm/map_populate.c 	smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
smap               59 tools/testing/selftests/vm/map_populate.c 	BUG_ON(smap == MAP_FAILED, "mmap()");
smap               61 tools/testing/selftests/vm/map_populate.c 	BUG_ON(*smap != 0xdeadbabe, "MAP_PRIVATE | MAP_POPULATE changed file");
smap               69 tools/testing/selftests/vm/map_populate.c 	BUG_ON(*smap == 0x22222BAD, "MAP_POPULATE didn't COW private page");
smap               70 tools/testing/selftests/vm/map_populate.c 	BUG_ON(*smap != 0xdeadbabe, "mapping was corrupted");
smap               79 tools/testing/selftests/vm/map_populate.c 	unsigned long *smap;
smap               87 tools/testing/selftests/vm/map_populate.c 	smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
smap               89 tools/testing/selftests/vm/map_populate.c 	BUG_ON(smap == MAP_FAILED, "mmap()");
smap               91 tools/testing/selftests/vm/map_populate.c 	*smap = 0xdeadbabe;
smap               93 tools/testing/selftests/vm/map_populate.c 	ret = msync(smap, MMAP_SZ, MS_SYNC);
smap              106 tools/testing/selftests/vm/map_populate.c 		return parent_f(sock[1], smap, child);
smap              112 tools/testing/selftests/vm/map_populate.c 	return child_f(sock[0], smap, fileno(ftmp));
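The map_populate selftest maps a temporary file with MAP_PRIVATE | MAP_POPULATE in both parent and child and checks that the prefaulted pages are COWed rather than written back. A stripped-down userspace sketch of the same property, without the fork/socketpair plumbing (file name, size and values are illustrative); it only exercises the "private store never reaches the file" half of the test:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define MMAP_SZ 4096

	int main(void)
	{
		char path[] = "/tmp/map_populate_XXXXXX";
		int fd = mkstemp(path);
		unsigned long *p, on_disk;

		if (fd < 0 || ftruncate(fd, MMAP_SZ))
			return 1;

		/* Prefaulted private mapping: pages are populated, then COWed on write. */
		p = mmap(NULL, MMAP_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_POPULATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		*p = 0x22222BAD;	/* private store, must not reach the file */

		if (pread(fd, &on_disk, sizeof(on_disk), 0) != sizeof(on_disk))
			return 1;

		printf("file word: %#lx (unchanged: %s)\n", on_disk,
		       on_disk == 0 ? "yes" : "no");
		unlink(path);
		return 0;
	}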