/linux-4.1.27/include/linux/ |
D | gfp.h | 293 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 297 __alloc_pages(gfp_t gfp_mask, unsigned int order, in __alloc_pages() argument 300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages() 303 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument 310 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_node() 313 static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, in alloc_pages_exact_node() argument 318 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_exact_node() 322 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); 325 alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument 327 return alloc_pages_current(gfp_mask, order); in alloc_pages() [all …]
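Illustrative only: a minimal sketch (not taken from the tree) of how the entry points above consume a gfp_t — all of them funnel into __alloc_pages_nodemask(). The helper name is hypothetical.

    #include <linux/gfp.h>

    static struct page *example_get_page(void)
    {
            /* GFP_KERNEL may sleep and perform I/O; __GFP_ZERO asks the
             * allocator for a zeroed page. Use GFP_ATOMIC instead where
             * sleeping is forbidden (interrupt or spinlock context). */
            return alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
    }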
|
D | mempool.h | 11 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); 30 gfp_t gfp_mask, int nid); 34 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); 42 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 55 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); 67 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
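Illustrative only: a hedged sketch of the usual mempool pattern implied by the prototypes above. obj_cache is an assumed, pre-created kmem_cache; the helper names are hypothetical.

    #include <linux/mempool.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static struct kmem_cache *obj_cache;    /* assumed created elsewhere */
    static mempool_t *obj_pool;

    static int example_pool_init(void)
    {
            /* Guarantee at least 4 preallocated elements. */
            obj_pool = mempool_create(4, mempool_alloc_slab,
                                      mempool_free_slab, obj_cache);
            return obj_pool ? 0 : -ENOMEM;
    }

    static void *example_pool_get(void)
    {
            /* GFP_NOIO keeps reclaim off the block-I/O path; if the page
             * allocator fails, the call falls back to the reserve. */
            return mempool_alloc(obj_pool, GFP_NOIO);
    }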
|
D | cpuset.h | 51 extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); 53 static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument 55 return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed() 58 static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument 60 return cpuset_node_allowed(zone_to_nid(z), gfp_mask); in cpuset_zone_allowed() 169 static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument 174 static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
|
D | page_owner.h | 10 unsigned int order, gfp_t gfp_mask); 21 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument 26 __set_page_owner(page, order, gfp_mask); in set_page_owner() 33 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
|
D | idr.h | 80 void idr_preload(gfp_t gfp_mask); 81 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); 82 int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); 162 int ida_pre_get(struct ida *ida, gfp_t gfp_mask); 169 gfp_t gfp_mask);
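Illustrative only: the preload idiom these prototypes support — preallocate with a sleeping mask, then allocate the ID atomically under the preload. Names are hypothetical.

    #include <linux/idr.h>

    static DEFINE_IDR(example_idr);

    static int example_idr_add(void *ptr)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* may sleep; fills per-cpu cache */
            id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
            idr_preload_end();
            return id;                      /* >= 0 on success */
    }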
|
D | oom.h | 60 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 68 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 76 extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
|
D | radix-tree.h | 109 gfp_t gfp_mask; member 115 .gfp_mask = (mask), \ 125 (root)->gfp_mask = (mask); \ 280 int radix_tree_preload(gfp_t gfp_mask); 281 int radix_tree_maybe_preload(gfp_t gfp_mask);
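Illustrative only: a sketch of the matching preload idiom. The root's gfp_mask (stored in the struct, per line 109 above) covers node allocations at insert time; an atomic root mask makes the insert draw from the preloaded per-cpu pool.

    #include <linux/radix-tree.h>

    static RADIX_TREE(example_tree, GFP_ATOMIC);

    static int example_tree_insert(unsigned long index, void *item)
    {
            int err;

            err = radix_tree_preload(GFP_KERNEL);   /* may sleep */
            if (err)
                    return err;
            err = radix_tree_insert(&example_tree, index, item);
            radix_tree_preload_end();
            return err;
    }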
|
D | pagemap.h | 318 pgoff_t offset, gfp_t gfp_mask) in find_or_create_page() argument 322 gfp_mask); in find_or_create_page() 373 pgoff_t index, gfp_t gfp_mask); 650 pgoff_t index, gfp_t gfp_mask); 652 pgoff_t index, gfp_t gfp_mask); 655 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); 662 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache() argument 667 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); in add_to_page_cache()
|
D | swap.h | 320 gfp_t gfp_mask, nodemask_t *mask); 324 gfp_t gfp_mask, 327 gfp_t gfp_mask, bool noswap, 464 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation() argument 486 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument 508 gfp_t gfp_mask) in add_to_swap_cache() argument
|
D | mISDNif.h | 538 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) in mI_alloc_skb() argument 542 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); in mI_alloc_skb() 549 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) in _alloc_mISDN_skb() argument 551 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); in _alloc_mISDN_skb() 566 u_int id, u_int len, void *dp, gfp_t gfp_mask) in _queue_data() argument 572 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); in _queue_data()
|
D | bio.h | 394 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) in bio_alloc() argument 396 return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); in bio_alloc() 399 static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) in bio_clone() argument 401 return bio_clone_bioset(bio, gfp_mask, fs_bio_set); in bio_clone() 404 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) in bio_kmalloc() argument 406 return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); in bio_kmalloc() 409 static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) in bio_clone_kmalloc() argument 411 return bio_clone_bioset(bio, gfp_mask, NULL); in bio_clone_kmalloc() 745 gfp_t gfp_mask) in bio_integrity_clone() argument
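Illustrative only: a hedged sketch of bio_alloc() use with a reclaim-safe mask; field names follow the 4.1-era struct bio, and the helper is hypothetical.

    #include <linux/bio.h>

    static struct bio *example_make_bio(struct block_device *bdev,
                                        struct page *page, sector_t sector)
    {
            /* GFP_NOIO: bios are often built on the writeback path,
             * where recursing into block I/O during reclaim deadlocks. */
            struct bio *bio = bio_alloc(GFP_NOIO, 1);

            if (!bio)
                    return NULL;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            bio_add_page(bio, page, PAGE_SIZE, 0);
            return bio;
    }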
|
D | connector.h | 74 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); 75 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
|
D | compaction.h | 40 extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 55 static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, in try_to_compact_pages() argument
|
D | textsearch.h | 162 gfp_t gfp_mask) in alloc_ts_config() argument 166 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); in alloc_ts_config()
|
D | netlink.h | 70 u32 dst_portid, gfp_t gfp_mask); 90 netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in netlink_skb_clone() argument 94 nskb = skb_clone(skb, gfp_mask); in netlink_skb_clone()
|
D | skbuff.h | 790 gfp_t gfp_mask); 834 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 838 gfp_t gfp_mask, bool fclone); 840 gfp_t gfp_mask) in __pskb_copy() argument 842 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); in __pskb_copy() 845 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); 2104 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags() argument 2108 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags() 2134 gfp_t gfp_mask); 2157 gfp_t gfp_mask) in __dev_alloc_skb() argument [all …]
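Illustrative only: network buffers are usually allocated or cloned in softirq context, hence GFP_ATOMIC throughout the prototypes above. A minimal hypothetical sketch:

    #include <linux/skbuff.h>

    static struct sk_buff *example_alloc(unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_ATOMIC);

            if (skb)
                    skb_reserve(skb, NET_SKB_PAD);  /* headroom for headers */
            return skb;
    }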
|
D | memcontrol.h | 77 gfp_t gfp_mask, struct mem_cgroup **memcgp); 180 gfp_t gfp_mask, 211 gfp_t gfp_mask, in mem_cgroup_try_charge() argument 363 gfp_t gfp_mask, in mem_cgroup_soft_limit_reclaim() argument
|
D | vmalloc.h | 77 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 79 unsigned long start, unsigned long end, gfp_t gfp_mask,
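Illustrative only: __vmalloc() exposes the gfp_t so that callers such as filesystems can allocate large buffers without reclaim re-entering them. A hypothetical sketch:

    #include <linux/vmalloc.h>

    static void *example_big_buffer(unsigned long size)
    {
            /* GFP_NOFS bars reclaim from taking filesystem locks;
             * __GFP_HIGHMEM lets the pages come from highmem. */
            return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
    }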
|
D | shrinker.h | 12 gfp_t gfp_mask; member
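Illustrative only: the gfp_mask member above tells a shrinker what the reclaiming allocation is allowed to do — the same check recurs in super.c, ashmem.c and others below. A hypothetical scan callback:

    #include <linux/shrinker.h>
    #include <linux/gfp.h>

    static unsigned long example_scan(struct shrinker *s,
                                      struct shrink_control *sc)
    {
            /* If the triggering allocation may not touch the
             * filesystem, refuse to scan rather than deadlock. */
            if (!(sc->gfp_mask & __GFP_FS))
                    return SHRINK_STOP;
            return 0;       /* objects actually freed */
    }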
|
D | page_ext.h | 42 gfp_t gfp_mask; member
|
D | audit.h | 449 void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, 452 extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); 500 void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, in audit_log() argument 504 gfp_t gfp_mask, int type) in audit_log_start() argument
|
D | blkdev.h | 808 struct bio_set *bs, gfp_t gfp_mask, 1162 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 1164 sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1166 sector_t nr_sects, gfp_t gfp_mask, bool discard); 1168 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) in sb_issue_discard() argument 1172 gfp_mask, flags); in sb_issue_discard() 1175 sector_t nr_blocks, gfp_t gfp_mask) in sb_issue_zeroout() argument 1180 gfp_mask, true); in sb_issue_zeroout() 1665 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, in blkdev_issue_flush() argument
|
D | kmod.h | 73 call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask,
|
D | shmem_fs.h | 59 pgoff_t index, gfp_t gfp_mask);
|
D | btree.h | 48 void *btree_alloc(gfp_t gfp_mask, void *pool_data);
|
D | kfifo.h | 332 #define kfifo_alloc(fifo, size, gfp_mask) \ argument 338 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ 772 size_t esize, gfp_t gfp_mask);
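Illustrative only: the kfifo_alloc() macro above forwards the mask to __kfifo_alloc() for the backing buffer. Hypothetical sketch:

    #include <linux/kfifo.h>

    static struct kfifo example_fifo;

    static int example_fifo_init(void)
    {
            /* Size is rounded to a power of two; GFP_KERNEL may sleep. */
            return kfifo_alloc(&example_fifo, 128, GFP_KERNEL);
    }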
|
D | writeback.h | 121 void throttle_vm_writeout(gfp_t gfp_mask);
|
D | elevator.h | 138 struct bio *bio, gfp_t gfp_mask);
|
D | scatterlist.h | 240 gfp_t gfp_mask);
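Illustrative only: sg_alloc_table() takes the mask because large tables are chained from multiple page-sized allocations (see __sg_alloc_table in lib/scatterlist.c below). Hypothetical sketch:

    #include <linux/scatterlist.h>

    static int example_sg_init(struct sg_table *table, unsigned int nents)
    {
            return sg_alloc_table(table, nents, GFP_KERNEL);
    }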
|
D | suspend.h | 343 extern unsigned long get_safe_page(gfp_t gfp_mask);
|
D | jbd2.h | 1119 gfp_t gfp_mask, unsigned int type, 1122 extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
|
D | workqueue.h | 423 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
|
D | lockdep.h | 353 extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
|
D | mm.h | 1234 extern int try_to_release_page(struct page * page, gfp_t gfp_mask); 1774 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
|
D | device.h | 636 gfp_t gfp_mask, unsigned int order);
|
/linux-4.1.27/mm/ |
D | mempool.c | 186 gfp_t gfp_mask, int node_id) in mempool_create_node() argument 189 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node() 193 gfp_mask, node_id); in mempool_create_node() 211 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_create_node() 312 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument 319 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc() 320 might_sleep_if(gfp_mask & __GFP_WAIT); in mempool_alloc() 322 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc() 323 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ in mempool_alloc() 324 gfp_mask |= __GFP_NOWARN; /* failures are OK */ in mempool_alloc() [all …]
|
D | oom_kill.c | 199 gfp_t gfp_mask, nodemask_t *nodemask, in constrained_alloc() argument 204 enum zone_type high_zoneidx = gfp_zone(gfp_mask); in constrained_alloc() 218 if (gfp_mask & __GFP_THISNODE) in constrained_alloc() 236 if (!cpuset_zone_allowed(zone, gfp_mask)) in constrained_alloc() 249 gfp_t gfp_mask, nodemask_t *nodemask, in constrained_alloc() argument 382 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, in dump_header() argument 388 current->comm, gfp_mask, order, in dump_header() 501 void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, in oom_kill_process() argument 528 dump_header(p, gfp_mask, order, memcg, nodemask); in oom_kill_process() 614 void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, in check_panic_on_oom() argument [all …]
|
D | page_alloc.c | 1817 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument 1821 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page() 1823 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page() 1825 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) in should_fail_alloc_page() 1866 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument 2109 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument 2120 (gfp_mask & __GFP_WRITE); in get_page_from_freelist() 2140 !cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist() 2219 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist() 2252 gfp_mask, ac->migratetype); in get_page_from_freelist() [all …]
|
D | vmscan.c | 66 gfp_t gfp_mask; member 375 static unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument 402 .gfp_mask = gfp_mask, in shrink_slab() 896 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list() 897 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list() 1008 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list() 1111 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list() 1191 .gfp_mask = GFP_KERNEL, in reclaim_clean_pages_from_list() 1431 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) in too_many_isolated() 2232 throttle_vm_writeout(sc->gfp_mask); in shrink_lruvec() [all …]
|
D | swap_state.c | 119 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) in add_to_swap_cache() argument 123 error = radix_tree_maybe_preload(gfp_mask); in add_to_swap_cache() 297 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument 318 new_page = alloc_page_vma(gfp_mask, vma, addr); in read_swap_cache_async() 326 err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL); in read_swap_cache_async() 450 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead() argument 474 gfp_mask, vma, addr); in swapin_readahead() 485 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
|
D | page_owner.c | 60 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) in __set_page_owner() argument 73 page_ext->gfp_mask = gfp_mask; in __set_page_owner() 97 page_ext->order, page_ext->gfp_mask); in print_page_owner() 104 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); in print_page_owner()
|
D | vmalloc.c | 352 int node, gfp_t gfp_mask) in alloc_vmap_area() argument 365 gfp_mask & GFP_RECLAIM_MASK, node); in alloc_vmap_area() 373 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area() 816 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument 828 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block() 834 node, gfp_mask); in new_vmap_block() 840 err = radix_tree_preload(gfp_mask); in new_vmap_block() 932 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument 980 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc() 1330 unsigned long end, int node, gfp_t gfp_mask, const void *caller) in __get_vm_area_node() argument [all …]
|
D | filemap.c | 138 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete() 461 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page() argument 469 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); in replace_page_cache_page() 545 pgoff_t offset, gfp_t gfp_mask, in __add_to_page_cache_locked() argument 557 gfp_mask, &memcg); in __add_to_page_cache_locked() 562 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); in __add_to_page_cache_locked() 605 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_locked() argument 608 gfp_mask, NULL); in add_to_page_cache_locked() 613 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_lru() argument 620 gfp_mask, &shadow); in add_to_page_cache_lru() [all …]
|
D | page_isolation.c | 295 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; in alloc_migrate_target() local 311 gfp_mask |= __GFP_HIGHMEM; in alloc_migrate_target() 313 return alloc_page(gfp_mask); in alloc_migrate_target()
|
D | compaction.c | 1288 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); in compact_zone() 1439 gfp_t gfp_mask, enum migrate_mode mode, int *contended, in compact_zone_order() argument 1447 .gfp_mask = gfp_mask, in compact_zone_order() 1479 unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument 1483 int may_enter_fs = gfp_mask & __GFP_FS; in try_to_compact_pages() 1484 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages() 1496 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); in try_to_compact_pages() 1507 status = compact_zone_order(zone, order, gfp_mask, mode, in try_to_compact_pages()
|
D | memcontrol.c | 1524 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument 1543 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg); in mem_cgroup_out_of_memory() 1591 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, in mem_cgroup_out_of_memory() 1694 gfp_t gfp_mask, in mem_cgroup_soft_reclaim() argument 1733 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, in mem_cgroup_soft_reclaim() 1882 current->memcg_oom.gfp_mask = mask; in mem_cgroup_oom() 1933 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask, in mem_cgroup_oom_synchronize() 2218 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument 2267 if (!(gfp_mask & __GFP_WAIT)) in try_charge() 2273 gfp_mask, may_swap); in try_charge() [all …]
|
D | internal.h | 188 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ member
|
D | page-writeback.c | 1624 void throttle_vm_writeout(gfp_t gfp_mask) in throttle_vm_writeout() argument 1649 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()
|
D | nommu.c | 280 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument 286 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); in __vmalloc()
|
D | vmstat.c | 1076 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); in pagetypeinfo_showmixedcount_print()
|
D | swapfile.c | 2741 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) in add_swap_count_continuation() argument 2754 page = alloc_page(gfp_mask | __GFP_HIGHMEM); in add_swap_count_continuation()
|
/linux-4.1.27/fs/nfs/blocklayout/ |
D | dev.c | 184 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask); 189 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument 194 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple() 216 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument 221 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice() 232 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument 245 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat() 261 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument 274 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe() 290 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_deviceid() argument [all …]
|
D | blocklayout.h | 183 struct pnfs_device *pdev, gfp_t gfp_mask); 200 struct pnfs_block_volume *b, gfp_t gfp_mask);
|
D | blocklayout.c | 547 gfp_t gfp_mask) in bl_alloc_extent() argument 567 lo->plh_lc_cred, gfp_mask); in bl_alloc_extent() 601 gfp_t gfp_mask) in bl_alloc_lseg() argument 621 lseg = kzalloc(sizeof(*lseg), gfp_mask); in bl_alloc_lseg() 626 scratch = alloc_page(gfp_mask); in bl_alloc_lseg() 647 status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask); in bl_alloc_lseg()
|
D | rpc_pipefs.c | 54 gfp_t gfp_mask) in bl_resolve_deviceid() argument 77 msg->data = kzalloc(msg->len, gfp_mask); in bl_resolve_deviceid()
|
/linux-4.1.27/fs/btrfs/ |
D | ulist.h | 55 struct ulist *ulist_alloc(gfp_t gfp_mask); 57 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask); 59 u64 *old_aux, gfp_t gfp_mask); 63 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument 67 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr() 71 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
|
D | ulist.c | 92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument 94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc() 177 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument 179 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add() 183 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument 194 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
|
D | extent_io.h | 331 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); 332 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
|
D | backref.c | 197 gfp_t gfp_mask) in __add_prelim_ref() argument 204 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); in __add_prelim_ref()
|
D | extent_io.c | 2717 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) in btrfs_bio_clone() argument 2722 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset); in btrfs_bio_clone() 2733 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) in btrfs_io_bio_alloc() argument 2738 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); in btrfs_io_bio_alloc()
|
/linux-4.1.27/block/ |
D | blk-map.c | 65 const struct iov_iter *iter, gfp_t gfp_mask) in blk_rq_map_user_iov() argument 89 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov() 91 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov() 123 unsigned long len, gfp_t gfp_mask) in blk_rq_map_user() argument 132 return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); in blk_rq_map_user() 182 unsigned int len, gfp_t gfp_mask) in blk_rq_map_kern() argument 197 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern() 199 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
|
D | blk-lib.c | 41 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) in blkdev_issue_discard() argument 89 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_discard() 156 sector_t nr_sects, gfp_t gfp_mask, in blkdev_issue_write_same() argument 179 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_write_same() 230 sector_t nr_sects, gfp_t gfp_mask) in __blkdev_issue_zeroout() argument 244 bio = bio_alloc(gfp_mask, in __blkdev_issue_zeroout() 302 sector_t nr_sects, gfp_t gfp_mask, bool discard) in blkdev_issue_zeroout() argument 307 blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0) in blkdev_issue_zeroout() 311 blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, in blkdev_issue_zeroout() 315 return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask); in blkdev_issue_zeroout()
|
D | bio.c | 174 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, in bvec_alloc() argument 211 bvl = mempool_alloc(pool, gfp_mask); in bvec_alloc() 214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); in bvec_alloc() 228 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { in bvec_alloc() 410 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) in bio_alloc_bioset() argument 412 gfp_t saved_gfp = gfp_mask; in bio_alloc_bioset() 426 gfp_mask); in bio_alloc_bioset() 455 gfp_mask &= ~__GFP_WAIT; in bio_alloc_bioset() 457 p = mempool_alloc(bs->bio_pool, gfp_mask); in bio_alloc_bioset() 458 if (!p && gfp_mask != saved_gfp) { in bio_alloc_bioset() [all …]
|
D | blk.h | 56 gfp_t gfp_mask); 241 gfp_t gfp_mask); 244 int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); 258 static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) in create_io_context() argument 262 create_task_io_context(current, gfp_mask, node); in create_io_context()
|
D | bio-integrity.c | 46 gfp_t gfp_mask, in bio_integrity_alloc() argument 56 sizeof(struct bio_vec) * nr_vecs, gfp_mask); in bio_integrity_alloc() 59 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); in bio_integrity_alloc() 69 bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, in bio_integrity_alloc() 451 gfp_t gfp_mask) in bio_integrity_clone() argument 458 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); in bio_integrity_clone()
|
D | blk-core.c | 563 static void *alloc_request_struct(gfp_t gfp_mask, void *data) in alloc_request_struct() argument 566 return kmem_cache_alloc_node(request_cachep, gfp_mask, nid); in alloc_request_struct() 575 gfp_t gfp_mask) in blk_init_rl() argument 588 (void *)(long)q->node, gfp_mask, in blk_init_rl() 602 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) in blk_alloc_queue() argument 604 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); in blk_alloc_queue() 608 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) in blk_alloc_queue_node() argument 614 gfp_mask | __GFP_ZERO, node_id); in blk_alloc_queue_node() 618 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); in blk_alloc_queue_node() 975 struct bio *bio, gfp_t gfp_mask) in __get_request() argument [all …]
|
D | blk-ioc.c | 358 gfp_t gfp_mask) in ioc_create_icq() argument 364 icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO, in ioc_create_icq() 369 if (radix_tree_maybe_preload(gfp_mask) < 0) { in ioc_create_icq()
|
D | bounce.c | 74 static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) in mempool_alloc_pages_isa() argument 76 return mempool_alloc_pages(gfp_mask | GFP_DMA, data); in mempool_alloc_pages_isa()
|
D | blk-flush.c | 449 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, in blkdev_issue_flush() argument 472 bio = bio_alloc(gfp_mask, 0); in blkdev_issue_flush()
|
D | blk-cgroup.c | 70 gfp_t gfp_mask) in blkg_alloc() argument 76 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc() 87 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc() 100 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); in blkg_alloc()
|
D | cfq-iosched.c | 862 gfp_t gfp_mask); 3582 struct bio *bio, gfp_t gfp_mask) in cfq_find_alloc_queue() argument 3609 } else if (gfp_mask & __GFP_WAIT) { in cfq_find_alloc_queue() 3613 gfp_mask | __GFP_ZERO, in cfq_find_alloc_queue() 3622 gfp_mask | __GFP_ZERO, in cfq_find_alloc_queue() 3662 struct bio *bio, gfp_t gfp_mask) in cfq_get_queue() argument 3680 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); in cfq_get_queue() 4216 gfp_t gfp_mask) in cfq_set_request() argument 4224 might_sleep_if(gfp_mask & __GFP_WAIT); in cfq_set_request() 4233 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); in cfq_set_request()
|
D | elevator.c | 703 struct bio *bio, gfp_t gfp_mask) in elv_set_request() argument 708 return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask); in elv_set_request()
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
D | c2_alloc.c | 39 static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, in c2_alloc_mqsp_chunk() argument 47 &dma_addr, gfp_mask); in c2_alloc_mqsp_chunk() 71 int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, in c2_init_mqsp_pool() argument 74 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root); in c2_init_mqsp_pool() 90 dma_addr_t *dma_addr, gfp_t gfp_mask) in c2_alloc_mqsp() argument 100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == in c2_alloc_mqsp()
|
D | c2.h | 541 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, 545 dma_addr_t *dma_addr, gfp_t gfp_mask);
|
/linux-4.1.27/lib/ |
D | idr.c | 94 static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) in idr_layer_alloc() argument 109 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); in idr_layer_alloc() 134 return kmem_cache_zalloc(idr_layer_cache, gfp_mask); in idr_layer_alloc() 192 static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) in __idr_pre_get() argument 196 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); in __idr_pre_get() 221 gfp_t gfp_mask, struct idr *layer_idr) in sub_alloc() argument 273 new = idr_layer_alloc(gfp_mask, layer_idr); in sub_alloc() 290 struct idr_layer **pa, gfp_t gfp_mask, in idr_get_empty_slot() argument 302 if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) in idr_get_empty_slot() 322 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { in idr_get_empty_slot() [all …]
|
D | scatterlist.c | 136 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc() argument 148 void *ptr = (void *) __get_free_page(gfp_mask); in sg_kmalloc() 149 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); in sg_kmalloc() 152 return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); in sg_kmalloc() 249 gfp_t gfp_mask, sg_alloc_fn *alloc_fn) in __sg_alloc_table() argument 280 sg = alloc_fn(alloc_size, gfp_mask); in __sg_alloc_table() 331 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table() argument 336 NULL, gfp_mask, sg_kmalloc); in sg_alloc_table() 367 gfp_t gfp_mask) in sg_alloc_table_from_pages() argument 381 ret = sg_alloc_table(sgt, chunks, gfp_mask); in sg_alloc_table_from_pages()
|
D | radix-tree.c | 84 return root->gfp_mask & __GFP_BITS_MASK; in root_gfp_mask() 107 root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); in root_tag_set() 112 root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); in root_tag_clear() 117 root->gfp_mask &= __GFP_BITS_MASK; in root_tag_clear_all() 122 return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); in root_tag_get() 183 gfp_t gfp_mask = root_gfp_mask(root); in radix_tree_node_alloc() local 190 if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) { in radix_tree_node_alloc() 211 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in radix_tree_node_alloc() 252 static int __radix_tree_preload(gfp_t gfp_mask) in __radix_tree_preload() argument 262 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in __radix_tree_preload() [all …]
|
D | textsearch.c | 262 unsigned int len, gfp_t gfp_mask, int flags) in textsearch_prepare() argument 287 conf = ops->init(pattern, len, gfp_mask, flags); in textsearch_prepare()
|
D | ts_kmp.c | 96 gfp_t gfp_mask, int flags) in kmp_init() argument 104 conf = alloc_ts_config(priv_size, gfp_mask); in kmp_init()
|
D | ts_bm.c | 146 gfp_t gfp_mask, int flags) in bm_init() argument 154 conf = alloc_ts_config(priv_size, gfp_mask); in bm_init()
|
D | ts_fsm.c | 260 gfp_t gfp_mask, int flags) in fsm_init() argument 286 conf = alloc_ts_config(priv_size, gfp_mask); in fsm_init()
|
D | kfifo.c | 39 size_t esize, gfp_t gfp_mask) in __kfifo_alloc() argument 57 fifo->data = kmalloc(size * esize, gfp_mask); in __kfifo_alloc()
|
D | kobject.c | 146 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) in kobject_get_path() argument 154 path = kzalloc(len, gfp_mask); in kobject_get_path()
|
D | btree.c | 81 void *btree_alloc(gfp_t gfp_mask, void *pool_data) in btree_alloc() argument 83 return kmem_cache_alloc(btree_cachep, gfp_mask); in btree_alloc()
|
/linux-4.1.27/arch/tile/include/asm/ |
D | homecache.h | 92 extern struct page *homecache_alloc_pages(gfp_t gfp_mask, 94 extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, 96 #define homecache_alloc_page(gfp_mask, home) \ argument 97 homecache_alloc_pages(gfp_mask, 0, home)
|
D | kexec.h | 50 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_page_pool.c | 29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages() 116 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, in ion_page_pool_shrink() argument 125 high = !!(gfp_mask & __GFP_HIGHMEM); in ion_page_pool_shrink() 149 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument 159 pool->gfp_mask = gfp_mask | __GFP_COMP; in ion_page_pool_create()
|
D | ion_priv.h | 123 int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); 374 gfp_t gfp_mask; member 379 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); 391 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
D | ion_heap.c | 277 total += heap->ops->shrink(heap, sc->gfp_mask, 0); in ion_heap_shrink_count() 305 freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); in ion_heap_shrink_scan()
|
D | ion_system_heap.c | 210 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, in ion_system_heap_shrink() argument 222 nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); in ion_system_heap_shrink()
|
D | ion.c | 1476 sc.gfp_mask = -1; in debug_shrink_set() 1495 sc.gfp_mask = -1; in debug_shrink_get()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c | 98 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument 102 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages() 104 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages() 114 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument 117 &sg_dma_address(mem), gfp_mask); in mlx4_alloc_icm_coherent() 128 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument 136 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm() 139 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm() 143 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm() 156 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm() [all …]
|
D | icm.h | 71 gfp_t gfp_mask, int coherent);
|
/linux-4.1.27/fs/ntfs/ |
D | malloc.h | 42 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) in __ntfs_malloc() argument 47 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); in __ntfs_malloc() 51 return __vmalloc(size, gfp_mask, PAGE_KERNEL); in __ntfs_malloc()
|
/linux-4.1.27/net/sunrpc/auth_gss/ |
D | gss_krb5_mech.c | 378 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_des3() argument 405 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_des3() 496 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_new() argument 512 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 527 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 542 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 552 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 562 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 572 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 608 gfp_t gfp_mask) in gss_import_v2_context() argument [all …]
|
D | gss_krb5_keys.c | 145 gfp_t gfp_mask) in krb5_derive_key() argument 170 inblockdata = kmalloc(blocksize, gfp_mask); in krb5_derive_key() 174 outblockdata = kmalloc(blocksize, gfp_mask); in krb5_derive_key() 178 rawkey = kmalloc(keybytes, gfp_mask); in krb5_derive_key()
|
D | gss_mech_switch.c | 391 gfp_t gfp_mask) in gss_import_sec_context() argument 393 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) in gss_import_sec_context() 398 *ctx_id, endtime, gfp_mask); in gss_import_sec_context()
|
/linux-4.1.27/drivers/connector/ |
D | connector.c | 74 gfp_t gfp_mask) in cn_netlink_send_mult() argument 108 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult() 126 gfp_mask); in cn_netlink_send_mult() 127 return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); in cn_netlink_send_mult() 133 gfp_t gfp_mask) in cn_netlink_send() argument 135 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); in cn_netlink_send()
|
/linux-4.1.27/drivers/scsi/ |
D | scsi.c | 154 gfp_t gfp_mask; member 167 .gfp_mask = __GFP_DMA, 200 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) in scsi_host_alloc_command() argument 205 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); in scsi_host_alloc_command() 210 gfp_mask | pool->gfp_mask); in scsi_host_alloc_command() 215 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); in scsi_host_alloc_command() 239 __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) in __scsi_get_command() argument 241 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); in __scsi_get_command() 277 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) in scsi_get_command() argument 279 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); in scsi_get_command() [all …]
|
D | hosts.c | 372 gfp_t gfp_mask = GFP_KERNEL; in scsi_host_alloc() local 375 gfp_mask |= __GFP_DMA; in scsi_host_alloc() 377 shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask); in scsi_host_alloc()
|
D | sg.c | 1824 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; in sg_build_indirect() local 1851 gfp_mask |= GFP_DMA; in sg_build_indirect() 1854 gfp_mask |= __GFP_ZERO; in sg_build_indirect() 1866 schp->pages[k] = alloc_pages(gfp_mask, order); in sg_build_indirect()
|
D | eata.c | 1347 gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC; in port_detect() local 1348 ha->cp[i].sglist = kmalloc(sz, gfp_mask); in port_detect()
|
D | scsi_lib.c | 576 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) in scsi_sg_alloc() argument 581 return mempool_alloc(sgp->pool, gfp_mask); in scsi_sg_alloc()
|
/linux-4.1.27/include/trace/events/ |
D | compaction.h | 171 gfp_t gfp_mask, 174 TP_ARGS(order, gfp_mask, mode), 178 __field(gfp_t, gfp_mask) 184 __entry->gfp_mask = gfp_mask; 190 __entry->gfp_mask,
|
D | vmscan.h | 209 __entry->gfp_flags = sc->gfp_mask;
|
/linux-4.1.27/drivers/infiniband/core/ |
D | sa_query.c | 570 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) in alloc_mad() argument 586 gfp_mask); in alloc_mad() 619 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) in send_mad() argument 621 bool preload = !!(gfp_mask & __GFP_WAIT); in send_mad() 626 idr_preload(gfp_mask); in send_mad() 722 int timeout_ms, gfp_t gfp_mask, in ib_sa_path_rec_get() argument 742 query = kmalloc(sizeof *query, gfp_mask); in ib_sa_path_rec_get() 747 ret = alloc_mad(&query->sa_query, gfp_mask); in ib_sa_path_rec_get() 769 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); in ib_sa_path_rec_get() 839 int timeout_ms, gfp_t gfp_mask, in ib_sa_service_rec_query() argument [all …]
|
D | sa.h | 56 int timeout_ms, gfp_t gfp_mask,
|
D | multicast.c | 563 union ib_gid *mgid, gfp_t gfp_mask) in acquire_group() argument 578 group = kzalloc(sizeof *group, gfp_mask); in acquire_group() 615 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, in ib_sa_join_multicast() argument 629 member = kmalloc(sizeof *member, gfp_mask); in ib_sa_join_multicast() 644 &rec->mgid, gfp_mask); in ib_sa_join_multicast()
|
D | mad.c | 873 gfp_t gfp_mask) in alloc_send_rmpp_list() argument 886 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); in alloc_send_rmpp_list() 890 sizeof (*seg) + seg_size, gfp_mask); in alloc_send_rmpp_list() 923 gfp_t gfp_mask) in ib_create_send_mad() argument 943 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); in ib_create_send_mad() 970 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask); in ib_create_send_mad()
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
D | obd_support.h | 641 #define OBD_ALLOC_GFP(ptr, size, gfp_mask) \ argument 642 __OBD_MALLOC_VERBOSE(ptr, NULL, 0, size, gfp_mask) 649 #define OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, gfp_mask) \ argument 650 __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask) 820 #define __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask) \ argument 823 alloc_page(gfp_mask) : \ 824 alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), gfp_mask, 0);\ 845 #define OBD_PAGE_ALLOC(ptr, gfp_mask) \ argument 846 __OBD_PAGE_ALLOC_VERBOSE(ptr, NULL, 0, gfp_mask) 847 #define OBD_PAGE_CPT_ALLOC(ptr, cptab, cpt, gfp_mask) \ argument [all …]
|
D | lustre_dlm.h | 213 gfp_t gfp_mask); 1464 gfp_t gfp_mask);
|
/linux-4.1.27/kernel/power/ |
D | snapshot.c | 97 static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument 101 res = (void *)get_zeroed_page(gfp_mask); in get_image_page() 107 res = (void *)get_zeroed_page(gfp_mask); in get_image_page() 116 unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument 118 return (unsigned long)get_image_page(gfp_mask, PG_SAFE); in get_safe_page() 121 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument 125 page = alloc_page(gfp_mask); in alloc_image_page() 191 gfp_t gfp_mask; /* mask for allocating pages */ member 196 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed) in chain_init() argument 200 ca->gfp_mask = gfp_mask; in chain_init() [all …]
|
/linux-4.1.27/arch/tile/mm/ |
D | homecache.c | 384 struct page *homecache_alloc_pages(gfp_t gfp_mask, in homecache_alloc_pages() argument 388 BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ in homecache_alloc_pages() 389 page = alloc_pages(gfp_mask, order); in homecache_alloc_pages() 396 struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, in homecache_alloc_pages_node() argument 400 BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ in homecache_alloc_pages_node() 401 page = alloc_pages_node(nid, gfp_mask, order); in homecache_alloc_pages_node()
|
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/ |
D | ldlm_pool.c | 377 int nr, gfp_t gfp_mask) in ldlm_srv_pool_shrink() argument 529 int nr, gfp_t gfp_mask) in ldlm_cli_pool_shrink() argument 622 gfp_t gfp_mask) in ldlm_pool_shrink() argument 627 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); in ldlm_pool_shrink() 1054 static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) in ldlm_pools_count() argument 1061 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) in ldlm_pools_count() 1099 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask); in ldlm_pools_count() 1107 static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) in ldlm_pools_scan() argument 1114 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) in ldlm_pools_scan() 1145 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); in ldlm_pools_scan() [all …]
|
/linux-4.1.27/net/core/ |
D | skbuff.c | 160 struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) in __alloc_skb_head() argument 166 gfp_mask & ~__GFP_DMA, node); in __alloc_skb_head() 202 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, in __alloc_skb() argument 215 gfp_mask |= __GFP_MEMALLOC; in __alloc_skb() 218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb() 230 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); in __alloc_skb() 363 gfp_t gfp_mask) in __page_frag_refill() argument 367 gfp_t gfp = gfp_mask; in __page_frag_refill() 370 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | in __page_frag_refill() 372 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); in __page_frag_refill() [all …]
|
/linux-4.1.27/drivers/staging/android/ |
D | lowmemorykiller.c | 107 sc->nr_to_scan, sc->gfp_mask, other_free, in lowmem_scan() 112 sc->nr_to_scan, sc->gfp_mask); in lowmem_scan() 174 sc->nr_to_scan, sc->gfp_mask, rem); in lowmem_scan()
|
D | ashmem.c | 441 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan() 783 .gfp_mask = GFP_KERNEL, in ashmem_ioctl()
|
/linux-4.1.27/kernel/ |
D | audit.c | 182 gfp_t gfp_mask; member 434 static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask) in kauditd_send_multicast_skb() argument 453 copy = skb_copy(skb, gfp_mask); in kauditd_send_multicast_skb() 457 nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask); in kauditd_send_multicast_skb() 1235 gfp_t gfp_mask, int type) in audit_buffer_alloc() argument 1251 ab = kmalloc(sizeof(*ab), gfp_mask); in audit_buffer_alloc() 1257 ab->gfp_mask = gfp_mask; in audit_buffer_alloc() 1259 ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); in audit_buffer_alloc() 1344 struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, in audit_log_start() argument 1360 if (gfp_mask & __GFP_WAIT) { in audit_log_start() [all …]
|
D | kmod.c | 502 char **envp, gfp_t gfp_mask, in call_usermodehelper_setup() argument 508 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); in call_usermodehelper_setup() 601 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; in call_usermodehelper() local 603 info = call_usermodehelper_setup(path, argv, envp, gfp_mask, in call_usermodehelper()
|
D | kexec.c | 141 gfp_t gfp_mask, 640 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages() argument 644 pages = alloc_pages(gfp_mask, order); in kimage_alloc_pages() 966 gfp_t gfp_mask, in kimage_alloc_page() argument 1006 page = kimage_alloc_pages(gfp_mask, 0); in kimage_alloc_page() 1046 if (!(gfp_mask & __GFP_HIGHMEM) && in kimage_alloc_page()
|
D | cpuset.c | 2491 int __cpuset_node_allowed(int node, gfp_t gfp_mask) in __cpuset_node_allowed() argument 2507 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ in __cpuset_node_allowed()
|
D | workqueue.c | 3079 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) in alloc_workqueue_attrs() argument 3083 attrs = kzalloc(sizeof(*attrs), gfp_mask); in alloc_workqueue_attrs() 3086 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) in alloc_workqueue_attrs()
|
D | cgroup.c | 200 gfp_t gfp_mask) in cgroup_idr_alloc() argument 204 idr_preload(gfp_mask); in cgroup_idr_alloc() 206 ret = idr_alloc(idr, ptr, start, end, gfp_mask); in cgroup_idr_alloc()
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c | 107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) in mthca_alloc_icm_pages() argument 115 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in mthca_alloc_icm_pages() 124 int order, gfp_t gfp_mask) in mthca_alloc_icm_coherent() argument 127 gfp_mask); in mthca_alloc_icm_coherent() 138 gfp_t gfp_mask, int coherent) in mthca_alloc_icm() argument 146 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mthca_alloc_icm() 148 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mthca_alloc_icm() 160 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mthca_alloc_icm() 176 cur_order, gfp_mask); in mthca_alloc_icm() 179 cur_order, gfp_mask); in mthca_alloc_icm()
|
D | mthca_memfree.h | 83 gfp_t gfp_mask, int coherent);
|
D | mthca_cmd.h | 252 gfp_t gfp_mask);
|
D | mthca_cmd.c | 608 gfp_t gfp_mask) in mthca_alloc_mailbox() argument 612 mailbox = kmalloc(sizeof *mailbox, gfp_mask); in mthca_alloc_mailbox() 616 mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); in mthca_alloc_mailbox()
|
/linux-4.1.27/drivers/net/wireless/ath/ |
D | main.c | 31 gfp_t gfp_mask) in ath_rxbuf_alloc() argument 49 skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask); in ath_rxbuf_alloc()
|
D | ath.h | 198 gfp_t gfp_mask);
|
/linux-4.1.27/arch/tile/kernel/ |
D | machine_kexec.c | 215 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages_arch() argument 217 gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; in kimage_alloc_pages_arch() 218 return alloc_pages_node(0, gfp_mask, order); in kimage_alloc_pages_arch()
|
/linux-4.1.27/net/ceph/ |
D | msgpool.c | 10 static void *msgpool_alloc(gfp_t gfp_mask, void *arg) in msgpool_alloc() argument 15 msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); in msgpool_alloc()
|
/linux-4.1.27/include/rdma/ |
D | ib_sa.h | 303 int timeout_ms, gfp_t gfp_mask, 315 int timeout_ms, gfp_t gfp_mask, 361 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, 424 int timeout_ms, gfp_t gfp_mask,
|
D | ib_mad.h | 636 gfp_t gfp_mask);
|
/linux-4.1.27/drivers/md/ |
D | dm-bufio.c | 369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument 377 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); in alloc_buffer_data() 381 gfp_mask & __GFP_NORETRY) { in alloc_buffer_data() 383 return (void *)__get_free_pages(gfp_mask, in alloc_buffer_data() 399 if (gfp_mask & __GFP_NORETRY) in alloc_buffer_data() 402 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); in alloc_buffer_data() 404 if (gfp_mask & __GFP_NORETRY) in alloc_buffer_data() 439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument 442 gfp_mask); in alloc_buffer() 449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer() [all …]
|
D | dm.c | 613 gfp_t gfp_mask) in alloc_rq_tio() argument 615 return mempool_alloc(md->io_pool, gfp_mask); in alloc_rq_tio() 624 gfp_t gfp_mask) in alloc_clone_request() argument 626 return mempool_alloc(md->rq_pool, gfp_mask); in alloc_clone_request() 1835 struct dm_rq_target_io *tio, gfp_t gfp_mask) in setup_clone() argument 1839 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, in setup_clone() 1856 struct dm_rq_target_io *tio, gfp_t gfp_mask) in clone_rq() argument 1866 clone = alloc_clone_request(md, gfp_mask); in clone_rq() 1873 if (setup_clone(clone, rq, tio, gfp_mask)) { in clone_rq() 1899 struct mapped_device *md, gfp_t gfp_mask) in prep_tio() argument [all …]
|
D | md.h | 666 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 668 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
|
D | dm-crypt.c | 978 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; in crypt_alloc_buffer() local 984 if (unlikely(gfp_mask & __GFP_WAIT)) in crypt_alloc_buffer() 996 page = mempool_alloc(cc->page_pool, gfp_mask); in crypt_alloc_buffer() 1000 gfp_mask |= __GFP_WAIT; in crypt_alloc_buffer() 1017 if (unlikely(gfp_mask & __GFP_WAIT)) in crypt_alloc_buffer()
|
/linux-4.1.27/include/linux/sunrpc/ |
D | gss_api.h | 52 gfp_t gfp_mask); 110 gfp_t gfp_mask);
|
D | gss_krb5.h | 299 gfp_t gfp_mask);
|
/linux-4.1.27/security/integrity/ima/ |
D | ima_crypto.c | 129 gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY; in ima_alloc_pages() local 135 ptr = (void *)__get_free_pages(gfp_mask, order); in ima_alloc_pages() 144 gfp_mask = GFP_KERNEL; in ima_alloc_pages() 147 gfp_mask |= __GFP_NOWARN; in ima_alloc_pages() 149 ptr = (void *)__get_free_pages(gfp_mask, 0); in ima_alloc_pages()
|
/linux-4.1.27/Documentation/connector/ |
D | connector.txt | 27 void cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __group, int gfp_mask); 28 void cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, int gfp_mask); 75 int cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __groups, int gfp_mask); 76 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __groups, int gfp_mask); 95 int gfp_mask - GFP mask.
|
/linux-4.1.27/fs/nfs/ |
D | nfs4session.c | 103 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_new_slot() argument 107 slot = kzalloc(sizeof(*slot), gfp_mask); in nfs4_new_slot() 117 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_find_or_create_slot() argument 125 seq_init, gfp_mask); in nfs4_find_or_create_slot()
|
D | pnfs_dev.c | 189 gfp_t gfp_mask) in nfs4_find_get_deviceid() argument 198 new = nfs4_get_device_info(server, id, cred, gfp_mask); in nfs4_find_get_deviceid()
|
D | nfs4_fs.h | 244 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait); 443 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
|
D | nfs4state.c | 745 fmode_t fmode, gfp_t gfp_mask, int wait) in __nfs4_close() argument 786 nfs4_do_close(state, gfp_mask, wait); in __nfs4_close() 1015 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) in nfs_alloc_seqid() argument 1019 new = kmalloc(sizeof(*new), gfp_mask); in nfs_alloc_seqid()
|
D | pnfs.h | 317 gfp_t gfp_mask);
|
D | nfs4proc.c | 1006 gfp_t gfp_mask) in nfs4_opendata_alloc() argument 1014 p = kzalloc(sizeof(*p), gfp_mask); in nfs4_opendata_alloc() 1018 p->f_label = nfs4_label_alloc(server, gfp_mask); in nfs4_opendata_alloc() 1023 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); in nfs4_opendata_alloc() 2784 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) in nfs4_do_close() argument 2807 calldata = kzalloc(sizeof(*calldata), gfp_mask); in nfs4_do_close() 2816 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); in nfs4_do_close() 5590 gfp_t gfp_mask) in nfs4_alloc_lockdata() argument 5597 p = kzalloc(sizeof(*p), gfp_mask); in nfs4_alloc_lockdata() 5603 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); in nfs4_alloc_lockdata() [all …]
|
D | dir.c | 2158 gfp_t gfp_mask = sc->gfp_mask; in nfs_access_cache_scan() local 2160 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) in nfs_access_cache_scan()
|
/linux-4.1.27/drivers/net/wireless/ipw2x00/ |
D | libipw_tx.c | 192 int headroom, gfp_t gfp_mask) in libipw_alloc_txb() argument 197 gfp_mask); in libipw_alloc_txb() 207 gfp_mask); in libipw_alloc_txb()
|
/linux-4.1.27/drivers/staging/fwserial/ |
D | dma_fifo.h | 86 int tx_limit, int open_limit, gfp_t gfp_mask);
|
D | dma_fifo.c | 68 int tx_limit, int open_limit, gfp_t gfp_mask) in dma_fifo_alloc() argument 77 fifo->data = kmalloc(capacity, gfp_mask); in dma_fifo_alloc()
|
/linux-4.1.27/drivers/usb/wusbcore/ |
D | wa-hc.h | 270 static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) in wa_nep_arm() argument 275 return usb_submit_urb(urb, gfp_mask); in wa_nep_arm()
|
/linux-4.1.27/include/linux/netfilter/ |
D | nfnetlink.h | 38 u32 dst_portid, gfp_t gfp_mask);
|
/linux-4.1.27/security/selinux/ss/ |
D | mls.h | 40 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
|
D | mls.c | 377 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask) in mls_from_string() argument 387 tmpstr = freestr = kstrdup(str, gfp_mask); in mls_from_string()
|
/linux-4.1.27/fs/ |
D | mbcache.c | 191 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) in __mb_cache_entry_forget() argument 253 gfp_t gfp_mask = sc->gfp_mask; in mb_cache_shrink_scan() local 284 __mb_cache_entry_forget(entry, gfp_mask); in mb_cache_shrink_scan()
|
D | buffer.c | 983 gfp_t gfp_mask; in grow_dev_page() local 985 gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp; in grow_dev_page() 993 gfp_mask |= __GFP_NOFAIL; in grow_dev_page() 995 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
|
D | super.c | 71 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()
|
/linux-4.1.27/fs/nilfs2/ |
D | mdt.h | 89 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
|
D | mdt.c | 473 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) in nilfs_mdt_init() argument 485 mapping_set_gfp_mask(inode->i_mapping, gfp_mask); in nilfs_mdt_init()
|
/linux-4.1.27/fs/jbd2/ |
D | transaction.c | 258 gfp_t gfp_mask) in start_this_handle() argument 282 gfp_mask); in start_this_handle() 291 if ((gfp_mask & __GFP_FS) == 0) { in start_this_handle() 415 gfp_t gfp_mask, unsigned int type, in jbd2__journal_start() argument 446 err = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_start() 622 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) in jbd2__journal_restart() argument 665 ret = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_restart() 1880 struct page *page, gfp_t gfp_mask) in jbd2_journal_try_to_free_buffers() argument
|
/linux-4.1.27/arch/s390/pci/ |
D | pci_clp.c | 49 static void *clp_alloc_block(gfp_t gfp_mask) in clp_alloc_block() argument 51 return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE)); in clp_alloc_block()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/ |
D | rx.c | 272 gfp_t gfp_mask = priority; in iwl_pcie_rxq_alloc_rbs() local 283 gfp_mask |= __GFP_NOWARN; in iwl_pcie_rxq_alloc_rbs() 286 gfp_mask |= __GFP_COMP; in iwl_pcie_rxq_alloc_rbs() 289 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); in iwl_pcie_rxq_alloc_rbs()
|
/linux-4.1.27/fs/gfs2/ |
D | inode.h | 18 extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
|
D | aops.c | 1116 int gfs2_releasepage(struct page *page, gfp_t gfp_mask) in gfs2_releasepage() argument
|
/linux-4.1.27/drivers/net/wireless/ |
D | zd1201.c | 521 gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; in zd1201_setconfig() local 528 request = kmalloc(16, gfp_mask); in zd1201_setconfig() 531 urb = usb_alloc_urb(0, gfp_mask); in zd1201_setconfig() 557 err = usb_submit_urb(urb, gfp_mask); in zd1201_setconfig() 562 request = kmalloc(16, gfp_mask); in zd1201_setconfig() 565 urb = usb_alloc_urb(0, gfp_mask); in zd1201_setconfig() 578 err = usb_submit_urb(urb, gfp_mask); in zd1201_setconfig()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_metapage.c | 186 static inline struct metapage *alloc_metapage(gfp_t gfp_mask) in alloc_metapage() argument 188 struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask); in alloc_metapage() 539 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) in metapage_releasepage() argument
|
/linux-4.1.27/net/netfilter/ |
D | nfnetlink.c | 126 u32 dst_portid, gfp_t gfp_mask) in nfnetlink_alloc_skb() argument 128 return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask); in nfnetlink_alloc_skb()
|
/linux-4.1.27/kernel/locking/ |
D | lockdep.c | 2733 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) in __lockdep_trace_alloc() argument 2741 if (!(gfp_mask & __GFP_WAIT)) in __lockdep_trace_alloc() 2745 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) in __lockdep_trace_alloc() 2749 if (!(gfp_mask & __GFP_FS)) in __lockdep_trace_alloc() 2763 void lockdep_trace_alloc(gfp_t gfp_mask) in lockdep_trace_alloc() argument 2773 __lockdep_trace_alloc(gfp_mask, flags); in lockdep_trace_alloc() 2889 void lockdep_trace_alloc(gfp_t gfp_mask) in lockdep_trace_alloc() argument 3668 void lockdep_set_current_reclaim_state(gfp_t gfp_mask) in lockdep_set_current_reclaim_state() argument 3670 current->lockdep_reclaim_gfp = gfp_mask; in lockdep_set_current_reclaim_state()
|
/linux-4.1.27/include/net/ |
D | sch_generic.h | 752 static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, in skb_act_clone() argument 757 n = skb_clone(skb, gfp_mask); in skb_act_clone()
|
/linux-4.1.27/drivers/uwb/ |
D | uwb-internal.h | 234 extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
|
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/ |
D | ieee80211_tx.c | 241 gfp_t gfp_mask) in ieee80211_alloc_txb() argument 247 gfp_mask); in ieee80211_alloc_txb()
|
/linux-4.1.27/drivers/base/ |
D | devres.c | 946 gfp_t gfp_mask, unsigned int order) in devm_get_free_pages() argument 951 addr = __get_free_pages(gfp_mask, order); in devm_get_free_pages()
|
/linux-4.1.27/drivers/scsi/aic94xx/ |
D | aic94xx_hwi.h | 382 gfp_t gfp_mask);
|
/linux-4.1.27/fs/xfs/ |
D | xfs_iops.c | 1225 gfp_t gfp_mask; in xfs_setup_inode() local 1294 gfp_mask = mapping_gfp_mask(inode->i_mapping); in xfs_setup_inode() 1295 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
|
D | xfs_buf.c | 292 gfp_t gfp_mask = xb_to_gfp(flags); in xfs_buf_allocate_memory() local 341 page = alloc_page(gfp_mask); in xfs_buf_allocate_memory() 358 __func__, gfp_mask); in xfs_buf_allocate_memory()
|
/linux-4.1.27/drivers/staging/rtl8192e/ |
D | rtllib_tx.c | 223 gfp_t gfp_mask) in rtllib_alloc_txb() argument 229 gfp_mask); in rtllib_alloc_txb()
|
/linux-4.1.27/drivers/isdn/mISDN/ |
D | socket.c | 44 _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) in _l2_alloc_skb() argument 48 skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask); in _l2_alloc_skb()
|
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_cmn.c | 545 u16 index, gfp_t gfp_mask) in bnx2x_alloc_rx_sge() argument 547 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); in bnx2x_alloc_rx_sge() 670 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) in bnx2x_frag_alloc() argument 674 if (unlikely(gfp_mask & __GFP_WAIT)) in bnx2x_frag_alloc() 675 return (void *)__get_free_page(gfp_mask); in bnx2x_frag_alloc() 680 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc() 813 u16 index, gfp_t gfp_mask) in bnx2x_alloc_rx_data() argument 820 data = bnx2x_frag_alloc(fp, gfp_mask); in bnx2x_alloc_rx_data()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 117 static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) in ll_releasepage() argument
|
/linux-4.1.27/net/netlink/ |
D | af_netlink.c | 127 gfp_t gfp_mask) in netlink_to_full_skb() argument 132 new = alloc_skb(len, gfp_mask); in netlink_to_full_skb() 1861 u32 dst_portid, gfp_t gfp_mask) in netlink_alloc_skb() argument 1882 skb = alloc_skb_head(gfp_mask); in netlink_alloc_skb() 1923 return alloc_skb(size, gfp_mask); in netlink_alloc_skb()
|
/linux-4.1.27/Documentation/scsi/ |
D | libsas.txt | 306 @gfp_mask is the gfp_mask defining the context of the caller.
|
/linux-4.1.27/drivers/firewire/ |
D | core-cdev.c | 487 struct client_resource *resource, gfp_t gfp_mask) in add_client_resource() argument 489 bool preload = !!(gfp_mask & __GFP_WAIT); in add_client_resource() 494 idr_preload(gfp_mask); in add_client_resource()
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/ |
D | usb.c | 426 struct urb *urb, gfp_t gfp_mask) in _rtl_prep_rx_urb() argument 431 buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, in _rtl_prep_rx_urb()
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | mcg.c | 811 gfp_t gfp_mask) in acquire_group() argument 827 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
|
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/ |
D | echo_client.c | 1559 gfp_t gfp_mask; in echo_client_kbrw() local 1566 gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER; in echo_client_kbrw() 1599 OBD_PAGE_ALLOC(pgp->pg, gfp_mask); in echo_client_kbrw()
|
/linux-4.1.27/fs/ecryptfs/ |
D | crypto.c | 1161 static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, in ecryptfs_get_zeroed_pages() argument 1166 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in ecryptfs_get_zeroed_pages()
|
/linux-4.1.27/drivers/net/ethernet/sgi/ |
D | ioc3-eth.c | 125 unsigned int gfp_mask) in ioc3_alloc_skb() argument 129 skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask); in ioc3_alloc_skb()
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
D | 3945-mac.c | 1011 gfp_t gfp_mask = priority; in il3945_rx_allocate() local 1022 gfp_mask |= __GFP_NOWARN; in il3945_rx_allocate() 1025 gfp_mask |= __GFP_COMP; in il3945_rx_allocate() 1028 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); in il3945_rx_allocate()
|
/linux-4.1.27/net/sunrpc/ |
D | auth.c | 509 if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL) in rpcauth_cache_shrink_scan()
|
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/ |
D | lu_object.c | 1849 if (!(sc->gfp_mask & __GFP_FS)) in lu_cache_shrink_count() 1873 if (!(sc->gfp_mask & __GFP_FS)) in lu_cache_shrink_scan()
|
/linux-4.1.27/drivers/md/bcache/ |
D | sysfs.c | 622 sc.gfp_mask = GFP_KERNEL; in STORE()
|
/linux-4.1.27/drivers/net/ |
D | virtio_net.c | 197 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument 206 p = alloc_page(gfp_mask); in get_a_page()
|
/linux-4.1.27/net/ipv4/ |
D | tcp_output.c | 907 gfp_t gfp_mask) in tcp_transmit_skb() argument 925 skb = pskb_copy(skb, gfp_mask); in tcp_transmit_skb() 927 skb = skb_clone(skb, gfp_mask); in tcp_transmit_skb()
|
/linux-4.1.27/Documentation/block/ |
D | data-integrity.txt | 216 struct bip * bio_integrity_alloc(bio, gfp_mask, nr_pages);
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_driver.c | 1062 gfp_t gfp_mask) in ipath_alloc_skb() argument 1089 skb = __dev_alloc_skb(len, gfp_mask); in ipath_alloc_skb()
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 212 gfp_t gfp_mask, in srp_alloc_iu() argument 217 iu = kmalloc(sizeof *iu, gfp_mask); in srp_alloc_iu() 221 iu->buf = kzalloc(size, gfp_mask); in srp_alloc_iu()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_main.c | 149 struct bio *bio_alloc_drbd(gfp_t gfp_mask) in bio_alloc_drbd() argument 154 return bio_alloc(gfp_mask, 1); in bio_alloc_drbd() 156 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); in bio_alloc_drbd()
|
D | drbd_receiver.c | 342 unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local) in drbd_alloc_peer_req() argument 352 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); in drbd_alloc_peer_req() 354 if (!(gfp_mask & __GFP_NOWARN)) in drbd_alloc_peer_req() 360 page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT)); in drbd_alloc_peer_req()
|
/linux-4.1.27/fs/jbd/ |
D | transaction.c | 1727 struct page *page, gfp_t gfp_mask) in journal_try_to_free_buffers() argument
|