alloc_order 266 arch/powerpc/include/asm/opal.h int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
alloc_order 1540 arch/powerpc/sysdev/xive/common.c unsigned int alloc_order;
alloc_order 1544 arch/powerpc/sysdev/xive/common.c alloc_order = xive_alloc_order(queue_shift);
alloc_order 1545 arch/powerpc/sysdev/xive/common.c pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
alloc_order 231 arch/powerpc/sysdev/xive/native.c unsigned int alloc_order;
alloc_order 238 arch/powerpc/sysdev/xive/native.c alloc_order = xive_alloc_order(xive_queue_shift);
alloc_order 239 arch/powerpc/sysdev/xive/native.c free_pages((unsigned long)q->qpage, alloc_order);
alloc_order 527 arch/powerpc/sysdev/xive/spapr.c unsigned int alloc_order;
alloc_order 536 arch/powerpc/sysdev/xive/spapr.c alloc_order = xive_alloc_order(xive_queue_shift);
alloc_order 537 arch/powerpc/sysdev/xive/spapr.c free_pages((unsigned long)q->qpage, alloc_order);
alloc_order 127 drivers/dma/ioat/dma.h u16 alloc_order;
alloc_order 310 drivers/dma/ioat/dma.h return 1 << ioat_chan->alloc_order;
alloc_order 616 drivers/dma/ioat/init.c const int total_descs = 1 << ioat_chan->alloc_order;
alloc_order 664 drivers/dma/ioat/init.c ioat_chan->alloc_order = 0;
alloc_order 689 drivers/dma/ioat/init.c return 1 << ioat_chan->alloc_order;
alloc_order 718 drivers/dma/ioat/init.c ioat_chan->alloc_order = order;
alloc_order 754 drivers/dma/ioat/init.c return 1 << ioat_chan->alloc_order;
alloc_order 115 drivers/dma/ioat/sysfs.c return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
alloc_order 80 drivers/lightnvm/pblk-rb.c unsigned int alloc_order, order, iter;
alloc_order 104 drivers/lightnvm/pblk-rb.c alloc_order = power_size;
alloc_order 105 drivers/lightnvm/pblk-rb.c if (alloc_order >= max_order) {
alloc_order 107 drivers/lightnvm/pblk-rb.c iter = (1 << (alloc_order - max_order));
alloc_order 109 drivers/lightnvm/pblk-rb.c order = alloc_order;
alloc_order 286 drivers/net/ethernet/amd/xgbe/xgbe-desc.c struct xgbe_page_alloc *pa, int alloc_order,
alloc_order 295 drivers/net/ethernet/amd/xgbe/xgbe-desc.c order = alloc_order;
alloc_order 252 drivers/net/ethernet/brocade/bna/bnad.c unmap_q->alloc_order = -1;
alloc_order 271 drivers/net/ethernet/brocade/bna/bnad.c unmap_q->alloc_order = 0;
alloc_order 275 drivers/net/ethernet/brocade/bna/bnad.c unmap_q->alloc_order = 0;
alloc_order 279 drivers/net/ethernet/brocade/bna/bnad.c unmap_q->alloc_order = order;
alloc_order 352 drivers/net/ethernet/brocade/bna/bnad.c alloc_size = PAGE_SIZE << unmap_q->alloc_order;
alloc_order 360 drivers/net/ethernet/brocade/bna/bnad.c unmap_q->alloc_order);
alloc_order 253 drivers/net/ethernet/brocade/bna/bnad.h int alloc_order;
alloc_order 281 include/trace/events/kmem.h int alloc_order, int fallback_order,
alloc_order 285 include/trace/events/kmem.h alloc_order, fallback_order,
alloc_order 290 include/trace/events/kmem.h __field( int, alloc_order )
alloc_order 299 include/trace/events/kmem.h __entry->alloc_order = alloc_order;
alloc_order 310 include/trace/events/kmem.h __entry->alloc_order,
alloc_order 2512 mm/page_alloc.c unsigned int alloc_order)
alloc_order 3801 mm/vmscan.c static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
alloc_order 3832 mm/vmscan.c wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
alloc_order 3896 mm/vmscan.c unsigned int alloc_order, reclaim_order;
alloc_order 3925 mm/vmscan.c alloc_order = reclaim_order = pgdat->kswapd_order;
alloc_order 3929 mm/vmscan.c kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
alloc_order 3933 mm/vmscan.c alloc_order = reclaim_order = pgdat->kswapd_order;
alloc_order 3958 mm/vmscan.c alloc_order);
alloc_order 3959 mm/vmscan.c reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
alloc_order 3960 mm/vmscan.c if (reclaim_order < alloc_order)
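
Nearly every occurrence above follows the same buddy-allocator convention: alloc_order is the log2 of the number of contiguous pages passed to alloc_pages_node() and later handed back with the same order to free_pages(), which is why drivers recover element counts with 1 << alloc_order (ioat) and byte sizes with PAGE_SIZE << alloc_order (bnad). The sketch below is a minimal, hypothetical user-space illustration of that arithmetic only, not code from any of the files listed; size_to_order() stands in for the kernel's get_order().

/*
 * Hypothetical user-space sketch of the "allocation order" arithmetic
 * shared by the occurrences above: order n means 2^n contiguous pages,
 * so the byte size is PAGE_SIZE << n and the element count is 1 << n.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round a byte count up to the smallest order that covers it
 * (stand-in for the kernel's get_order()). */
static unsigned int size_to_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long queue_bytes = 96 * 1024;	/* example buffer size */
	unsigned int alloc_order = size_to_order(queue_bytes);

	printf("order %u -> %lu pages, %lu bytes\n",
	       alloc_order, 1UL << alloc_order, PAGE_SIZE << alloc_order);
	return 0;
}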