Lines Matching refs:nvbo

138 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_del_ttm() local
140 if (unlikely(nvbo->gem.filp)) in nouveau_bo_del_ttm()
142 WARN_ON(nvbo->pin_refcnt > 0); in nouveau_bo_del_ttm()
143 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
144 kfree(nvbo); in nouveau_bo_del_ttm()
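The refs at lines 138-144 come from the TTM destructor nouveau_bo_del_ttm(): it complains if the BO is still attached to a GEM object or still pinned, releases the NV10-era tile region, and frees the wrapper. A minimal user-space model of that teardown order, with stub types and a hypothetical put_tile_region() standing in for the kernel helpers:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stub type; the real struct nouveau_bo lives in nouveau_bo.h. */
struct nouveau_bo {
	void *gem_filp;     /* models nvbo->gem.filp */
	int   pin_refcnt;   /* models nvbo->pin_refcnt */
	void *tile;         /* models nvbo->tile */
};

/* Hypothetical stand-in for nv10_bo_put_tile_region(). */
static void put_tile_region(void *tile)
{
	free(tile);
}

/* Teardown order modeled on nouveau_bo_del_ttm(): sanity checks
 * first, then tile release, then the final free of the wrapper. */
static void bo_del(struct nouveau_bo *nvbo)
{
	if (nvbo->gem_filp)
		fprintf(stderr, "bo %p still attached to GEM object\n",
			(void *)nvbo);
	if (nvbo->pin_refcnt > 0)
		fprintf(stderr, "WARN: freeing pinned bo %p\n", (void *)nvbo);
	put_tile_region(nvbo->tile);
	free(nvbo);
}

int main(void)
{
	struct nouveau_bo *nvbo = calloc(1, sizeof(*nvbo));
	bo_del(nvbo);
	return 0;
}
```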
148 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, in nouveau_bo_fixup_align() argument
151 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
155 if (nvbo->tile_mode) { in nouveau_bo_fixup_align()
158 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
162 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
166 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
170 *size = roundup(*size, 32 * nvbo->tile_mode); in nouveau_bo_fixup_align()
174 *size = roundup(*size, (1 << nvbo->page_shift)); in nouveau_bo_fixup_align()
175 *align = max((1 << nvbo->page_shift), *align); in nouveau_bo_fixup_align()
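The nouveau_bo_fixup_align() refs show the sizing rule: tiled BOs have their size rounded up to a multiple of 64 * tile_mode (32 * on one chipset branch), while untiled BOs round to the BO's page size and have their alignment raised to at least one page. A standalone sketch of that arithmetic; collapsing the per-chipset branches into a single `unit` multiplier is an assumption of this model:

```c
#include <stdint.h>
#include <stdio.h>

/* Round 'size' up to the next multiple of 'mult' (mult must be nonzero). */
static uint32_t roundup_u32(uint32_t size, uint32_t mult)
{
	return ((size + mult - 1) / mult) * mult;
}

/* Models nouveau_bo_fixup_align(): tiled BOs round to a multiple of
 * the tile pitch unit; untiled BOs round to the BO page size and the
 * alignment is raised to at least one BO page. 'unit' is 64 or 32
 * depending on chipset family in the real code. */
static void fixup_align(uint32_t tile_mode, uint32_t unit,
			uint32_t page_shift, uint32_t *size, uint32_t *align)
{
	if (tile_mode) {
		*size = roundup_u32(*size, unit * tile_mode);
	} else {
		*size = roundup_u32(*size, 1u << page_shift);
		if (*align < (1u << page_shift))
			*align = 1u << page_shift;
	}
}

int main(void)
{
	uint32_t size = 100000, align = 0;
	fixup_align(0, 64, 12, &size, &align);     /* untiled, 4 KiB pages */
	printf("size=%u align=%u\n", size, align); /* 102400, 4096 */
	size = 100000;
	fixup_align(4, 64, 12, &size, &align);     /* tiled, 64*4 byte unit */
	printf("size=%u\n", size);                 /* 100096 */
	return 0;
}
```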
188 struct nouveau_bo *nvbo; in nouveau_bo_new() local
207 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); in nouveau_bo_new()
208 if (!nvbo) in nouveau_bo_new()
210 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_new()
211 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_new()
212 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_new()
213 nvbo->tile_mode = tile_mode; in nouveau_bo_new()
214 nvbo->tile_flags = tile_flags; in nouveau_bo_new()
215 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_new()
218 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; in nouveau_bo_new()
220 nvbo->page_shift = 12; in nouveau_bo_new()
223 nvbo->page_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new()
226 nouveau_bo_fixup_align(nvbo, flags, &align, &size); in nouveau_bo_new()
227 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; in nouveau_bo_new()
228 nouveau_bo_placement_set(nvbo, flags, 0); in nouveau_bo_new()
233 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, in nouveau_bo_new()
234 type, &nvbo->placement, in nouveau_bo_new()
242 *pnvbo = nvbo; in nouveau_bo_new()
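Taken together, the nouveau_bo_new() refs above describe the construction sequence: kzalloc the wrapper, init its three list heads, record the tile state, choose a page shift (12, or the MMU's large-page shift when a large-page VM is usable), fix up size and alignment, seed the initial placement, and hand the embedded ttm_buffer_object to ttm_bo_init(). A condensed user-space model of that ordering; the types and helpers here are stubs, not the real API:

```c
#include <stdint.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };
static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

struct nouveau_bo {
	struct list_head head, entry, vma_list;
	uint32_t tile_mode, tile_flags;
	int force_coherent;
	unsigned page_shift;
};

/* Construction order modeled on nouveau_bo_new(): allocate zeroed,
 * init lists, record tiling, pick a page shift (the kernel uses the
 * MMU's lpg_shift when a large-page VM applies, else 12). */
static struct nouveau_bo *bo_new(uint32_t tile_mode, uint32_t tile_flags,
				 int uncached, unsigned lpg_shift)
{
	struct nouveau_bo *nvbo = calloc(1, sizeof(*nvbo));
	if (!nvbo)
		return NULL;
	init_list_head(&nvbo->head);
	init_list_head(&nvbo->entry);
	init_list_head(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->force_coherent = uncached;
	nvbo->page_shift = lpg_shift ? lpg_shift : 12;
	/* ...fixup_align(), placement_set() and ttm_bo_init() follow here. */
	return nvbo;
}

int main(void)
{
	struct nouveau_bo *nvbo = bo_new(0, 0, 0, 0);
	free(nvbo);
	return 0;
}
```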
260 set_placement_range(struct nouveau_bo *nvbo, uint32_t type) in set_placement_range() argument
262 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
267 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && in set_placement_range()
268 nvbo->bo.mem.num_pages < vram_pages / 4) { in set_placement_range()
275 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { in set_placement_range()
282 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
283 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
284 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
286 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in set_placement_range()
287 nvbo->busy_placements[i].fpfn = fpfn; in set_placement_range()
288 nvbo->busy_placements[i].lpfn = lpfn; in set_placement_range()
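set_placement_range() constrains every placement (and busy placement) to a page-frame window [fpfn, lpfn): on older chips, small tiled VRAM BOs are steered into either the front or back half of VRAM, with ZETA buffers taking the back half. A sketch of the clamping loops over flat placement arrays; the array sizes are assumptions of this model:

```c
#include <stdint.h>
#include <stdio.h>

struct place { uint32_t fpfn, lpfn; };

/* Models the tail of set_placement_range(): apply one pfn window to
 * both the normal and the busy placement lists. */
static void apply_range(struct place *pl, unsigned n,
			struct place *busy, unsigned nbusy,
			uint32_t fpfn, uint32_t lpfn)
{
	unsigned i;
	for (i = 0; i < n; ++i) {
		pl[i].fpfn = fpfn;
		pl[i].lpfn = lpfn;
	}
	for (i = 0; i < nbusy; ++i) {
		busy[i].fpfn = fpfn;
		busy[i].lpfn = lpfn;
	}
}

int main(void)
{
	struct place pl[3] = {{0}}, busy[3] = {{0}};
	uint32_t vram_pages = 1 << 18;
	/* ZETA buffers are pushed into the back half of VRAM (the kernel
	 * passes ~0 as the upper bound); other tiled BOs get the front. */
	apply_range(pl, 3, busy, 3, vram_pages / 2, vram_pages);
	printf("fpfn=%u lpfn=%u\n", pl[0].fpfn, pl[0].lpfn);
	return 0;
}
```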
294 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) in nouveau_bo_placement_set() argument
296 struct ttm_placement *pl = &nvbo->placement; in nouveau_bo_placement_set()
297 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : in nouveau_bo_placement_set()
299 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); in nouveau_bo_placement_set()
301 pl->placement = nvbo->placements; in nouveau_bo_placement_set()
302 set_placement_list(nvbo->placements, &pl->num_placement, in nouveau_bo_placement_set()
305 pl->busy_placement = nvbo->busy_placements; in nouveau_bo_placement_set()
306 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, in nouveau_bo_placement_set()
309 set_placement_range(nvbo, type); in nouveau_bo_placement_set()
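nouveau_bo_placement_set() rebuilds both placement lists from a requested domain mask plus two sticky flags derived from BO state: uncached when force_coherent is set, and no-evict while pin_refcnt is nonzero; it then applies the range restriction above. A sketch of just the flag composition; the flag values are illustrative, not the real TTM constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the TTM caching/eviction flags. */
#define PL_FLAG_UNCACHED (1u << 0)
#define PL_FLAG_CACHED   (1u << 1)
#define PL_FLAG_NO_EVICT (1u << 2)

/* Models the flag computation at the top of nouveau_bo_placement_set():
 * coherent BOs are forced uncached, pinned BOs become un-evictable. */
static uint32_t placement_flags(int force_coherent, int pin_refcnt)
{
	uint32_t flags = force_coherent ? PL_FLAG_UNCACHED : PL_FLAG_CACHED;
	if (pin_refcnt)
		flags |= PL_FLAG_NO_EVICT;
	return flags;
}

int main(void)
{
	printf("pinned+coherent: %#x\n", placement_flags(1, 1)); /* 0x5 */
	printf("plain:           %#x\n", placement_flags(0, 0)); /* 0x2 */
	return 0;
}
```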
313 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) in nouveau_bo_pin() argument
315 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin()
316 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
326 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { in nouveau_bo_pin()
332 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_bo_pin()
337 if (nvbo->pin_refcnt) { in nouveau_bo_pin()
344 nvbo->pin_refcnt++; in nouveau_bo_pin()
349 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); in nouveau_bo_pin()
350 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_pin()
355 nvbo->pin_refcnt++; in nouveau_bo_pin()
356 nouveau_bo_placement_set(nvbo, memtype, 0); in nouveau_bo_pin()
362 nvbo->pin_refcnt--; in nouveau_bo_pin()
363 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_pin()
366 nvbo->pin_refcnt++; in nouveau_bo_pin()
381 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_bo_pin()
387 nouveau_bo_unpin(struct nouveau_bo *nvbo) in nouveau_bo_unpin() argument
389 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin()
390 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
397 ref = --nvbo->pin_refcnt; in nouveau_bo_unpin()
402 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); in nouveau_bo_unpin()
404 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_unpin()
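nouveau_bo_pin() and nouveau_bo_unpin() form a refcount pair: repeat pins just bump pin_refcnt, the first pin validates the BO into the requested domain, and only the final unpin revalidates it, now without the no-evict flag. The decrement/re-increment pair visible at lines 362-366 is the kernel briefly dropping the count around validate so the pinned-object warning in nouveau_bo_move() (line 1299 below) does not fire. A small model of the protocol; the validate stub and the simplified ordering are assumptions:

```c
#include <stdio.h>

struct bo { int pin_refcnt; };

/* Stand-in for nouveau_bo_validate(); returns 0 on success. */
static int validate(struct bo *bo, int ok) { (void)bo; return ok ? 0 : -1; }

/* Models nouveau_bo_pin(): repeat pins just count; the first pin
 * validates into the target domain, and on failure the count is left
 * unchanged (the kernel gets the same net effect by incrementing
 * first and dropping the count around validate). */
static int pin(struct bo *bo, int validate_ok)
{
	if (bo->pin_refcnt) {
		bo->pin_refcnt++;
		return 0;
	}
	if (validate(bo, validate_ok))
		return -1;
	bo->pin_refcnt++;
	return 0;
}

/* Models nouveau_bo_unpin(): only the final unpin revalidates. */
static int unpin(struct bo *bo)
{
	if (--bo->pin_refcnt)
		return 0;
	return validate(bo, 1);
}

int main(void)
{
	struct bo bo = { 0 };
	pin(&bo, 1);
	pin(&bo, 1);
	unpin(&bo);
	unpin(&bo);
	printf("refcnt=%d\n", bo.pin_refcnt); /* prints 0 */
	return 0;
}
```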
424 nouveau_bo_map(struct nouveau_bo *nvbo) in nouveau_bo_map() argument
428 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); in nouveau_bo_map()
436 if (!nvbo->force_coherent) in nouveau_bo_map()
437 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, in nouveau_bo_map()
438 &nvbo->kmap); in nouveau_bo_map()
440 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
445 nouveau_bo_unmap(struct nouveau_bo *nvbo) in nouveau_bo_unmap() argument
447 if (!nvbo) in nouveau_bo_unmap()
454 if (!nvbo->force_coherent) in nouveau_bo_unmap()
455 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
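nouveau_bo_map() and nouveau_bo_unmap() bracket CPU access: map reserves the BO and kmaps its whole page range into nvbo->kmap, and both sides skip the kmap entirely for force_coherent BOs, which are already CPU-visible through their DMA-coherent allocation. A sketch of the guard with a dummy mapping in place of ttm_bo_kmap():

```c
#include <stdio.h>
#include <stdlib.h>

struct bo {
	int force_coherent;
	void *kmap;          /* models the nvbo->kmap virtual address */
	size_t num_pages;
};

/* Models nouveau_bo_map(): coherent BOs skip the kmap, everything
 * else gets a CPU mapping of the full page range. */
static int bo_map(struct bo *bo)
{
	if (bo->force_coherent)
		return 0;    /* already CPU-visible via the DMA allocation */
	bo->kmap = calloc(bo->num_pages, 4096);
	return bo->kmap ? 0 : -1;
}

/* Models nouveau_bo_unmap(), including its NULL and coherent guards. */
static void bo_unmap(struct bo *bo)
{
	if (!bo || bo->force_coherent)
		return;
	free(bo->kmap);
	bo->kmap = NULL;
}

int main(void)
{
	struct bo bo = { .force_coherent = 0, .num_pages = 2 };
	if (bo_map(&bo) == 0)
		printf("mapped %zu pages at %p\n", bo.num_pages, bo.kmap);
	bo_unmap(&bo);
	return 0;
}
```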
459 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) in nouveau_bo_sync_for_device() argument
461 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
463 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
470 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
479 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) in nouveau_bo_sync_for_cpu() argument
481 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
483 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
490 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
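The two sync helpers are the DMA-API bookends for BOs backed by a ttm_dma_tt (the casts at lines 463 and 483): before the device touches the pages each one is synced for the device, and before the CPU reads them back, synced for the CPU. Both bail out early for force_coherent BOs, where no cache maintenance is needed. A model of the loop shape; the per-page dma_sync_single_for_* calls are replaced by a stub:

```c
#include <stdio.h>

enum dir { FOR_DEVICE, FOR_CPU };

/* Stub for dma_sync_single_for_device()/_for_cpu(). */
static void dma_sync_page(unsigned long addr, enum dir d)
{
	printf("sync page %#lx %s\n", addr,
	       d == FOR_DEVICE ? "for device" : "for cpu");
}

/* Models both nouveau_bo_sync_for_device() and _for_cpu(): skip
 * coherent BOs, otherwise walk the ttm_dma_tt DMA-address array. */
static void bo_sync(int force_coherent, const unsigned long *dma_addrs,
		    unsigned n, enum dir d)
{
	unsigned i;
	if (force_coherent)
		return;              /* coherent memory needs no syncing */
	for (i = 0; i < n; ++i)
		dma_sync_page(dma_addrs[i], d);
}

int main(void)
{
	unsigned long pages[2] = { 0x1000, 0x2000 };
	bo_sync(0, pages, 2, FOR_DEVICE);
	bo_sync(0, pages, 2, FOR_CPU);
	return 0;
}
```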
499 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, in nouveau_bo_validate() argument
504 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, in nouveau_bo_validate()
509 nouveau_bo_sync_for_device(nvbo); in nouveau_bo_validate()
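nouveau_bo_validate() is a thin wrapper, but its ordering matters: it asks TTM to (re)place the BO according to nvbo->placement and, only on success, immediately syncs the pages for the device, which is why the pin, eviction, and fault paths all funnel through it. A minimal sketch of that sequencing, with both callees stubbed:

```c
#include <stdio.h>

/* Stubs for ttm_bo_validate() and nouveau_bo_sync_for_device(). */
static int ttm_validate(int ok) { return ok ? 0 : -1; }
static void sync_for_device(void) { printf("sync for device\n"); }

/* Models nouveau_bo_validate(): (re)place the BO per its placement
 * list, and only on success push the pages to the device's view. */
static int bo_validate(int placement_ok)
{
	int ret = ttm_validate(placement_ok);
	if (ret)
		return ret;
	sync_for_device();
	return 0;
}

int main(void)
{
	return bo_validate(1);
}
```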
515 _nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz) in _nouveau_bo_mem_index() argument
527 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; in _nouveau_bo_mem_index()
537 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) in nouveau_bo_wr16() argument
540 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
542 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_wr16()
551 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) in nouveau_bo_rd32() argument
554 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
556 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_rd32()
565 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) in nouveau_bo_wr32() argument
568 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
570 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_wr32()
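The rd/wr accessors all share one shape: take the kmap virtual address and, for coherent BOs, redirect through _nouveau_bo_mem_index(), which resolves the flat element index into the right page of the ttm_dma_tt CPU-address array instead of assuming one contiguous mapping. A sketch of that index resolution for 32-bit accesses; the 4 KiB page size is an assumption of this model:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Models _nouveau_bo_mem_index(): turn a flat element index of width
 * 'sz' bytes into a (page, offset) pair against a per-page CPU
 * address table. */
static void *mem_index(void **cpu_addrs, unsigned index, uint8_t sz)
{
	unsigned byte = index * sz;
	return (uint8_t *)cpu_addrs[byte / PAGE_SIZE] + byte % PAGE_SIZE;
}

/* Models nouveau_bo_wr32()/rd32() over that table. */
static void wr32(void **pages, unsigned index, uint32_t val)
{
	*(uint32_t *)mem_index(pages, index, 4) = val;
}

static uint32_t rd32(void **pages, unsigned index)
{
	return *(uint32_t *)mem_index(pages, index, 4);
}

int main(void)
{
	void *pages[2] = { malloc(PAGE_SIZE), malloc(PAGE_SIZE) };
	wr32(pages, 1025, 0xdeadbeef);     /* lands in the second page */
	printf("%#x\n", rd32(pages, 1025));
	free(pages[0]);
	free(pages[1]);
	return 0;
}
```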
666 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_evict_flags() local
670 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, in nouveau_bo_evict_flags()
674 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); in nouveau_bo_evict_flags()
678 *pl = nvbo->placement; in nouveau_bo_evict_flags()
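nouveau_bo_evict_flags() decides where an evicted BO should go: BOs in VRAM are retargeted at TT (GART) with SYSTEM as the busy fallback, everything else drops straight to SYSTEM, and the chosen placement is returned through *pl. A small decision-function model; the domain constants are illustrative:

```c
#include <stdio.h>

enum domain { DOM_SYSTEM, DOM_TT, DOM_VRAM };

/* Models nouveau_bo_evict_flags(): VRAM evicts toward GART,
 * everything else lands in system RAM. */
static enum domain evict_target(enum domain cur)
{
	return cur == DOM_VRAM ? DOM_TT : DOM_SYSTEM;
}

int main(void)
{
	printf("%d\n", evict_target(DOM_VRAM)); /* 1: DOM_TT */
	printf("%d\n", evict_target(DOM_TT));   /* 0: DOM_SYSTEM */
	return 0;
}
```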
1235 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_move_ntfy() local
1242 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1245 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { in nouveau_bo_move_ntfy()
1259 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_vm_bind() local
1268 nvbo->tile_mode, in nouveau_bo_vm_bind()
1269 nvbo->tile_flags); in nouveau_bo_vm_bind()
1293 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_move() local
1298 if (nvbo->pin_refcnt) in nouveau_bo_move()
1299 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); in nouveau_bo_move()
1340 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1349 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_verify_access() local
1351 return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp); in nouveau_bo_verify_access()
1423 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_ttm_fault_reserve_notify() local
1433 !nouveau_bo_tile_layout(nvbo)) in nouveau_ttm_fault_reserve_notify()
1437 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); in nouveau_ttm_fault_reserve_notify()
1439 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_ttm_fault_reserve_notify()
1451 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1452 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1453 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1456 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1457 nvbo->busy_placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1458 nvbo->busy_placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1461 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); in nouveau_ttm_fault_reserve_notify()
1462 return nouveau_bo_validate(nvbo, false, false); in nouveau_ttm_fault_reserve_notify()
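The fault path's job is to make a BO CPU-mappable before the page fault completes: if it already sits inside the BAR-mappable window nothing happens, otherwise every placement and busy placement is clamped to [0, mappable) and the BO is revalidated into VRAM, as the two loops at lines 1451-1458 show. A sketch of the clamp-and-check with mappable expressed in pages; only one placement array is shown, the busy list is handled identically:

```c
#include <stdint.h>
#include <stdio.h>

struct place { uint32_t fpfn, lpfn; };

/* Models the tail of nouveau_ttm_fault_reserve_notify(): restrict all
 * placements to the BAR-mappable part of VRAM, then ask the caller to
 * revalidate. */
static int make_mappable(struct place *pl, unsigned n,
			 uint32_t bo_start_page, uint32_t mappable)
{
	unsigned i;
	if (bo_start_page < mappable)
		return 0;                 /* already CPU-visible, fast path */
	for (i = 0; i < n; ++i) {
		pl[i].fpfn = 0;
		pl[i].lpfn = mappable;
	}
	return 1;                         /* caller must revalidate */
}

int main(void)
{
	struct place pl[2] = { { 0, 0 }, { 0, 0 } };
	if (make_mappable(pl, 2, 0x40000, 0x20000))
		printf("clamped to [0, %#x)\n", pl[0].lpfn);
	return 0;
}
```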
1593 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) in nouveau_bo_fence() argument
1595 struct reservation_object *resv = nvbo->bo.resv; in nouveau_bo_fence()
1619 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm) in nouveau_bo_vma_find() argument
1622 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_vma_find()
1631 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm, in nouveau_bo_vma_add() argument
1634 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; in nouveau_bo_vma_add()
1637 ret = nvkm_vm_get(vm, size, nvbo->page_shift, in nouveau_bo_vma_add()
1642 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && in nouveau_bo_vma_add()
1643 (nvbo->bo.mem.mem_type == TTM_PL_VRAM || in nouveau_bo_vma_add()
1644 nvbo->page_shift != vma->vm->mmu->lpg_shift)) in nouveau_bo_vma_add()
1645 nvkm_vm_map(vma, nvbo->bo.mem.mm_node); in nouveau_bo_vma_add()
1647 list_add_tail(&vma->head, &nvbo->vma_list); in nouveau_bo_vma_add()
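Each BO keeps a list of per-VM mappings: nouveau_bo_vma_find() walks vma_list for an entry belonging to a given VM, and nouveau_bo_vma_add() allocates address space with nvkm_vm_get(), maps it immediately when the backing store is already resident (VRAM, or GART with a matching page size), and links it onto the list. A sketch of the find/add pair over a singly linked stand-in for the kernel list:

```c
#include <stdio.h>
#include <stdlib.h>

struct vm;  /* opaque, models struct nvkm_vm */

struct vma {
	const struct vm *vm;
	struct vma *next;
};

struct bo { struct vma *vma_list; };

/* Models nouveau_bo_vma_find(): linear scan for this VM's mapping. */
static struct vma *vma_find(struct bo *bo, const struct vm *vm)
{
	struct vma *v;
	for (v = bo->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

/* Models nouveau_bo_vma_add(): allocate, map if resident, link. */
static struct vma *vma_add(struct bo *bo, const struct vm *vm, int resident)
{
	struct vma *v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;
	v->vm = vm;
	if (resident) {
		/* nvkm_vm_map() would populate the page tables here */
	}
	v->next = bo->vma_list;
	bo->vma_list = v;
	return v;
}

int main(void)
{
	struct bo bo = { 0 };
	const struct vm *vm = (const struct vm *)&bo;  /* dummy token */
	vma_add(&bo, vm, 1);
	printf("found: %s\n", vma_find(&bo, vm) ? "yes" : "no");
	free(bo.vma_list);
	return 0;
}
```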
1653 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma) in nouveau_bo_vma_del() argument
1656 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) in nouveau_bo_vma_del()