Lines matching refs: nvbo (in drivers/gpu/drm/nouveau/nouveau_bo.c)
133 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_del_ttm() local
135 if (unlikely(nvbo->gem.filp)) in nouveau_bo_del_ttm()
137 WARN_ON(nvbo->pin_refcnt > 0); in nouveau_bo_del_ttm()
138 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
139 kfree(nvbo); in nouveau_bo_del_ttm()
143 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, in nouveau_bo_fixup_align() argument
146 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
150 if (nvbo->tile_mode) { in nouveau_bo_fixup_align()
153 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
157 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
161 *size = roundup(*size, 64 * nvbo->tile_mode); in nouveau_bo_fixup_align()
165 *size = roundup(*size, 32 * nvbo->tile_mode); in nouveau_bo_fixup_align()
169 *size = roundup(*size, (1 << nvbo->page_shift)); in nouveau_bo_fixup_align()
170 *align = max((1 << nvbo->page_shift), *align); in nouveau_bo_fixup_align()
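For tiled buffers, nouveau_bo_fixup_align() rounds the size up to a whole number of tile rows (steps of 64 * tile_mode bytes on most of these pre-NV50 paths, 32 * tile_mode on the last), while untiled buffers are rounded to the BO's page size instead. A minimal sketch of the same arithmetic, with hypothetical example values:

    /* Sketch only, hypothetical values: round a size the way the
     * tiled paths above do. roundup(x, y) is the kernel macro that
     * rounds x up to the next multiple of y. */
    u32 tile_mode = 2;                    /* example tiling mode */
    u32 size = 100000;
    size = roundup(size, 64 * tile_mode); /* step 128 -> 100096 */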
183 struct nouveau_bo *nvbo; in nouveau_bo_new() local
202 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); in nouveau_bo_new()
203 if (!nvbo) in nouveau_bo_new()
205 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_new()
206 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_new()
207 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_new()
208 nvbo->tile_mode = tile_mode; in nouveau_bo_new()
209 nvbo->tile_flags = tile_flags; in nouveau_bo_new()
210 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_new()
213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; in nouveau_bo_new()
215 nvbo->page_shift = 12; in nouveau_bo_new()
218 nvbo->page_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new()
221 nouveau_bo_fixup_align(nvbo, flags, &align, &size); in nouveau_bo_new()
222 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; in nouveau_bo_new()
223 nouveau_bo_placement_set(nvbo, flags, 0); in nouveau_bo_new()
228 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, in nouveau_bo_new()
229 type, &nvbo->placement, in nouveau_bo_new()
237 *pnvbo = nvbo; in nouveau_bo_new()
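A typical caller, sketched under the assumption that this is the kernel-4.x era signature of nouveau_bo_new() (drm device, size, align, TTM domain flags, tile mode/flags, optional sg table and reservation object, out pointer); the size and domain here are illustrative only:

    struct nouveau_bo *nvbo = NULL;
    int ret;

    /* Assumed signature for this era; NULL sg/resv means a plain
     * driver-internal allocation with its own reservation object. */
    ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
                         0 /* tile_mode */, 0 /* tile_flags */,
                         NULL, NULL, &nvbo);
    if (ret)
        return ret;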
255 set_placement_range(struct nouveau_bo *nvbo, uint32_t type) in set_placement_range() argument
257 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
262 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && in set_placement_range()
263 nvbo->bo.mem.num_pages < vram_pages / 4) { in set_placement_range()
270 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { in set_placement_range()
277 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
278 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
279 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
281 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in set_placement_range()
282 nvbo->busy_placements[i].fpfn = fpfn; in set_placement_range()
283 nvbo->busy_placements[i].lpfn = lpfn; in set_placement_range()
289 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) in nouveau_bo_placement_set() argument
291 struct ttm_placement *pl = &nvbo->placement; in nouveau_bo_placement_set()
292 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : in nouveau_bo_placement_set()
294 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); in nouveau_bo_placement_set()
296 pl->placement = nvbo->placements; in nouveau_bo_placement_set()
297 set_placement_list(nvbo->placements, &pl->num_placement, in nouveau_bo_placement_set()
300 pl->busy_placement = nvbo->busy_placements; in nouveau_bo_placement_set()
301 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, in nouveau_bo_placement_set()
304 set_placement_range(nvbo, type); in nouveau_bo_placement_set()
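nouveau_bo_placement_set() rebuilds both the preferred and the busy placement lists from TTM domain flags (OR-ing in UNCACHED for force_coherent BOs and NO_EVICT while pinned), then clamps the page ranges via set_placement_range(). The common request is VRAM with a GART fallback:

    /* Prefer VRAM; allow TT (GART) as the busy fallback when VRAM
     * is contended, e.g. under eviction pressure. */
    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT);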
308 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) in nouveau_bo_pin() argument
310 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin()
311 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
321 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { in nouveau_bo_pin()
327 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_bo_pin()
332 if (nvbo->pin_refcnt) { in nouveau_bo_pin()
339 nvbo->pin_refcnt++; in nouveau_bo_pin()
344 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); in nouveau_bo_pin()
345 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_pin()
350 nvbo->pin_refcnt++; in nouveau_bo_pin()
351 nouveau_bo_placement_set(nvbo, memtype, 0); in nouveau_bo_pin()
357 nvbo->pin_refcnt--; in nouveau_bo_pin()
358 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_pin()
361 nvbo->pin_refcnt++; in nouveau_bo_pin()
376 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_bo_pin()
382 nouveau_bo_unpin(struct nouveau_bo *nvbo) in nouveau_bo_unpin() argument
384 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin()
385 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
392 ref = --nvbo->pin_refcnt; in nouveau_bo_unpin()
397 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); in nouveau_bo_unpin()
399 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_bo_unpin()
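Pinning is reference counted through pin_refcnt, so each successful nouveau_bo_pin() must be balanced by nouveau_bo_unpin(); only when the count drops to zero is the BO re-validated without NO_EVICT. A sketch of the usual scanout-style pairing (contig = true is what strips NOUVEAU_GEM_TILE_NONCONTIG above):

    /* Pin contiguously into VRAM, e.g. for scanout. */
    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
    if (ret)
        return ret;

    /* ... point the hardware at the BO's VRAM offset ... */

    nouveau_bo_unpin(nvbo);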
419 nouveau_bo_map(struct nouveau_bo *nvbo) in nouveau_bo_map() argument
423 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); in nouveau_bo_map()
431 if (!nvbo->force_coherent) in nouveau_bo_map()
432 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, in nouveau_bo_map()
433 &nvbo->kmap); in nouveau_bo_map()
435 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
440 nouveau_bo_unmap(struct nouveau_bo *nvbo) in nouveau_bo_unmap() argument
442 if (!nvbo) in nouveau_bo_unmap()
449 if (!nvbo->force_coherent) in nouveau_bo_unmap()
450 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
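nouveau_bo_map() reserves the BO and kmaps all of its pages into nvbo->kmap; force_coherent BOs skip the kmap because the accessors below reach them through the ttm_dma_tt per-page addresses instead. CPU access is bracketed as:

    ret = nouveau_bo_map(nvbo);
    if (ret)
        return ret;

    /* ... CPU access via nouveau_bo_rd32()/wr32() ... */

    nouveau_bo_unmap(nvbo);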
454 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) in nouveau_bo_sync_for_device() argument
456 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
458 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
465 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
474 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) in nouveau_bo_sync_for_cpu() argument
476 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
478 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
485 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
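On non-coherent platforms this pair brackets CPU access with DMA cache maintenance over the ttm_dma_tt pages; both return early for force_coherent BOs, where no maintenance is needed. The expected ordering:

    nouveau_bo_sync_for_cpu(nvbo);    /* before the CPU reads/writes */
    /* ... CPU access ... */
    nouveau_bo_sync_for_device(nvbo); /* before handing back to the GPU */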
494 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, in nouveau_bo_validate() argument
499 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, in nouveau_bo_validate()
504 nouveau_bo_sync_for_device(nvbo); in nouveau_bo_validate()
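nouveau_bo_validate() is the funnel for every placement change: it runs ttm_bo_validate() against the placement built by nouveau_bo_placement_set() and then flushes CPU caches with nouveau_bo_sync_for_device(). Callers hold the reservation, using the same 5-argument ttm_bo_reserve() seen in nouveau_bo_map() above:

    ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
    if (ret)
        return ret;

    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
    ret = nouveau_bo_validate(nvbo, false, false);

    ttm_bo_unreserve(&nvbo->bo);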
510 _nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz) in _nouveau_bo_mem_index() argument
522 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; in _nouveau_bo_mem_index()
532 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) in nouveau_bo_wr16() argument
535 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
537 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_wr16()
546 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) in nouveau_bo_rd32() argument
549 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
551 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_rd32()
560 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) in nouveau_bo_wr32() argument
563 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
565 mem = nouveau_bo_mem_index(nvbo, index, mem); in nouveau_bo_wr32()
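The accessors index in units of the access width (u16 for wr16, u32 for rd32/wr32), reading through the kmap virtual address or, for force_coherent BOs, the per-page coherent address resolved by nouveau_bo_mem_index(). With the BO mapped as above:

    u32 val;

    nouveau_bo_wr32(nvbo, 0, 0xdeadbeef); /* u32 index 0 = byte offset 0 */
    val = nouveau_bo_rd32(nvbo, 1);       /* u32 index 1 = byte offset 4 */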
660 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_evict_flags() local
664 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, in nouveau_bo_evict_flags()
668 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); in nouveau_bo_evict_flags()
672 *pl = nvbo->placement; in nouveau_bo_evict_flags()
1229 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_move_ntfy() local
1236 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1239 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { in nouveau_bo_move_ntfy()
1253 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_vm_bind() local
1262 nvbo->tile_mode, in nouveau_bo_vm_bind()
1263 nvbo->tile_flags); in nouveau_bo_vm_bind()
1287 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_move() local
1292 if (nvbo->pin_refcnt) in nouveau_bo_move()
1293 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); in nouveau_bo_move()
1334 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1343 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_bo_verify_access() local
1345 return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp); in nouveau_bo_verify_access()
1421 struct nouveau_bo *nvbo = nouveau_bo(bo); in nouveau_ttm_fault_reserve_notify() local
1431 !nouveau_bo_tile_layout(nvbo)) in nouveau_ttm_fault_reserve_notify()
1435 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); in nouveau_ttm_fault_reserve_notify()
1437 ret = nouveau_bo_validate(nvbo, false, false); in nouveau_ttm_fault_reserve_notify()
1449 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1450 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1451 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1454 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1455 nvbo->busy_placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1456 nvbo->busy_placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1459 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); in nouveau_ttm_fault_reserve_notify()
1460 return nouveau_bo_validate(nvbo, false, false); in nouveau_ttm_fault_reserve_notify()
1591 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) in nouveau_bo_fence() argument
1593 struct reservation_object *resv = nvbo->bo.resv; in nouveau_bo_fence()
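After emitting GPU work that touches a BO, the resulting fence is attached to the BO's reservation object; the exclusive flag picks the exclusive (writer) slot over a shared (reader) slot:

    /* Work that writes the BO: attach as the exclusive fence. */
    nouveau_bo_fence(nvbo, fence, true);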
1617 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm) in nouveau_bo_vma_find() argument
1620 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_vma_find()
1629 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm, in nouveau_bo_vma_add() argument
1632 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; in nouveau_bo_vma_add()
1635 ret = nvkm_vm_get(vm, size, nvbo->page_shift, in nouveau_bo_vma_add()
1640 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && in nouveau_bo_vma_add()
1641 (nvbo->bo.mem.mem_type == TTM_PL_VRAM || in nouveau_bo_vma_add()
1642 nvbo->page_shift != vma->vm->mmu->lpg_shift)) in nouveau_bo_vma_add()
1643 nvkm_vm_map(vma, nvbo->bo.mem.mm_node); in nouveau_bo_vma_add()
1645 list_add_tail(&vma->head, &nvbo->vma_list); in nouveau_bo_vma_add()
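nouveau_bo_vma_add() carves a VM range sized for the whole BO and maps it immediately unless the BO sits unbound in system memory. A sketch of the caller side, assuming (as nouveau_gem.c does in this era) that the third parameter is a caller-allocated struct nvkm_vma:

    struct nvkm_vma *vma;

    vma = kzalloc(sizeof(*vma), GFP_KERNEL);
    if (!vma)
        return -ENOMEM;

    ret = nouveau_bo_vma_add(nvbo, vm, vma);
    if (ret) {
        kfree(vma);
        return ret;
    }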
1651 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma) in nouveau_bo_vma_del() argument
1654 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) in nouveau_bo_vma_del()