/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "mgag200_drv.h"
#include <ttm/ttm_page_alloc.h>

static inline struct mga_device *
mgag200_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct mga_device, ttm.bdev);
}

static int
mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int mgag200_ttm_global_init(struct mga_device *ast)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &ast->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &mgag200_ttm_mem_global_init;
	global_ref->release = &mgag200_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	ast->ttm.bo_global_ref.mem_glob =
		ast->ttm.mem_global_ref.object;
	global_ref = &ast->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&ast->ttm.mem_global_ref);
		return r;
	}
	return 0;
}

static void
mgag200_ttm_global_release(struct mga_device *ast)
{
	if (ast->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
	drm_global_item_unref(&ast->ttm.mem_global_ref);
	ast->ttm.mem_global_ref.release = NULL;
}

static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct mgag200_bo *bo;

	bo = container_of(tbo, struct mgag200_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &mgag200_bo_ttm_destroy)
		return true;
	return false;
}

static int
mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct mgag200_bo *mgabo = mgag200_bo(bo);

	if (!mgag200_ttm_bo_is_mgag200_bo(bo))
		return;

	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
	*pl = mgabo->placement;
}

static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct mgag200_bo *mgabo = mgag200_bo(bo);

	return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
}

static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct mga_device *mdev = mgag200_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		/* VRAM is the framebuffer aperture behind PCI BAR 0 */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int mgag200_bo_move(struct ttm_buffer_object *bo,
			   bool evict, bool interruptible,
			   bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem)
{
	/* No hardware copy engine is used for moves; fall back to memcpy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}

static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func mgag200_tt_backend_func = {
	.destroy = &mgag200_ttm_backend_destroy,
};

static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
					    unsigned long size, uint32_t page_flags,
					    struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &mgag200_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver mgag200_bo_driver = {
	.ttm_tt_create = mgag200_ttm_tt_create,
	.ttm_tt_populate = mgag200_ttm_tt_populate,
	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
	.init_mem_type = mgag200_bo_init_mem_type,
	.evict_flags = mgag200_bo_evict_flags,
	.move = mgag200_bo_move,
	.verify_access = mgag200_bo_verify_access,
	.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
	.io_mem_free = &mgag200_ttm_io_mem_free,
};

int mgag200_mm_init(struct mga_device *mdev)
{
	int ret;
	struct drm_device *dev = mdev->dev;
	struct ttm_bo_device *bdev = &mdev->ttm.bdev;

	ret = mgag200_ttm_global_init(mdev);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&mdev->ttm.bdev,
				 mdev->ttm.bo_global_ref.ref.object,
				 &mgag200_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* Hand the whole VRAM aperture to TTM's range manager. */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	/* Request write-combining for the VRAM aperture. */
	mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));

	return 0;
}

void mgag200_mm_fini(struct mga_device *mdev)
{
	ttm_bo_device_release(&mdev->ttm.bdev);

	mgag200_ttm_global_release(mdev);

	arch_phys_wc_del(mdev->fb_mtrr);
	mdev->fb_mtrr = 0;
}

void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
{
	u32 c = 0;
	unsigned i;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++].flags = TTM_PL_FLAG_WC |
			TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
}

int mgag200_bo_create(struct drm_device *dev, int size, int align,
		      uint32_t flags, struct mgag200_bo **pmgabo)
{
	struct mga_device *mdev = dev->dev_private;
	struct mgag200_bo *mgabo;
	size_t acc_size;
	int ret;

	mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
	if (!mgabo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &mgabo->gem, size);
	if (ret) {
		kfree(mgabo);
		return ret;
	}

	mgabo->bo.bdev = &mdev->ttm.bdev;

	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
				       sizeof(struct mgag200_bo));

	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
			  ttm_bo_type_device, &mgabo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, mgag200_bo_ttm_destroy);
	if (ret)
		return ret;

	*pmgabo = mgabo;
	return 0;
}

static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
{
	return bo->bo.offset;
}

int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = mgag200_bo_gpu_offset(bo);
		return 0;
	}

	mgag200_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = mgag200_bo_gpu_offset(bo);
	return 0;
}

int mgag200_bo_unpin(struct mgag200_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}

int mgag200_bo_push_sysram(struct mgag200_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	/* Force the object out of VRAM and keep it resident in system memory. */
	mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}

int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct mga_device *mdev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	mdev = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}