root/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c


DEFINITIONS

This source file includes the following definitions:
  1. gf100_vmm_pgt_pte
  2. gf100_vmm_pgt_sgl
  3. gf100_vmm_pgt_dma
  4. gf100_vmm_pgt_mem
  5. gf100_vmm_pgt_unmap
  6. gf100_vmm_pgd_pde
  7. gf100_vmm_invalidate_pdb
  8. gf100_vmm_invalidate
  9. gf100_vmm_flush
  10. gf100_vmm_valid
  11. gf100_vmm_aper
  12. gf100_vmm_part
  13. gf100_vmm_join_
  14. gf100_vmm_join
  15. gf100_vmm_new_
  16. gf100_vmm_new

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <nvif/if900d.h>
#include <nvif/unpack.h>

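/* Write a run of PTEs.  Each entry stores the physical address in
 * 256-byte units (addr >> 8) merged with the type bits assembled by
 * gf100_vmm_valid().  The first branch handles the GM20x small-page
 * compression layout; going by the packing below, each comptag line
 * covers two PTEs, with the tag index placed at bit 44 and BIT(60)
 * apparently selecting which half of the line a PTE uses.
 */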
static inline void
gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 base = (addr >> 8) | map->type;
        u64 data = base;

        if (map->ctag && !(map->next & (1ULL << 44))) {
                while (ptes--) {
                        data = base | ((map->ctag >> 1) << 44);
                        if (!(map->ctag++ & 1))
                                data |= BIT_ULL(60);

                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        base += map->next;
                }
        } else {
                map->type += ptes * map->ctag;

                while (ptes--) {
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        data += map->next;
                }
        }
}

void
gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        if (map->page->shift == PAGE_SHIFT) {
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
                        const u64 data = (*map->dma++ >> 8) | map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
                nvkm_done(pt->memory);
                return;
        }

        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgt = {
        .unmap = gf100_vmm_pgt_unmap,
        .mem = gf100_vmm_pgt_mem,
        .dma = gf100_vmm_pgt_dma,
        .sgl = gf100_vmm_pgt_sgl,
};

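/* Write a page-directory entry.  Reading off the packing below: the
 * PDE holds two page-table pointers, pgt->pt[0] in the low word
 * (target type in bits 1:0, address in 256-byte units) and pgt->pt[1]
 * in the high word (target in bits 33:32, address shifted up by 24,
 * i.e. addr >> 8 placed at bit 32).  The VOL bits (35 and 34) mark
 * tables that live in system memory.
 */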
void
gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        struct nvkm_mmu_pt *pt;
        u64 data = 0;

        if ((pt = pgt->pt[0])) {
                switch (nvkm_memory_target(pt->memory)) {
                case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
                case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
                        data |= BIT_ULL(35); /* VOL */
                        break;
                case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
                default:
                        WARN_ON(1);
                        return;
                }
                data |= pt->addr >> 8;
        }

        if ((pt = pgt->pt[1])) {
                switch (nvkm_memory_target(pt->memory)) {
                case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
                case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
                        data |= BIT_ULL(34); /* VOL */
                        break;
                case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
                default:
                        WARN_ON(1);
                        return;
                }
                data |= pt->addr << 24;
        }

        nvkm_kmap(pd->memory);
        VMM_WO064(pd, vmm, pdei * 8, data);
        nvkm_done(pd->memory);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgd = {
        .unmap = gf100_vmm_pgt_unmap,
        .pde = gf100_vmm_pgd_pde,
};

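/* Page-table layouts.  Each descriptor is { type, index bits, entry
 * size, alignment, funcs }, and in every variant the index bits sum
 * with the page shift to the 40-bit GF100 virtual address:
 *
 *   17_12: 12 (4KiB page)   + 15 (SPT) + 13 (PGD) = 40
 *   17_17: 17 (128KiB page) + 10 (LPT) + 13 (PGD) = 40
 *   16_12: 12 (4KiB page)   + 14 (SPT) + 14 (PGD) = 40
 *   16_16: 16 (64KiB page)  + 10 (LPT) + 14 (PGD) = 40
 *
 * so both leaf variants of a pair cover the same span per PDE (128MiB
 * for 17_*, 64MiB for 16_*).  The 16/17 split mirrors the board's
 * big-page size; see gf100_vmm_new_() at the bottom of the file.
 */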
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_12[] = {
        { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
        { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
        {}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_17_17[] = {
        { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
        { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
        {}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_12[] = {
        { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
        { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
        {}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_16[] = {
        { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
        { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
        {}
};

void
gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
        struct nvkm_device *device = vmm->mmu->subdev.device;
        nvkm_wr32(device, 0x100cb8, addr);
}

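/* TLB invalidation.  The sequence below is: wait for what looks like
 * a free flush slot, point the MMU at the page directory (unless
 * invalidating every PDB), trigger the flush through 0x100cbc, then
 * wait for it to be accepted.  The register semantics are inferred
 * from observed behaviour rather than documentation, as the comments
 * inside note.
 */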
void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
        u64 addr = 0;

        mutex_lock(&subdev->mutex);
        /* Looks like maybe a "free flush slots" counter; the faster
         * you write to 0x100cbc, the more it decreases.
         */
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
                        break;
        );

        if (!(type & 0x00000002) /* ALL_PDB. */) {
                switch (nvkm_memory_target(pd->memory)) {
                case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
                case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
                case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
                default:
                        WARN_ON(1);
                        break;
                }
                addr |= (vmm->pd->pt[0]->addr >> 12) << 4;

                vmm->func->invalidate_pdb(vmm, addr);
        }

        nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

        /* Wait for flush to be queued? */
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100c80) & 0x00008000)
                        break;
        );
        mutex_unlock(&subdev->mutex);
}

void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
        u32 type = 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
        gf100_vmm_invalidate(vmm, type);
}

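/* Check/translate a map request and assemble the common PTE fields
 * into map->type.  From the packing at the end of the function:
 * bit 0 = valid, bit 1 = privileged, bit 2 = read-only, bit 32 =
 * volatile, bits 34:33 = aperture, bits 43:36 = kind, with any
 * compression-tag state from bit 44 up.  map->next is the per-PTE
 * address step in the PTE's 256-byte units (page size >> 8).
 *
 * A worked example with made-up numbers, assuming the kind table maps
 * 0x00 to itself: an ordinary 4KiB VRAM page (kind 0x00, writeable,
 * unprivileged, non-volatile) yields map->type = BIT(0) = 0x1, so for
 * a page at physical address 0x20000, gf100_vmm_pgt_pte() writes
 * (0x20000 >> 8) | 0x1 = 0x201, and map->next = 0x1000 >> 8 = 0x10
 * steps each following PTE forward by one page.
 */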
int
gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                struct nvkm_vmm_map *map)
{
        const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
        const struct nvkm_vmm_page *page = map->page;
        const bool gm20x = page->desc->func->sparse != NULL;
        union {
                struct gf100_vmm_map_vn vn;
                struct gf100_vmm_map_v0 v0;
        } *args = argv;
        struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_memory *memory = map->memory;
        u8  kind, priv, ro, vol;
        int kindn, aper, ret = -ENOSYS;
        const u8 *kindm;

        map->next = (1 << page->shift) >> 8;
        map->type = map->ctag = 0;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                vol  = !!args->v0.vol;
                ro   = !!args->v0.ro;
                priv = !!args->v0.priv;
                kind =   args->v0.kind;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                vol  = target == NVKM_MEM_TARGET_HOST;
                ro   = 0;
                priv = 0;
                kind = 0x00;
        } else {
                VMM_DEBUG(vmm, "args");
                return ret;
        }

        aper = vmm->func->aper(target);
        if (WARN_ON(aper < 0))
                return aper;

        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
        if (kind >= kindn || kindm[kind] == 0xff) {
                VMM_DEBUG(vmm, "kind %02x", kind);
                return -EINVAL;
        }

        if (kindm[kind] != kind) {
                u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
                u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
                if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
                        VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
                        return -EINVAL;
                }

                ret = nvkm_memory_tags_get(memory, device, tags,
                                           nvkm_ltc_tags_clear,
                                           &map->tags);
                if (ret) {
                        VMM_DEBUG(vmm, "comp %d", ret);
                        return ret;
                }

                if (map->tags->mn) {
                        u64 tags = map->tags->mn->offset + (map->offset >> 17);
                        if (page->shift == 17 || !gm20x) {
                                map->type |= tags << 44;
                                map->ctag |= 1ULL << 44;
                                map->next |= 1ULL << 44;
                        } else {
                                map->ctag |= tags << 1 | 1;
                        }
                } else {
                        kind = kindm[kind];
                }
        }

        map->type |= BIT(0);
        map->type |= (u64)priv << 1;
        map->type |= (u64)  ro << 2;
        map->type |= (u64) vol << 32;
        map->type |= (u64)aper << 33;
        map->type |= (u64)kind << 36;
        return 0;
}

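/* Translate an nvkm memory target to the two-bit aperture field used
 * in PTEs: 0 = VRAM, 2 = coherent system memory, 3 = non-coherent
 * system memory (1 is not produced here).
 */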
int
gf100_vmm_aper(enum nvkm_memory_target target)
{
        switch (target) {
        case NVKM_MEM_TARGET_VRAM: return 0;
        case NVKM_MEM_TARGET_HOST: return 2;
        case NVKM_MEM_TARGET_NCOH: return 3;
        default:
                return -EINVAL;
        }
}

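/* A VMM is connected to an instance block (e.g. a channel's) by
 * writing its page-directory address, plus target/VOL bits, at offset
 * 0x0200 and the highest valid address at 0x0208; gf100_vmm_part()
 * simply zeroes both words again.
 */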
void
gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        nvkm_fo64(inst, 0x0200, 0x00000000, 2);
}

int
gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
        struct nvkm_mmu_pt *pd = vmm->pd->pt[0];

        switch (nvkm_memory_target(pd->memory)) {
        case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
        case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
                base |= BIT_ULL(2) /* VOL. */;
                break;
        case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }
        base |= pd->addr;

        nvkm_kmap(inst);
        nvkm_wo64(inst, 0x0200, base);
        nvkm_wo64(inst, 0x0208, vmm->limit - 1);
        nvkm_done(inst);
        return 0;
}

int
gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        return gf100_vmm_join_(vmm, inst, 0);
}

static const struct nvkm_vmm_func
gf100_vmm_17 = {
        .join = gf100_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gf100_vmm_valid,
        .flush = gf100_vmm_flush,
        .invalidate_pdb = gf100_vmm_invalidate_pdb,
        .page = {
                { 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
                { 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
                {}
        }
};

static const struct nvkm_vmm_func
gf100_vmm_16 = {
        .join = gf100_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gf100_vmm_valid,
        .flush = gf100_vmm_flush,
        .invalidate_pdb = gf100_vmm_invalidate_pdb,
        .page = {
                { 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
                { 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
                {}
        }
};

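/* The board's big-page size (fb->page, 16 for 64KiB or 17 for 128KiB
 * here) decides which of the two layouts above a new VMM uses.
 */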
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
               const struct nvkm_vmm_func *func_17,
               struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
               void *argv, u32 argc, struct lock_class_key *key,
               const char *name, struct nvkm_vmm **pvmm)
{
        switch (mmu->subdev.device->fb->page) {
        case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
                                      argv, argc, key, name, pvmm);
        case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
                                      argv, argc, key, name, pvmm);
        default:
                WARN_ON(1);
                return -EINVAL;
        }
}

int
gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
        return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
                              size, argv, argc, key, name, pvmm);
}
