/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/dma-buf.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"

#include <drm/drm_fb_helper.h>

#define DL_DEFIO_WRITE_DELAY    (HZ/20) /* fb_deferred_io.delay in jiffies */

static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;

module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
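
/*
 * Both parameters can be set at load time, e.g. (assuming the driver is
 * built as a module named udl.ko):
 *
 *      modprobe udl fb_bpp=16 fb_defio=1
 */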

struct udl_fbdev {
        struct drm_fb_helper helper;
        struct udl_framebuffer ufb;
        struct list_head fbdev_list;
        int fb_count;
};

#define DL_ALIGN_UP(x, a) ALIGN((x), (a))
#define DL_ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)

#if 0
static uint8_t rgb8(uint32_t col)
{
        uint8_t red = DLO_RGB_GETRED(col);
        uint8_t grn = DLO_RGB_GETGRN(col);
        uint8_t blu = DLO_RGB_GETBLU(col);

        return DLO_RGB8(red, grn, blu);
}

static uint16_t rgb16(uint32_t col)
{
        uint8_t red = DLO_RGB_GETRED(col);
        uint8_t grn = DLO_RGB_GETGRN(col);
        uint8_t blu = DLO_RGB_GETBLU(col);

        return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif

/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex.
 * Touching ANY framebuffer memory that triggers a page fault will
 * deadlock, because fb_defio then tries to grab that same mutex.
 */
static void udlfb_dpy_deferred_io(struct fb_info *info,
                                  struct list_head *pagelist)
{
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct udl_fbdev *ufbdev = info->par;
        struct drm_device *dev = ufbdev->ufb.base.dev;
        struct udl_device *udl = dev->dev_private;
        struct urb *urb;
        char *cmd;
        cycles_t start_cycles, end_cycles;
        int bytes_sent = 0;
        int bytes_identical = 0;
        int bytes_rendered = 0;

        if (!fb_defio)
                return;

        start_cycles = get_cycles();

        urb = udl_get_urb(dev);
        if (!urb)
                return;

        cmd = urb->transfer_buffer;

        /* walk the written page list and render each to device */
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {

                if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
                                     &urb, (char *) info->fix.smem_start,
                                     &cmd, cur->index << PAGE_SHIFT,
                                     cur->index << PAGE_SHIFT,
                                     PAGE_SIZE, &bytes_identical, &bytes_sent))
                        goto error;
                bytes_rendered += PAGE_SIZE;
        }

        if (cmd > (char *) urb->transfer_buffer) {
                /* Send partial buffer remaining before exiting */
                int len = cmd - (char *) urb->transfer_buffer;
                udl_submit_urb(dev, urb, len);
                bytes_sent += len;
        } else
                udl_urb_completion(urb);

error:
        atomic_add(bytes_sent, &udl->bytes_sent);
        atomic_add(bytes_identical, &udl->bytes_identical);
        atomic_add(bytes_rendered, &udl->bytes_rendered);
        end_cycles = get_cycles();
        atomic_add(((unsigned int) ((end_cycles - start_cycles)
                    >> 10)), /* Kcycles */
                   &udl->cpu_kcycles_used);
}
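
/*
 * udl_handle_damage - flush a rectangle of the framebuffer to the device
 *
 * The rectangle is first merged, under fb->dirty_lock, with any damage
 * stored by an earlier atomic caller.  In atomic context only the merged
 * coordinates are recorded, leaving the transfer to a later caller.
 * Returns 0 on success (and on transient failures that are safe to skip),
 * or -EINVAL if the rectangle lies outside the framebuffer.
 */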
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                      int width, int height)
{
        struct drm_device *dev = fb->base.dev;
        struct udl_device *udl = dev->dev_private;
        int i, ret;
        char *cmd;
        cycles_t start_cycles, end_cycles;
        int bytes_sent = 0;
        int bytes_identical = 0;
        struct urb *urb;
        int aligned_x;
        int bpp = (fb->base.bits_per_pixel / 8);
        int x2, y2;
        bool store_for_later = false;
        unsigned long flags;

        if (!fb->active_16)
                return 0;

        if (!fb->obj->vmapping) {
                ret = udl_gem_vmap(fb->obj);
                if (ret == -ENOMEM) {
                        DRM_ERROR("failed to vmap fb\n");
                        return 0;
                }
                if (!fb->obj->vmapping) {
                        DRM_ERROR("vmapping not set after vmap\n");
                        return 0;
                }
        }

        aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
        width = DL_ALIGN_UP(width + (x - aligned_x), sizeof(unsigned long));
        x = aligned_x;

        if ((width <= 0) ||
            (x + width > fb->base.width) ||
            (y + height > fb->base.height))
                return -EINVAL;

        /* If we are in atomic context, just store the damage rectangle.
         * This must be tested before taking the spin lock, since holding
         * the lock makes the context atomic as well. */
        if (in_atomic())
                store_for_later = true;

        x2 = x + width - 1;
        y2 = y + height - 1;

        spin_lock_irqsave(&fb->dirty_lock, flags);

        /* merge with any previously stored damage rectangle */
        if (fb->y1 < y)
                y = fb->y1;
        if (fb->y2 > y2)
                y2 = fb->y2;
        if (fb->x1 < x)
                x = fb->x1;
        if (fb->x2 > x2)
                x2 = fb->x2;

        if (store_for_later) {
                fb->x1 = x;
                fb->x2 = x2;
                fb->y1 = y;
                fb->y2 = y2;
                spin_unlock_irqrestore(&fb->dirty_lock, flags);
                return 0;
        }

        fb->x1 = fb->y1 = INT_MAX;
        fb->x2 = fb->y2 = 0;

        spin_unlock_irqrestore(&fb->dirty_lock, flags);
        start_cycles = get_cycles();

        urb = udl_get_urb(dev);
        if (!urb)
                return 0;
        cmd = urb->transfer_buffer;

        for (i = y; i <= y2; i++) {
                const int line_offset = fb->base.pitches[0] * i;
                const int byte_offset = line_offset + (x * bpp);
                const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);

                if (udl_render_hline(dev, bpp, &urb,
                                     (char *) fb->obj->vmapping,
                                     &cmd, byte_offset, dev_byte_offset,
                                     (x2 - x + 1) * bpp,
                                     &bytes_identical, &bytes_sent))
                        goto error;
        }

        if (cmd > (char *) urb->transfer_buffer) {
                /* Send partial buffer remaining before exiting */
                int len = cmd - (char *) urb->transfer_buffer;
                ret = udl_submit_urb(dev, urb, len);
                bytes_sent += len;
        } else
                udl_urb_completion(urb);

error:
        atomic_add(bytes_sent, &udl->bytes_sent);
        atomic_add(bytes_identical, &udl->bytes_identical);
        atomic_add(width * height * bpp, &udl->bytes_rendered);
        end_cycles = get_cycles();
        atomic_add(((unsigned int) ((end_cycles - start_cycles)
                    >> 10)), /* Kcycles */
                   &udl->cpu_kcycles_used);

        return 0;
}

/* map the vmalloc'ed framebuffer backing store into userspace, page by page */
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        unsigned long start = vma->vm_start;
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long page, pos;

        if (offset + size > info->fix.smem_len)
                return -EINVAL;

        pos = (unsigned long)info->fix.smem_start + offset;

        pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
                  pos, size);

        while (size > 0) {
                page = vmalloc_to_pfn((void *)pos);
                if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
                        return -EAGAIN;

                start += PAGE_SIZE;
                pos += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
                else
                        size = 0;
        }

        /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        return 0;
}
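
/*
 * The fbdev drawing ops below render into the vmalloc'ed shadow buffer
 * with the generic sys_* helpers and then flush the touched rectangle
 * out to the device.
 */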
static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct udl_fbdev *ufbdev = info->par;

        sys_fillrect(info, rect);

        udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
                          rect->height);
}

static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct udl_fbdev *ufbdev = info->par;

        sys_copyarea(info, region);

        udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
                          region->height);
}

static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct udl_fbdev *ufbdev = info->par;

        sys_imageblit(info, image);

        udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
                          image->height);
}

/*
 * It's common for several clients to have the framebuffer open
 * simultaneously, e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least).
 */
static int udl_fb_open(struct fb_info *info, int user)
{
        struct udl_fbdev *ufbdev = info->par;
        struct drm_device *dev = ufbdev->ufb.base.dev;
        struct udl_device *udl = dev->dev_private;

        /* If the USB device is gone, we don't accept new opens */
        if (drm_device_is_unplugged(udl->ddev))
                return -ENODEV;

        ufbdev->fb_count++;

        if (fb_defio && (info->fbdefio == NULL)) {
                /* enable defio at last moment if not disabled by client */

                struct fb_deferred_io *fbdefio;

                fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

                /* only wire up deferred I/O if the allocation succeeded;
                 * fb_deferred_io_init() must not see a NULL info->fbdefio */
                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;
                        fbdefio->deferred_io = udlfb_dpy_deferred_io;
                        info->fbdefio = fbdefio;
                        fb_deferred_io_init(info);
                }
        }

        pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
                  info->node, user, info, ufbdev->fb_count);

        return 0;
}

/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int udl_fb_release(struct fb_info *info, int user)
{
        struct udl_fbdev *ufbdev = info->par;

        ufbdev->fb_count--;

        if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
                fb_deferred_io_cleanup(info);
                kfree(info->fbdefio);
                info->fbdefio = NULL;
                info->fbops->fb_mmap = udl_fb_mmap;
        }

        pr_notice("released /dev/fb%d user=%d count=%d\n",
                  info->node, user, ufbdev->fb_count);

        return 0;
}

static struct fb_ops udlfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = udl_fb_fillrect,
        .fb_copyarea = udl_fb_copyarea,
        .fb_imageblit = udl_fb_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
        .fb_debug_enter = drm_fb_helper_debug_enter,
        .fb_debug_leave = drm_fb_helper_debug_leave,
        .fb_mmap = udl_fb_mmap,
        .fb_open = udl_fb_open,
        .fb_release = udl_fb_release,
};

/* .dirty callback, invoked for the DRM dirty-fb ioctl */
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                      struct drm_file *file,
                                      unsigned flags, unsigned color,
                                      struct drm_clip_rect *clips,
                                      unsigned num_clips)
{
        struct udl_framebuffer *ufb = to_udl_fb(fb);
        int i;
        int ret = 0;

        drm_modeset_lock_all(fb->dev);

        if (!ufb->active_16)
                goto unlock;

        if (ufb->obj->base.import_attach) {
                ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
                                               0, ufb->obj->base.size,
                                               DMA_FROM_DEVICE);
                if (ret)
                        goto unlock;
        }

        for (i = 0; i < num_clips; i++) {
                ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
                                        clips[i].x2 - clips[i].x1,
                                        clips[i].y2 - clips[i].y1);
                if (ret)
                        break;
        }

        if (ufb->obj->base.import_attach) {
                dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
                                       0, ufb->obj->base.size,
                                       DMA_FROM_DEVICE);
        }

unlock:
        drm_modeset_unlock_all(fb->dev);

        return ret;
}

static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct udl_framebuffer *ufb = to_udl_fb(fb);

        if (ufb->obj)
                drm_gem_object_unreference_unlocked(&ufb->obj->base);

        drm_framebuffer_cleanup(fb);
        kfree(ufb);
}

static const struct drm_framebuffer_funcs udlfb_funcs = {
        .destroy = udl_user_framebuffer_destroy,
        .dirty = udl_user_framebuffer_dirty,
};
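
/*
 * Common initialisation shared by the fbdev emulation framebuffer and
 * user-created (KMS) framebuffers.
 */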
static int
udl_framebuffer_init(struct drm_device *dev,
                     struct udl_framebuffer *ufb,
                     struct drm_mode_fb_cmd2 *mode_cmd,
                     struct udl_gem_object *obj)
{
        int ret;

        spin_lock_init(&ufb->dirty_lock);
        ufb->obj = obj;
        drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
        ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
        return ret;
}


static int udlfb_create(struct drm_fb_helper *helper,
                        struct drm_fb_helper_surface_size *sizes)
{
        struct udl_fbdev *ufbdev =
                container_of(helper, struct udl_fbdev, helper);
        struct drm_device *dev = ufbdev->helper.dev;
        struct fb_info *info;
        struct device *device = dev->dev;
        struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd2 mode_cmd;
        struct udl_gem_object *obj;
        uint32_t size;
        int ret = 0;

        if (sizes->surface_bpp == 24)
                sizes->surface_bpp = 32;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
        mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

        mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                          sizes->surface_depth);

        size = mode_cmd.pitches[0] * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);

        obj = udl_gem_alloc_object(dev, size);
        if (!obj) {
                ret = -ENOMEM;
                goto out;
        }

        ret = udl_gem_vmap(obj);
        if (ret) {
                DRM_ERROR("failed to vmap fb\n");
                goto out_gfree;
        }

        info = framebuffer_alloc(0, device);
        if (!info) {
                ret = -ENOMEM;
                goto out_gfree;
        }
        info->par = ufbdev;

        ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
        if (ret)
                goto out_gfree;

        fb = &ufbdev->ufb.base;

        ufbdev->helper.fb = fb;
        ufbdev->helper.fbdev = info;

        strcpy(info->fix.id, "udldrmfb");

        info->screen_base = ufbdev->ufb.obj->vmapping;
        info->fix.smem_len = size;
        info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;

        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &udlfb_ops;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width,
                               sizes->fb_height);

        ret = fb_alloc_cmap(&info->cmap, 256, 0);
        if (ret) {
                ret = -ENOMEM;
                goto out_gfree;
        }

        DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
                      fb->width, fb->height,
                      ufbdev->ufb.obj->vmapping);

        return ret;
out_gfree:
        drm_gem_object_unreference(&obj->base);
out:
        return ret;
}

static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
        .fb_probe = udlfb_create,
};

static void udl_fbdev_destroy(struct drm_device *dev,
                              struct udl_fbdev *ufbdev)
{
        struct fb_info *info;

        if (ufbdev->helper.fbdev) {
                info = ufbdev->helper.fbdev;
                unregister_framebuffer(info);
                if (info->cmap.len)
                        fb_dealloc_cmap(&info->cmap);
                framebuffer_release(info);
        }
        drm_fb_helper_fini(&ufbdev->helper);
        drm_framebuffer_unregister_private(&ufbdev->ufb.base);
        drm_framebuffer_cleanup(&ufbdev->ufb.base);
        drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
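
/*
 * Set up the fbdev emulation: prepare the fb helper, bind all connectors
 * and let the helper pick an initial configuration.
 */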
int udl_fbdev_init(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;
        int bpp_sel = fb_bpp;
        struct udl_fbdev *ufbdev;
        int ret;

        ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
        if (!ufbdev)
                return -ENOMEM;

        udl->fbdev = ufbdev;

        drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);

        ret = drm_fb_helper_init(dev, &ufbdev->helper,
                                 1, 1);
        if (ret)
                goto free;

        ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
        if (ret)
                goto fini;

        /* disable all the possible outputs/crtcs before entering KMS mode */
        drm_helper_disable_unused_functions(dev);

        ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
        if (ret)
                goto fini;

        return 0;

fini:
        drm_fb_helper_fini(&ufbdev->helper);
free:
        kfree(ufbdev);
        return ret;
}

void udl_fbdev_cleanup(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;

        if (!udl->fbdev)
                return;

        udl_fbdev_destroy(dev, udl->fbdev);
        kfree(udl->fbdev);
        udl->fbdev = NULL;
}

/* unlink the emulated fbdev when the USB device disappears */
void udl_fbdev_unplug(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;
        struct udl_fbdev *ufbdev;

        if (!udl->fbdev)
                return;

        ufbdev = udl->fbdev;
        if (ufbdev->helper.fbdev) {
                struct fb_info *info;

                info = ufbdev->helper.fbdev;
                unlink_framebuffer(info);
        }
}

/* create a framebuffer for a userspace-supplied GEM object (AddFB path) */
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
                      struct drm_file *file,
                      struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_gem_object *obj;
        struct udl_framebuffer *ufb;
        int ret;
        uint32_t size;

        obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
        if (obj == NULL)
                return ERR_PTR(-ENOENT);

        size = mode_cmd->pitches[0] * mode_cmd->height;
        size = ALIGN(size, PAGE_SIZE);

        if (size > obj->size) {
                DRM_ERROR("object size not sufficient for fb %u %zu %u %u\n",
                          size, obj->size, mode_cmd->pitches[0],
                          mode_cmd->height);
                /* drop the reference taken by the lookup above */
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
        if (ufb == NULL) {
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                kfree(ufb);
                return ERR_PTR(-EINVAL);
        }
        return &ufb->base;
}