root/drivers/media/pci/ivtv/ivtv-udma.c


DEFINITIONS

This source file includes the following definitions:
  1. ivtv_udma_get_page_info
  2. ivtv_udma_fill_sg_list
  3. ivtv_udma_fill_sg_array
  4. ivtv_udma_alloc
  5. ivtv_udma_setup
  6. ivtv_udma_unmap
  7. ivtv_udma_free
  8. ivtv_udma_start
  9. ivtv_udma_prepare

// SPDX-License-Identifier: GPL-2.0-or-later
/*
    User DMA

    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

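/*
 * Compute the page-aligned layout of a user buffer: the page-aligned start
 * address, the offset into the first page, the number of bytes used in the
 * last page (tail), and the first/last page frame numbers, from which the
 * total page count follows.
 */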
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
        dma_page->uaddr = first & PAGE_MASK;
        dma_page->offset = first & ~PAGE_MASK;
        dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
        dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->page_count = dma_page->last - dma_page->first + 1;
        if (dma_page->page_count == 1)
                dma_page->tail -= dma_page->offset;
}

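/*
 * Populate scatterlist entries starting at map_offset from the pages
 * described by dma_page. Highmem pages have no permanent kernel mapping,
 * so their contents are copied into lowmem bounce pages (allocated on
 * demand) and the bounce page is placed in the list instead. Returns the
 * next free index on success, or -1 if a bounce page could not be
 * allocated.
 */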
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
        int i, offset;
        unsigned long flags;

        if (map_offset < 0)
                return map_offset;

        offset = dma_page->offset;

        /* Fill SG List with new values */
        for (i = 0; i < dma_page->page_count; i++) {
                unsigned int len = (i == dma_page->page_count - 1) ?
                        dma_page->tail : PAGE_SIZE - offset;

                if (PageHighMem(dma->map[map_offset])) {
                        void *src;

                        if (dma->bouncemap[map_offset] == NULL)
                                dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
                        if (dma->bouncemap[map_offset] == NULL)
                                return -1;
                        local_irq_save(flags);
                        src = kmap_atomic(dma->map[map_offset]) + offset;
                        memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
                        kunmap_atomic(src);
                        local_irq_restore(flags);
                        sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
                } else {
                        sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
                }
                offset = 0;
                map_offset++;
        }
        return map_offset;
}

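/*
 * Translate the DMA-mapped scatterlist into the little-endian SG array
 * consumed by the hardware. The destination address advances with each
 * entry; once 'split' bytes have been consumed, the destination jumps to
 * buffer_offset_2, which lets one transfer span two card buffers. As used
 * by ivtv_udma_setup(), a split of -1 (u32 wrap-around) never reaches
 * zero for realistic transfer sizes, so the jump is effectively disabled.
 */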
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
                dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
                dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
                dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
                buffer_offset += sg_dma_len(sg);

                split -= sg_dma_len(sg);
                if (split == 0)
                        buffer_offset = buffer_offset_2;
        }
}

/* User DMA Buffers */
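/*
 * Map the SG description array itself for DMA to the card. The guard on
 * SG_handle means this happens only once; the single mapping is then
 * reused for every subsequent user DMA transfer.
 */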
void ivtv_udma_alloc(struct ivtv *itv)
{
        if (itv->udma.SG_handle == 0) {
                /* Map DMA Page Array Buffer */
                itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
                           sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
                ivtv_udma_sync_for_cpu(itv);
        }
}

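/*
 * Prepare a user DMA transfer to card address ivtv_dest_addr: pin the
 * user pages, build and map the scatterlist, fill the hardware SG array,
 * and flag the last entry so the card raises an interrupt on completion.
 * Returns the number of pinned pages, or a negative error code.
 */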
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
                       void __user *userbuf, int size_in_bytes)
{
        struct ivtv_dma_page_info user_dma;
        struct ivtv_user_dma *dma = &itv->udma;
        int i, err;

        IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

        /* Still in USE */
        if (dma->SG_length || dma->page_count) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
                           dma->SG_length, dma->page_count);
                return -EBUSY;
        }

        ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

        /* The map and bouncemap arrays hold at most IVTV_DMA_SG_OSD_ENT
           pages, so reject oversized requests up front */
        if (user_dma.page_count <= 0 ||
            user_dma.page_count > IVTV_DMA_SG_OSD_ENT) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
                           user_dma.page_count, size_in_bytes, user_dma.offset);
                return -EINVAL;
        }

        /* Get user pages for DMA Xfer */
        err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
                        dma->map, FOLL_FORCE);

        if (user_dma.page_count != err) {
                IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
                           err, user_dma.page_count);
                if (err >= 0) {
                        for (i = 0; i < err; i++)
                                put_page(dma->map[i]);
                        return -EINVAL;
                }
                return err;
        }

        dma->page_count = user_dma.page_count;

        /* Fill SG List with new values */
        if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
                for (i = 0; i < dma->page_count; i++)
                        put_page(dma->map[i]);
                dma->page_count = 0;
                return -ENOMEM;
        }

        /* Map SG List */
        dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
        if (dma->SG_length == 0) {
                /* Mapping failed: release the pinned pages rather than
                   indexing SGarray[-1] below */
                IVTV_DEBUG_WARN("ivtv_udma_setup: DMA map error, SG_length is 0\n");
                for (i = 0; i < dma->page_count; i++)
                        put_page(dma->map[i]);
                dma->page_count = 0;
                return -EINVAL;
        }

        /* Fill SG Array with new values */
        ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

        /* Tag SG Array with Interrupt Bit */
        dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

        ivtv_udma_sync_for_device(itv);
        return dma->page_count;
}

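/*
 * Tear down the current user DMA transfer: unmap the scatterlist and
 * release the pinned user pages.
 */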
void ivtv_udma_unmap(struct ivtv *itv)
{
        struct ivtv_user_dma *dma = &itv->udma;
        int i;

        IVTV_DEBUG_INFO("ivtv_udma_unmap\n");

        /* Nothing to free */
        if (dma->page_count == 0)
                return;

        /* Unmap Scatterlist */
        if (dma->SG_length) {
                pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
                dma->SG_length = 0;
        }
        /* sync DMA */
        ivtv_udma_sync_for_cpu(itv);

        /* Release User Pages */
        for (i = 0; i < dma->page_count; i++)
                put_page(dma->map[i]);
        dma->page_count = 0;
}

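/*
 * Free all user DMA resources on driver unload: the SG array mapping,
 * any still-mapped scatterlist, and the highmem bounce pages.
 */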
void ivtv_udma_free(struct ivtv *itv)
{
        int i;

        /* Unmap SG Array */
        if (itv->udma.SG_handle) {
                pci_unmap_single(itv->pdev, itv->udma.SG_handle,
                         sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
        }

        /* Unmap Scatterlist */
        if (itv->udma.SG_length) {
                pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
        }

        for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
                if (itv->udma.bouncemap[i])
                        __free_page(itv->udma.bouncemap[i]);
        }
}

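/*
 * Kick off the prepared transfer: point the decoder DMA engine at the
 * mapped SG array, set the transfer-start bit, and update the driver's
 * DMA state flags.
 */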
void ivtv_udma_start(struct ivtv *itv)
{
        IVTV_DEBUG_DMA("start UDMA\n");
        write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        set_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

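/*
 * Start the user DMA transfer immediately if the DMA engine is idle;
 * otherwise mark it pending so it is started once the current transfer
 * completes.
 */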
void ivtv_udma_prepare(struct ivtv *itv)
{
        unsigned long flags;

        spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                ivtv_udma_start(itv);
        else
                set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
        spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
