Searched refs:unmap (Results 1 - 200 of 465) sorted by relevance

/linux-4.1.27/crypto/async_tx/
H A Dasync_memcpy.c53 struct dmaengine_unmap_data *unmap = NULL; async_memcpy() local
56 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); async_memcpy()
58 if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { async_memcpy()
66 unmap->to_cnt = 1; async_memcpy()
67 unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, async_memcpy()
69 unmap->from_cnt = 1; async_memcpy()
70 unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, async_memcpy()
72 unmap->len = len; async_memcpy()
74 tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], async_memcpy()
75 unmap->addr[0], len, async_memcpy()
82 dma_set_unmap(tx, unmap); async_memcpy()
102 dmaengine_unmap_put(unmap); async_memcpy()
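
The async_memcpy() hits above read as a compact recipe for the dmaengine unmap-data API: acquire the unmap structure, map each page into unmap->addr[], record the direction counts, attach it to the descriptor, then drop the submit path's reference. A minimal sketch of that pattern, assuming a capable 'chan' (demo_async_copy is a made-up name; mapping-error checks trimmed):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int demo_async_copy(struct dma_chan *chan, struct page *dest,
			   struct page *src, size_t len)
{
	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(device->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dest, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = len;

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len, 0);
	if (tx) {
		dma_set_unmap(tx, unmap);	/* descriptor takes its own kref */
		dmaengine_submit(tx);
	}
	dmaengine_unmap_put(unmap);		/* drop the submit path's kref */
	return tx ? 0 : -ENOMEM;
}
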
H A Dasync_xor.c36 do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, do_async_xor() argument
45 int src_cnt = unmap->to_cnt; do_async_xor()
47 dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; do_async_xor()
48 dma_addr_t *src_list = unmap->addr; do_async_xor()
76 if (src_list > unmap->addr) do_async_xor()
79 xor_src_cnt, unmap->len, do_async_xor()
90 xor_src_cnt, unmap->len, do_async_xor()
95 dma_set_unmap(tx, unmap); do_async_xor()
180 struct dmaengine_unmap_data *unmap = NULL; async_xor() local
185 unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); async_xor()
187 if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { async_xor()
194 unmap->len = len; async_xor()
198 unmap->to_cnt++; async_xor()
199 unmap->addr[j++] = dma_map_page(device->dev, src_list[i], async_xor()
204 unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, async_xor()
206 unmap->bidi_cnt = 1; async_xor()
208 tx = do_async_xor(chan, unmap, submit); async_xor()
209 dmaengine_unmap_put(unmap); async_xor()
212 dmaengine_unmap_put(unmap); async_xor()
276 struct dmaengine_unmap_data *unmap = NULL; async_xor_val() local
281 unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); async_xor_val()
283 if (unmap && src_cnt <= device->max_xor && async_xor_val()
296 unmap->addr[i] = dma_map_page(device->dev, src_list[i], async_xor_val()
298 unmap->to_cnt++; async_xor_val()
300 unmap->len = len; async_xor_val()
302 tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, async_xor_val()
311 unmap->addr, src_cnt, len, result, async_xor_val()
315 dma_set_unmap(tx, unmap); async_xor_val()
337 dmaengine_unmap_put(unmap); async_xor_val()
H A Dasync_pq.c51 struct dmaengine_unmap_data *unmap, do_async_gen_syndrome()
91 dma_dest[0] = unmap->addr[disks - 2]; do_async_gen_syndrome()
92 dma_dest[1] = unmap->addr[disks - 1]; do_async_gen_syndrome()
94 &unmap->addr[src_off], do_async_gen_syndrome()
96 &scfs[src_off], unmap->len, do_async_gen_syndrome()
104 dma_set_unmap(tx, unmap); do_async_gen_syndrome()
186 struct dmaengine_unmap_data *unmap = NULL; async_gen_syndrome() local
191 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); async_gen_syndrome()
194 if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) && async_gen_syndrome()
210 unmap->len = len; async_gen_syndrome()
214 unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, async_gen_syndrome()
217 unmap->to_cnt++; async_gen_syndrome()
225 unmap->bidi_cnt++; async_gen_syndrome()
227 unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), async_gen_syndrome()
230 unmap->addr[j++] = 0; async_gen_syndrome()
234 unmap->bidi_cnt++; async_gen_syndrome()
236 unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), async_gen_syndrome()
239 unmap->addr[j++] = 0; async_gen_syndrome()
243 tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); async_gen_syndrome()
244 dmaengine_unmap_put(unmap); async_gen_syndrome()
248 dmaengine_unmap_put(unmap); async_gen_syndrome()
305 struct dmaengine_unmap_data *unmap = NULL; async_syndrome_val() local
310 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); async_syndrome_val()
312 if (unmap && disks <= dma_maxpq(device, 0) && async_syndrome_val()
321 unmap->len = len; async_syndrome_val()
324 unmap->addr[j] = dma_map_page(dev, blocks[i], async_syndrome_val()
328 unmap->to_cnt++; async_syndrome_val()
340 unmap->addr[j++] = pq[0]; async_syndrome_val()
341 unmap->to_cnt++; async_syndrome_val()
350 unmap->addr[j++] = pq[1]; async_syndrome_val()
351 unmap->to_cnt++; async_syndrome_val()
358 unmap->addr, async_syndrome_val()
369 dma_set_unmap(tx, unmap); async_syndrome_val()
49 do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) do_async_gen_syndrome() argument
H A Dasync_raid6_recov.c38 struct dmaengine_unmap_data *unmap = NULL; async_sum_product() local
44 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); async_sum_product()
46 if (unmap) { async_sum_product()
54 unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); async_sum_product()
55 unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); async_sum_product()
56 unmap->to_cnt = 2; async_sum_product()
58 unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); async_sum_product()
59 unmap->bidi_cnt = 1; async_sum_product()
61 pq[1] = unmap->addr[2]; async_sum_product()
63 unmap->len = len; async_sum_product()
64 tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, async_sum_product()
67 dma_set_unmap(tx, unmap); async_sum_product()
69 dmaengine_unmap_put(unmap); async_sum_product()
73 /* could not get a descriptor, unmap and fall through to async_sum_product()
76 dmaengine_unmap_put(unmap); async_sum_product()
103 struct dmaengine_unmap_data *unmap = NULL; async_mult() local
108 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); async_mult()
110 if (unmap) { async_mult()
118 unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); async_mult()
119 unmap->to_cnt++; async_mult()
120 unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); async_mult()
121 dma_dest[1] = unmap->addr[1]; async_mult()
122 unmap->bidi_cnt++; async_mult()
123 unmap->len = len; async_mult()
129 tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, async_mult()
133 dma_set_unmap(tx, unmap); async_mult()
134 dmaengine_unmap_put(unmap); async_mult()
139 /* could not get a descriptor, unmap and fall through to async_mult()
142 dmaengine_unmap_put(unmap); async_mult()
/linux-4.1.27/drivers/iommu/
H A Diommu-traces.c24 EXPORT_TRACEPOINT_SYMBOL_GPL(unmap); variable
H A Dio-pgtable-arm.c215 /* We require an unmap first */ arm_lpae_init_pte()
220 * We need to unmap and free the old table before arm_lpae_init_pte()
454 * minus the part we want to unmap __arm_lpae_unmap()
585 .unmap = arm_lpae_unmap, arm_lpae_alloc_pgtable()
930 /* Partial unmap */ arm_lpae_run_tests()
932 if (ops->unmap(ops, SZ_1G + size, size) != size) arm_lpae_run_tests()
935 /* Remap of partial unmap */ arm_lpae_run_tests()
942 /* Full unmap */ arm_lpae_run_tests()
948 if (ops->unmap(ops, iova, size) != size) arm_lpae_run_tests()
H A Dio-pgtable.h72 * @unmap: Unmap a physically contiguous memory region.
81 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, member in struct:io_pgtable_ops
H A Dtegra-gart.c100 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK); gart_set_pte()
345 .unmap = gart_iommu_unmap,
H A Diommu.c1090 if (unlikely(domain->ops->unmap == NULL || iommu_unmap()
1111 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); iommu_unmap()
1114 * Keep iterating until we either unmap 'size' bytes (or more) iommu_unmap()
1120 unmapped_page = domain->ops->unmap(domain, iova, pgsize); iommu_unmap()
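
The iommu.c hits show iommu_unmap() looping over domain->ops->unmap in page-size chunks until 'size' bytes are gone, returning the byte total. A hedged sketch of the caller-side cycle those ops serve (demo_iommu_cycle is a made-up name):

#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_iommu_cycle(struct device *dev, unsigned long iova,
			    phys_addr_t phys)
{
	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
	int ret;

	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto free;

	ret = iommu_map(domain, iova, phys, SZ_2M, IOMMU_READ | IOMMU_WRITE);
	if (!ret && iommu_unmap(domain, iova, SZ_2M) != SZ_2M)
		ret = -EIO;	/* iommu_unmap() returns bytes actually unmapped */

	iommu_detach_device(domain, dev);
free:
	iommu_domain_free(domain);
	return ret;
}
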
H A Dshmobile-iommu.c370 .unmap = shmobile_iommu_unmap,
H A Dipmmu-vmsa.c567 return domain->iop->unmap(domain->iop, iova, size); ipmmu_unmap()
750 .unmap = ipmmu_unmap,
/linux-4.1.27/arch/um/kernel/
H A Dexec.c27 ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); flush_thread()
28 ret = ret || unmap(&current->mm->context.id, STUB_END, flush_thread()
H A Dtlb.c68 ret = unmap(hvc->id, op->u.munmap.addr, do_ops()
431 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush); flush_tlb_page()
/linux-4.1.27/include/linux/
H A Dzpool.h27 * changed memory back out on unmap. Write-only does not copy
33 ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
72 * @unmap: unmap a handle.
96 void (*unmap)(void *pool, unsigned long handle); member in struct:zpool_driver
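
The zpool.h hits spell out the map/unmap contract: a write-only mapping copies nothing in, and any copy-back happens at unmap time. A sketch of a caller honoring that, assuming 'pool' and 'handle' came from zpool_create_pool()/zpool_malloc() (demo_fill is a made-up name):

#include <linux/zpool.h>
#include <linux/string.h>

static void demo_fill(struct zpool *pool, unsigned long handle,
		      const void *src, size_t len)
{
	void *dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);

	memcpy(dst, src, len);		  /* ZPOOL_MM_WO: nothing was copied in */
	zpool_unmap_handle(pool, handle); /* copy-back/unpin happens here */
}
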
H A Dio-mapping.h82 /* Atomic map/unmap */
139 /* Atomic map/unmap */
154 /* Non-atomic map/unmap */
H A Dzsmalloc.h27 ZS_MM_RO, /* read-only (no copy-out at unmap time) */
H A Ddmaengine.h446 struct dmaengine_unmap_data *unmap; member in struct:dma_async_tx_descriptor
456 struct dmaengine_unmap_data *unmap) dma_set_unmap()
458 kref_get(&unmap->kref); dma_set_unmap()
459 tx->unmap = unmap; dma_set_unmap()
464 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
467 struct dmaengine_unmap_data *unmap) dma_set_unmap()
475 static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) dmaengine_unmap_put() argument
482 if (tx->unmap) { dma_descriptor_unmap()
483 dmaengine_unmap_put(tx->unmap); dma_descriptor_unmap()
484 tx->unmap = NULL; dma_descriptor_unmap()
455 dma_set_unmap(struct dma_async_tx_descriptor *tx, struct dmaengine_unmap_data *unmap) dma_set_unmap() argument
466 dma_set_unmap(struct dma_async_tx_descriptor *tx, struct dmaengine_unmap_data *unmap) dma_set_unmap() argument
H A Ddma-buf.h60 * space, users may not block until the subsequent unmap call.
80 /* For {map,unmap}_dma_buf below, any specific buffer attributes
127 /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
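
The dma-buf.h hits describe the attach/map/unmap contract for importers. A sketch of the importer side, assuming an exported 'dmabuf' is in hand (demo_import is a made-up name):

#include <linux/dma-buf.h>
#include <linux/err.h>

static int demo_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		return PTR_ERR(sgt);
	}

	/* ... program the device from sgt ... */

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, att);
	return 0;
}
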
H A Dswiotlb.h15 * The complexity of {map,unmap}_single is linearly dependent on this value.
H A Diommu.h55 #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
126 * @unmap: unmap a physically contiguous memory region from an iommu domain
149 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, member in struct:iommu_ops
H A Dirqdomain.h54 * @unmap: Dispose of such a mapping
66 void (*unmap)(struct irq_domain *d, unsigned int virq); member in struct:irq_domain_ops
171 * @ops: map/unmap domain callbacks
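
The irqdomain.h hits pair a map callback with an unmap that disposes of the mapping; as the irq-crossbar.c hit further down notes, unmap runs from irq_dispose_mapping(). A sketch of such an ops table (the demo_* names and stub chip are made up):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip demo_chip = {
	.name = "demo",			/* hypothetical chip */
};

static int demo_map(struct irq_domain *d, unsigned int virq,
		    irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static void demo_unmap(struct irq_domain *d, unsigned int virq)
{
	/* undo map; reached via irq_dispose_mapping(virq) */
	irq_set_chip_and_handler(virq, NULL, NULL);
	irq_set_chip_data(virq, NULL);
}

static const struct irq_domain_ops demo_ops = {
	.map	= demo_map,
	.unmap	= demo_unmap,
	.xlate	= irq_domain_xlate_onecell,
};
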
H A Dpipe_fs_i.h70 * ->unmap()
H A Drmap.h85 TTU_UNMAP = 1, /* unmap mode */
H A Dipack.h142 * @unmap_space: unmap IP address space
H A Drelay.h112 * buf_unmapped - relay buffer unmap notification
H A Defi.h355 u32 unmap; member in struct:__anon11636
375 u64 unmap; member in struct:__anon11637
395 void *unmap; member in struct:__anon11638
/linux-4.1.27/include/uapi/xen/
H A Dgntalloc.h47 /* Number of references to unmap */
52 * Sets up an unmap notification within the page, so that the other side can do
71 /* Action(s) to take on unmap */
H A Dgntdev.h120 * Sets up an unmap notification within the page, so that the other side can do
139 /* Action(s) to take on unmap */
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvif/
H A Ddriver.h13 void (*unmap)(void *priv, void __iomem *ptr, u32 size); member in struct:nvif_driver
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/
H A Dbar.h17 void (*unmap)(struct nvkm_bar *, struct nvkm_vma *); member in struct:nvkm_bar
H A Dmmu.h59 void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt); member in struct:nvkm_mmu
/linux-4.1.27/drivers/net/ethernet/brocade/bna/
H A Dbnad.c119 struct bnad_tx_unmap *unmap; bnad_tx_buff_unmap() local
123 unmap = &unmap_q[index]; bnad_tx_buff_unmap()
124 nvecs = unmap->nvecs; bnad_tx_buff_unmap()
126 skb = unmap->skb; bnad_tx_buff_unmap()
127 unmap->skb = NULL; bnad_tx_buff_unmap()
128 unmap->nvecs = 0; bnad_tx_buff_unmap()
130 dma_unmap_addr(&unmap->vectors[0], dma_addr), bnad_tx_buff_unmap()
132 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); bnad_tx_buff_unmap()
141 unmap = &unmap_q[index]; bnad_tx_buff_unmap()
145 dma_unmap_addr(&unmap->vectors[vector], dma_addr), bnad_tx_buff_unmap()
146 dma_unmap_len(&unmap->vectors[vector], dma_len), bnad_tx_buff_unmap()
148 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); bnad_tx_buff_unmap()
160 * so DMA unmap & freeing is fine.
190 struct bnad_tx_unmap *unmap; bnad_txcmpl_process() local
205 unmap = &unmap_q[cons]; bnad_txcmpl_process()
207 skb = unmap->skb; bnad_txcmpl_process()
212 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); bnad_txcmpl_process()
317 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) bnad_rxq_cleanup_page() argument
319 if (!unmap->page) bnad_rxq_cleanup_page()
323 dma_unmap_addr(&unmap->vector, dma_addr), bnad_rxq_cleanup_page()
324 unmap->vector.len, DMA_FROM_DEVICE); bnad_rxq_cleanup_page()
325 put_page(unmap->page); bnad_rxq_cleanup_page()
326 unmap->page = NULL; bnad_rxq_cleanup_page()
327 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); bnad_rxq_cleanup_page()
328 unmap->vector.len = 0; bnad_rxq_cleanup_page()
332 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) bnad_rxq_cleanup_skb() argument
334 if (!unmap->skb) bnad_rxq_cleanup_skb()
338 dma_unmap_addr(&unmap->vector, dma_addr), bnad_rxq_cleanup_skb()
339 unmap->vector.len, DMA_FROM_DEVICE); bnad_rxq_cleanup_skb()
340 dev_kfree_skb_any(unmap->skb); bnad_rxq_cleanup_skb()
341 unmap->skb = NULL; bnad_rxq_cleanup_skb()
342 dma_unmap_addr_set(&unmap->vector, dma_addr, 0); bnad_rxq_cleanup_skb()
343 unmap->vector.len = 0; bnad_rxq_cleanup_skb()
353 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; bnad_rxq_cleanup() local
356 bnad_rxq_cleanup_skb(bnad, unmap); bnad_rxq_cleanup()
358 bnad_rxq_cleanup_page(bnad, unmap); bnad_rxq_cleanup()
368 struct bnad_rx_unmap *unmap, *prev; bnad_rxq_refill_page() local
381 unmap = &unmap_q->unmap[prod]; bnad_rxq_refill_page()
388 prev = &unmap_q->unmap[unmap_q->reuse_pi]; bnad_rxq_refill_page()
403 unmap->page = page; bnad_rxq_refill_page()
404 unmap->page_offset = page_offset; bnad_rxq_refill_page()
405 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); bnad_rxq_refill_page()
406 unmap->vector.len = unmap_q->map_size; bnad_rxq_refill_page()
436 struct bnad_rx_unmap *unmap; bnad_rxq_refill_skb() local
447 unmap = &unmap_q->unmap[prod]; bnad_rxq_refill_skb()
459 unmap->skb = skb; bnad_rxq_refill_skb()
460 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); bnad_rxq_refill_skb()
461 unmap->vector.len = buff_sz; bnad_rxq_refill_skb()
515 struct bnad_rx_unmap *unmap; bnad_cq_drop_packet() local
520 unmap = &unmap_q->unmap[ci]; bnad_cq_drop_packet()
524 bnad_rxq_cleanup_skb(bnad, unmap); bnad_cq_drop_packet()
526 bnad_rxq_cleanup_page(bnad, unmap); bnad_cq_drop_packet()
537 struct bnad_rx_unmap *unmap; bnad_cq_setup_skb_frags() local
543 prefetch(page_address(unmap_q->unmap[sop_ci].page) + bnad_cq_setup_skb_frags()
544 unmap_q->unmap[sop_ci].page_offset); bnad_cq_setup_skb_frags()
547 unmap = &unmap_q->unmap[ci]; bnad_cq_setup_skb_frags()
551 dma_unmap_addr(&unmap->vector, dma_addr), bnad_cq_setup_skb_frags()
552 unmap->vector.len, DMA_FROM_DEVICE); bnad_cq_setup_skb_frags()
555 last_fraglen : unmap->vector.len; bnad_cq_setup_skb_frags()
556 skb->truesize += unmap->vector.len; bnad_cq_setup_skb_frags()
560 unmap->page, unmap->page_offset, len); bnad_cq_setup_skb_frags()
562 unmap->page = NULL; bnad_cq_setup_skb_frags()
563 unmap->vector.len = 0; bnad_cq_setup_skb_frags()
572 struct bnad_rx_unmap *unmap, u32 len) bnad_cq_setup_skb()
577 dma_unmap_addr(&unmap->vector, dma_addr), bnad_cq_setup_skb()
578 unmap->vector.len, DMA_FROM_DEVICE); bnad_cq_setup_skb()
583 unmap->skb = NULL; bnad_cq_setup_skb()
584 unmap->vector.len = 0; bnad_cq_setup_skb()
593 struct bnad_rx_unmap *unmap = NULL; bnad_cq_process() local
631 unmap = &unmap_q->unmap[sop_ci]; bnad_cq_process()
632 skb = unmap->skb; bnad_cq_process()
691 bnad_cq_setup_skb(bnad, skb, unmap, len); bnad_cq_process()
2937 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; bnad_start_xmit() local
3034 unmap = head_unmap; bnad_start_xmit()
3039 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); bnad_start_xmit()
3063 unmap = &unmap_q[prod]; bnad_start_xmit()
3068 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); bnad_start_xmit()
3071 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, bnad_start_xmit()
571 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, struct bnad_rx_unmap *unmap, u32 len) bnad_cq_setup_skb() argument
H A Dbnad.h262 struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
/linux-4.1.27/drivers/vfio/
H A Dvfio_iommu_type1.c372 * largest contiguous physical memory chunk to unmap. vfio_unmap_unpin()
417 struct vfio_iommu_type1_dma_unmap *unmap) vfio_dma_do_unmap()
426 if (unmap->iova & mask) vfio_dma_do_unmap()
428 if (!unmap->size || unmap->size & mask) vfio_dma_do_unmap()
439 * to unmap any range. Depending on the contiguousness of physical vfio_dma_do_unmap()
441 * or may not have worked. We only guaranteed unmap granularity vfio_dma_do_unmap()
446 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with vfio_dma_do_unmap()
447 * a zero sized unmap. Also, if an unmap request overlaps the first vfio_dma_do_unmap()
448 * address of a hugepage, the IOMMU will unmap the entire hugepage. vfio_dma_do_unmap()
449 * This also returns success and the returned unmap size reflects the vfio_dma_do_unmap()
453 * we take control out of the hands of the IOMMU. Therefore, an unmap vfio_dma_do_unmap()
455 * return success with zero sized unmap. And an unmap request covering vfio_dma_do_unmap()
456 * the first iova of mapping will unmap the entire range. vfio_dma_do_unmap()
467 dma = vfio_find_dma(iommu, unmap->iova, 0); vfio_dma_do_unmap()
468 if (dma && dma->iova != unmap->iova) { vfio_dma_do_unmap()
472 dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0); vfio_dma_do_unmap()
473 if (dma && dma->iova + dma->size != unmap->iova + unmap->size) { vfio_dma_do_unmap()
479 while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) { vfio_dma_do_unmap()
480 if (!iommu->v2 && unmap->iova > dma->iova) vfio_dma_do_unmap()
490 unmap->size = unmapped; vfio_dma_do_unmap()
692 * We change our unmap behavior slightly depending on whether the IOMMU
1008 struct vfio_iommu_type1_dma_unmap unmap; vfio_iommu_type1_ioctl() local
1013 if (copy_from_user(&unmap, (void __user *)arg, minsz)) vfio_iommu_type1_ioctl()
1016 if (unmap.argsz < minsz || unmap.flags) vfio_iommu_type1_ioctl()
1019 ret = vfio_dma_do_unmap(iommu, &unmap); vfio_iommu_type1_ioctl()
1023 return copy_to_user((void __user *)arg, &unmap, minsz) ? vfio_iommu_type1_ioctl()
416 vfio_dma_do_unmap(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_unmap *unmap) vfio_dma_do_unmap() argument
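
vfio_dma_do_unmap() above writes the byte total actually unmapped back into unmap->size before the copy_to_user(). A userspace sketch of the ioctl it serves, assuming 'container' is an open /dev/vfio/vfio fd with the type1 IOMMU enabled (demo_unmap is a made-up name):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int demo_unmap(int container, __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova  = iova,
		.size  = size,
	};

	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;
	/* the kernel wrote back the total actually unmapped (line 490 above) */
	printf("unmapped 0x%llx bytes\n", (unsigned long long)unmap.size);
	return 0;
}
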
H A Dvfio_iommu_spapr_tce.c68 * of locked pages on each map and unmap. For powerpc, the map unmap tce_iommu_enable()
/linux-4.1.27/tools/testing/selftests/vm/
H A Dhugetlbfstest.c32 static void do_mmap(int fd, int extra_flags, int unmap) do_mmap() argument
46 if (!unmap) do_mmap()
/linux-4.1.27/drivers/xen/xenbus/
H A Dxenbus_client.c74 int (*unmap)(struct xenbus_device *dev, void *vaddr); member in struct:xenbus_ring_ops
483 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; __xenbus_map_ring() local
515 memset(&unmap[j], 0, sizeof(unmap[j])); __xenbus_map_ring()
516 gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], __xenbus_map_ring()
522 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) __xenbus_map_ring()
527 if (unmap[i].status != GNTST_okay) { __xenbus_map_ring()
708 * @vaddr: addr to unmap
719 return ring_ops->unmap(dev, vaddr); xenbus_unmap_ring_vfree()
726 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; xenbus_unmap_ring_vfree_pv() local
752 memset(&unmap[i], 0, sizeof(unmap[i])); xenbus_unmap_ring_vfree_pv()
754 unmap[i].host_addr = arbitrary_virt_to_machine( xenbus_unmap_ring_vfree_pv()
756 unmap[i].dev_bus_addr = 0; xenbus_unmap_ring_vfree_pv()
757 unmap[i].handle = node->handles[i]; xenbus_unmap_ring_vfree_pv()
760 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)) xenbus_unmap_ring_vfree_pv()
766 if (unmap[i].status != GNTST_okay) { xenbus_unmap_ring_vfree_pv()
768 xenbus_dev_error(dev, unmap[i].status, xenbus_unmap_ring_vfree_pv()
770 node->handles[i], unmap[i].status); xenbus_unmap_ring_vfree_pv()
771 err = unmap[i].status; xenbus_unmap_ring_vfree_pv()
834 * @vaddrs: addresses to unmap
844 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; xenbus_unmap_ring() local
852 gnttab_set_unmap_op(&unmap[i], vaddrs[i], xenbus_unmap_ring()
855 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)) xenbus_unmap_ring()
860 if (unmap[i].status != GNTST_okay) { xenbus_unmap_ring()
861 xenbus_dev_error(dev, unmap[i].status, xenbus_unmap_ring()
863 handles[i], unmap[i].status); xenbus_unmap_ring()
864 err = unmap[i].status; xenbus_unmap_ring()
894 .unmap = xenbus_unmap_ring_vfree_pv,
899 .unmap = xenbus_unmap_ring_vfree_hvm,
/linux-4.1.27/drivers/dma/
H A Ddmaengine.c988 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1019 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); dmaengine_unmap() local
1020 struct device *dev = unmap->dev; dmaengine_unmap()
1023 cnt = unmap->to_cnt; dmaengine_unmap()
1025 dma_unmap_page(dev, unmap->addr[i], unmap->len, dmaengine_unmap()
1027 cnt += unmap->from_cnt; dmaengine_unmap()
1029 dma_unmap_page(dev, unmap->addr[i], unmap->len, dmaengine_unmap()
1031 cnt += unmap->bidi_cnt; dmaengine_unmap()
1033 if (unmap->addr[i] == 0) dmaengine_unmap()
1035 dma_unmap_page(dev, unmap->addr[i], unmap->len, dmaengine_unmap()
1038 cnt = unmap->map_cnt; dmaengine_unmap()
1039 mempool_free(unmap, __get_unmap_pool(cnt)->pool); dmaengine_unmap()
1042 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) dmaengine_unmap_put() argument
1044 if (unmap) dmaengine_unmap_put()
1045 kref_put(&unmap->kref, dmaengine_unmap); dmaengine_unmap_put()
1095 struct dmaengine_unmap_data *unmap; dmaengine_get_unmap_data() local
1097 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); dmaengine_get_unmap_data()
1098 if (!unmap) dmaengine_get_unmap_data()
1101 memset(unmap, 0, sizeof(*unmap)); dmaengine_get_unmap_data()
1102 kref_init(&unmap->kref); dmaengine_get_unmap_data()
1103 unmap->dev = dev; dmaengine_get_unmap_data()
1104 unmap->map_cnt = nr; dmaengine_get_unmap_data()
1106 return unmap; dmaengine_get_unmap_data()
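
dmaengine_unmap() above walks unmap->addr[] in three runs keyed by the counts. A fragment showing how the submit side lays those entries out, assuming 'dev', pages 'src'/'dst'/'tmp' and 'len' are already in hand:

unmap = dmaengine_get_unmap_data(dev, 3, GFP_KERNEL);
unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
unmap->to_cnt = 1;	/* addr[0..to_cnt) unmapped as DMA_TO_DEVICE */
unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
unmap->from_cnt = 1;	/* next from_cnt entries: DMA_FROM_DEVICE */
unmap->addr[2] = dma_map_page(dev, tmp, 0, len, DMA_BIDIRECTIONAL);
unmap->bidi_cnt = 1;	/* last bidi_cnt entries: DMA_BIDIRECTIONAL;
			   zero addresses are skipped (line 1033 above) */
unmap->len = len;	/* one shared length covers every entry */
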
H A Dmv_xor.c709 struct dmaengine_unmap_data *unmap; mv_xor_memcpy_self_test() local
732 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); mv_xor_memcpy_self_test()
733 if (!unmap) { mv_xor_memcpy_self_test()
740 unmap->addr[0] = src_dma; mv_xor_memcpy_self_test()
747 unmap->to_cnt = 1; mv_xor_memcpy_self_test()
751 unmap->addr[1] = dest_dma; mv_xor_memcpy_self_test()
758 unmap->from_cnt = 1; mv_xor_memcpy_self_test()
759 unmap->len = PAGE_SIZE; mv_xor_memcpy_self_test()
800 dmaengine_unmap_put(unmap); mv_xor_memcpy_self_test()
818 struct dmaengine_unmap_data *unmap; mv_xor_xor_self_test() local
863 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, mv_xor_xor_self_test()
865 if (!unmap) { mv_xor_xor_self_test()
872 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], mv_xor_xor_self_test()
874 dma_srcs[i] = unmap->addr[i]; mv_xor_xor_self_test()
875 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]); mv_xor_xor_self_test()
880 unmap->to_cnt++; mv_xor_xor_self_test()
883 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, mv_xor_xor_self_test()
885 dest_dma = unmap->addr[src_count]; mv_xor_xor_self_test()
886 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]); mv_xor_xor_self_test()
891 unmap->from_cnt = 1; mv_xor_xor_self_test()
892 unmap->len = PAGE_SIZE; mv_xor_xor_self_test()
937 dmaengine_unmap_put(unmap); mv_xor_xor_self_test()
/linux-4.1.27/drivers/scsi/
H A Dscsi_lib_dma.c39 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
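
scsi_dma_map()/scsi_dma_unmap() bracket a command's scatterlist around the hardware I/O. A sketch of where a low-level driver would call them, with made-up demo_* names and the usual mid-layer conventions assumed:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int demo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	int nseg = scsi_dma_map(cmd);	/* maps the sg list; < 0 on failure */

	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;
	/* ... build the HW request from scsi_sglist(cmd) and nseg ... */
	return 0;
}

/* the completion path calls scsi_dma_unmap(cmd) before cmd->scsi_done(cmd) */
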
H A Dscsi_trace.c64 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); scsi_trace_rw10()
118 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); scsi_trace_rw16()
172 trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); scsi_trace_rw32()
H A Dwd719x.c184 /* finish a SCSI command, mark SCB (if any) as free, unmap buffers */ wd719x_finish_cmd()
926 goto unmap; wd719x_pci_probe()
939 unmap: wd719x_pci_probe()
/linux-4.1.27/drivers/acpi/
H A Dnvs.c79 bool unmap; member in struct:nvs_page
138 if (entry->unmap) { suspend_nvs_free()
140 entry->unmap = false; suspend_nvs_free()
184 entry->unmap = !!entry->kaddr; suspend_nvs_save()
H A Dosl.c952 bool unmap = false; acpi_os_read_memory() local
962 unmap = true; acpi_os_read_memory()
985 if (unmap) acpi_os_read_memory()
1011 bool unmap = false; acpi_os_write_memory() local
1020 unmap = true; acpi_os_write_memory()
1040 if (unmap) acpi_os_write_memory()
/linux-4.1.27/include/asm-generic/
H A Dearly_ioremap.h32 * accordingly for subsequent map/unmap requests.
/linux-4.1.27/arch/x86/platform/geode/
H A Dnet5501.c118 goto unmap; net5501_present()
132 unmap: net5501_present()
/linux-4.1.27/include/xen/
H A Dgrant_table.h152 gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr, gnttab_set_unmap_op() argument
156 unmap->host_addr = addr; gnttab_set_unmap_op()
158 unmap->host_addr = __pa(addr); gnttab_set_unmap_op()
160 unmap->host_addr = addr; gnttab_set_unmap_op()
162 unmap->handle = handle; gnttab_set_unmap_op()
163 unmap->dev_bus_addr = 0; gnttab_set_unmap_op()
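
gnttab_set_unmap_op() only fills in the request; the xenbus hits above then batch the requests through one GNTTABOP_unmap_grant_ref hypercall and check each status. A sketch of that sequence, assuming at most 16 previously mapped grants (demo_unmap_grants is a made-up name):

#include <xen/grant_table.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

static int demo_unmap_grants(unsigned long *vaddrs, grant_handle_t *handles,
			     unsigned int n)
{
	struct gnttab_unmap_grant_ref unmap[16];
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, n))
		BUG();	/* the hypercall itself must not fail */

	for (i = 0; i < n; i++)
		if (unmap[i].status != GNTST_okay)
			err = unmap[i].status;	/* e.g. grant still in use */
	return err;
}
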
/linux-4.1.27/drivers/ntb/
H A Dntb_transport.c1061 struct dmaengine_unmap_data *unmap; ntb_async_rx() local
1080 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); ntb_async_rx()
1081 if (!unmap) ntb_async_rx()
1084 unmap->len = len; ntb_async_rx()
1085 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), ntb_async_rx()
1087 if (dma_mapping_error(device->dev, unmap->addr[0])) ntb_async_rx()
1090 unmap->to_cnt = 1; ntb_async_rx()
1092 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), ntb_async_rx()
1094 if (dma_mapping_error(device->dev, unmap->addr[1])) ntb_async_rx()
1097 unmap->from_cnt = 1; ntb_async_rx()
1099 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], ntb_async_rx()
1100 unmap->addr[0], len, ntb_async_rx()
1107 dma_set_unmap(txd, unmap); ntb_async_rx()
1113 dmaengine_unmap_put(unmap); ntb_async_rx()
1122 dmaengine_unmap_put(unmap); ntb_async_rx()
1124 dmaengine_unmap_put(unmap); ntb_async_rx()
1279 struct dmaengine_unmap_data *unmap; ntb_async_tx() local
1307 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); ntb_async_tx()
1308 if (!unmap) ntb_async_tx()
1311 unmap->len = len; ntb_async_tx()
1312 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), ntb_async_tx()
1314 if (dma_mapping_error(device->dev, unmap->addr[0])) ntb_async_tx()
1317 unmap->to_cnt = 1; ntb_async_tx()
1319 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, ntb_async_tx()
1326 dma_set_unmap(txd, unmap); ntb_async_tx()
1332 dmaengine_unmap_put(unmap); ntb_async_tx()
1339 dmaengine_unmap_put(unmap); ntb_async_tx()
1341 dmaengine_unmap_put(unmap); ntb_async_tx()
/linux-4.1.27/mm/
H A Dpercpu-vm.c139 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
142 * @page_start: page index of the first page to unmap
143 * @page_end: page index of the last page to unmap + 1
145 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
295 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
300 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
319 /* unmap and free */ pcpu_depopulate_chunk()
H A Dzpool.c309 * its operatons on the mapped handle memory quickly and unmap
325 * @handle The handle to unmap
334 zpool->driver->unmap(zpool->pool, handle); zpool_unmap_handle()
H A Dhighmem.c239 * Sleep for somebody else to unmap their entries map_new_virtual()
326 * kunmap_high - unmap a highmem page into memory
327 * @page: &struct page to unmap
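
kunmap_high() above is the slow path behind the everyday kmap()/kunmap() pairing for highmem pages. A sketch (demo_copy_into is a made-up name):

#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_into(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep for a free pkmap slot */

	memcpy(vaddr, src, len);
	kunmap(page);	/* unmap; may wake waiters in map_new_virtual() */
}
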
H A Dmlock.c418 * still on lru. In unmap path, pages might be scanned by reclaim
419 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
H A Dvmalloc.c595 * if we found no lazy vmap areas to unmap (callers can use this to optimise
688 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
698 * Free and unmap a vmap area
1026 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1076 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1078 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1255 * unmap_kernel_range_noflush - unmap kernel VM area
1256 * @addr: start of the VM area to unmap
1257 * @size: size of the VM area to unmap
1275 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1276 * @addr: start of the VM area to unmap
1277 * @size: size of the VM area to unmap
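
vm_unmap_ram() must be handed back the same count that went into vm_map_ram(); as the hits note, partial unmap is not supported. A sketch of the pairing (demo_map_ram is a made-up name):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static int demo_map_ram(struct page **pages, unsigned int count)
{
	void *mem = vm_map_ram(pages, count, -1 /* any node */, PAGE_KERNEL);

	if (!mem)
		return -ENOMEM;
	/* ... use the contiguous kernel mapping ... */
	vm_unmap_ram(mem, count);	/* count must match; no partial unmap */
	return 0;
}
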
H A Dzbud.c40 * allocation data and unmap the handle with zbud_unmap() when operations
197 .unmap = zbud_zpool_unmap,
H A Dmmu_notifier.c106 * unmap the address and return 1 or 0 depending if the mapping previously
/linux-4.1.27/arch/s390/pci/
H A Dpci_dma.c167 * validated. With lazy unmap, it also is skipped for previously valid dma_update_trans()
240 /* global flush after wrap-around with lazy unmap */ dma_alloc_iommu()
256 * Lazy flush for unmap: need to move next_bit to avoid address re-use dma_free_iommu()
327 zpci_err("unmap error:\n"); s390_dma_unmap_pages()
394 goto unmap; for_each_sg()
399 unmap: for_each_sg()
/linux-4.1.27/lib/
H A Ddevres.c104 * @dev: Generic device to unmap for
105 * @addr: Address to unmap
217 * @dev: Generic device to unmap for
218 * @addr: Address to unmap
309 * @addr: Address to unmap
413 * @mask: Mask of BARs to unmap and release
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_iommu.c102 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); msm_iommu_unmap()
123 .unmap = msm_iommu_unmap,
H A Dmsm_mmu.h28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, member in struct:msm_mmu_funcs
H A Dmsm_gem.c324 /* this is safe right now because we don't unmap until the msm_gem_get_iova()
352 // normally unmap here, but instead just mark that it could be msm_gem_put_iova()
529 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); msm_gem_free_object()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dnv04.c33 * VM map/unmap callbacks
105 priv->base.unmap = nv04_vm_unmap; nv04_mmu_ctor()
H A Dnv41.c35 * VM map/unmap callbacks
112 priv->base.unmap = nv41_vm_unmap; nv41_mmu_ctor()
H A Dnv44.c35 * VM map/unmap callbacks
183 priv->base.unmap = nv44_vm_unmap; nv44_mmu_ctor()
H A Dgf100.c223 priv->base.unmap = gf100_vm_unmap; gf100_mmu_ctor()
H A Dnv50.c227 priv->base.unmap = nv50_vm_unmap; nv50_mmu_ctor()
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Dpci_dlpar.c112 * and thus, we can safely unmap the IO space as it's not in use remove_phb_dynamic()
118 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", remove_phb_dynamic()
/linux-4.1.27/drivers/irqchip/
H A Dirq-crossbar.c143 * crossbar_domain_free - unmap/free a crossbar<->irq connection
144 * @domain: domain of irq to unmap
148 * We do not maintain a use count of total number of map/unmap
150 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
/linux-4.1.27/drivers/mtd/maps/
H A Dintel_vr_nor.c107 /* unmap the flash window */ vr_nor_destroy_maps()
110 /* unmap the csr window */ vr_nor_destroy_maps()
236 /* unmap the flash window */ vr_nor_pci_probe()
239 /* unmap the csr window */ vr_nor_pci_probe()
/linux-4.1.27/drivers/gpio/
H A Dgpio-sodaville.c233 goto unmap; sdv_gpio_probe()
239 goto unmap; sdv_gpio_probe()
244 goto unmap; sdv_gpio_probe()
250 unmap: sdv_gpio_probe()
/linux-4.1.27/sound/pci/ctxfi/
H A Dctvmem.h56 void (*unmap)(struct ct_vm *, struct ct_vm_block *block); member in struct:ct_vm
H A Dctsrc.h118 int (*unmap)(struct srcimp *srcimp); member in struct:srcimp_rsc_ops
H A Dctvmem.c205 vm->unmap = ct_vm_unmap; ct_vm_create()
/linux-4.1.27/drivers/input/serio/
H A Dambakmi.c148 goto unmap; amba_kmi_probe()
157 unmap: amba_kmi_probe()
/linux-4.1.27/fs/ntfs/
H A Daops.h38 * Unpin, unmap and release a page that was obtained from ntfs_map_page().
74 * unpin, unmap and release the page.
H A Dbitmap.c100 /* If we are done, unmap the page and return success. */ __ntfs_bitmap_set_bits_in_run()
/linux-4.1.27/arch/arm/mach-omap2/
H A Dboard-flash.c165 goto unmap; get_gpmc0_type()
178 unmap: get_gpmc0_type()
/linux-4.1.27/drivers/block/xen-blkback/
H A Dblkback.c271 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; free_persistent_gnts() local
279 unmap_data.unmap_ops = unmap; free_persistent_gnts()
285 gnttab_set_unmap_op(&unmap[segs_to_unmap], foreach_grant_safe()
312 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; xen_blkbk_unmap_purged_grants() local
320 unmap_data.unmap_ops = unmap; xen_blkbk_unmap_purged_grants()
329 gnttab_set_unmap_op(&unmap[segs_to_unmap], xen_blkbk_unmap_purged_grants()
733 req->unmap, req->unmap_pages); xen_blkbk_unmap_and_respond()
737 work->unmap_ops = req->unmap; xen_blkbk_unmap_and_respond()
757 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; xen_blkbk_unmap() local
766 unmap, unmap_pages); xen_blkbk_unmap()
768 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); xen_blkbk_unmap()
943 goto unmap; xen_blkbk_parse_indirect()
960 goto unmap; xen_blkbk_parse_indirect()
965 unmap: xen_blkbk_parse_indirect()
1063 * If all of the bio's have completed it is time to unmap __end_block_io_op()
1308 * the hypercall to unmap the grants - that is all done in dispatch_rw_block_io()
/linux-4.1.27/include/trace/events/
H A Diommu.h109 TRACE_EVENT(unmap,
/linux-4.1.27/drivers/gpu/drm/cirrus/
H A Dcirrus_fbdev.c29 bool unmap = false; cirrus_dirty_update() local
83 unmap = true; cirrus_dirty_update()
91 if (unmap) cirrus_dirty_update()
/linux-4.1.27/drivers/gpu/drm/mgag200/
H A Dmgag200_fb.c31 bool unmap = false; mga_dirty_update() local
86 unmap = true; mga_dirty_update()
94 if (unmap) mga_dirty_update()
/linux-4.1.27/arch/avr32/include/asm/
H A Ddma-mapping.h64 * Free (and unmap) a DMA buffer previously allocated by
94 * Free (and unmap) a DMA buffer previously allocated by
125 * dma_unmap_single - unmap a single buffer previously mapped
169 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
229 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
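
The avr32 header documents the generic streaming-DMA contract: unmap with the same handle, size and direction that mapped the buffer. A sketch of that pairing (demo_dma_tx is a made-up name):

#include <linux/dma-mapping.h>

static int demo_dma_tx(struct device *dev, void *buf, size_t size)
{
	dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... point the device at 'handle' and wait for completion ... */
	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE); /* same size/dir */
	return 0;
}
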
/linux-4.1.27/arch/powerpc/platforms/ps3/
H A Dmm.c890 * @bus_addr: The starting ioc bus address of the area to unmap.
891 * @len: Length in bytes of the area to unmap.
893 * This is the common dma unmap routine.
1031 * This routine will unmap all mapped areas and free the HV dma region.
1095 * @bus_addr: The starting ioc bus address of the area to unmap.
1096 * @len: Length in bytes of the area to unmap.
1111 .unmap = dma_sb_unmap_area
1118 .unmap = dma_sb_unmap_area_linear
1125 .unmap = dma_ioc0_unmap_area
1189 return r->region_ops->unmap(r, bus_addr, len); ps3_dma_unmap()
/linux-4.1.27/drivers/dma-buf/
H A Ddma-buf.c507 * @attach: [in] attachment to unmap buffer from
508 * @sg_table: [in] scatterlist info of the buffer to unmap
595 * @dmabuf: [in] buffer to unmap page from.
596 * @page_num: [in] page in PAGE_SIZE units to unmap.
630 * @dmabuf: [in] buffer to unmap page from.
631 * @page_num: [in] page in PAGE_SIZE units to unmap.
/linux-4.1.27/drivers/i2c/busses/
H A Di2c-iop3xx.c463 goto unmap; iop3xx_i2c_probe()
470 goto unmap; iop3xx_i2c_probe()
498 unmap: iop3xx_i2c_probe()
/linux-4.1.27/drivers/gpu/drm/ast/
H A Dast_fb.c55 bool unmap = false; ast_dirty_update() local
110 unmap = true; ast_dirty_update()
118 if (unmap) ast_dirty_update()
/linux-4.1.27/include/xen/interface/io/
H A Dblkif.h64 * (ATA) or unmap (SCSI) - conviently called discard requests are likely
80 * underlying block device supports trim (ATA) or unmap (SCSI) operations,
82 * More information about trim/unmap operations at:
/linux-4.1.27/tools/lib/traceevent/
H A Dplugin_scsi.c189 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); scsi_trace_rw10()
241 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); scsi_trace_rw16()
294 trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); scsi_trace_rw32()
/linux-4.1.27/drivers/media/pci/mantis/
H A Dmantis_pci.c131 dprintk(MANTIS_ERROR, 1, "ERROR: <%d> I/O unmap", ret); mantis_pci_init()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_nvif.c134 .unmap = nvkm_client_unmap,
/linux-4.1.27/include/media/
H A Dvideobuf-dma-sg.h39 * scatterlist for the buffer (and unmap frees it ...)
/linux-4.1.27/arch/powerpc/platforms/powernv/
H A Dopal-xscom.c122 .unmap = opal_scom_unmap,
/linux-4.1.27/arch/powerpc/kvm/
H A Dtrace_booke.h85 TP_printk("unmap hva 0x%lx\n", __entry->hva)
H A Dtrace_pr.h268 TP_printk("unmap hva 0x%lx\n", __entry->hva)
/linux-4.1.27/arch/alpha/include/asm/
H A Dfloppy.h51 /* different from last time -- unmap prev */ alpha_fd_dma_setup()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
H A Dtxrx.c210 ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n", ath10k_peer_unmap_event()
215 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", ath10k_peer_unmap_event()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvif/
H A Dobject.c151 struct nvif_ioctl_unmap unmap; nvif_object_unmap() member in struct:__anon4200
157 client->driver->unmap(client, object->map.ptr, nvif_object_unmap()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dscom.h55 void (*unmap)(scom_map_t map); member in struct:scom_controller
129 scom_controller->unmap(map); scom_unmap()
H A Dfloppy.h141 /* different from last time -- unmap prev */ hard_dma_setup()
/linux-4.1.27/arch/ia64/include/asm/
H A Dtlb.h25 * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
38 * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
/linux-4.1.27/arch/arm/kvm/
H A Dtrace.h192 TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
209 TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
H A Dmmu.c729 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
731 * @start: The intermediate physical base address of the range to unmap
732 * @size: The size of the area to unmap
755 * unmap any of them. stage2_unmap_memslot()
789 * Go through the memregions and unmap any reguler RAM
/linux-4.1.27/fs/ubifs/
H A Dgc.c96 * The GC write-buffer was synchronized, we may safely unmap switch_gc_head()
547 idx_gc->unmap = 0; ubifs_garbage_collect_leb()
832 * If a LEB has only dirty and free space, then we may safely unmap it and make
873 /* Mark GC'd index LEBs OK to unmap after this commit finishes */ ubifs_gc_start_commit()
875 idx_gc->unmap = 1; ubifs_gc_start_commit()
904 idx_gc->unmap = 1; ubifs_gc_start_commit()
927 if (idx_gc->unmap) { ubifs_gc_end_commit()
H A Dlog.c355 * finished UBIFS may safely unmap all the previous log LEBs). This function
524 dbg_log("unmap log LEB %d", lnum); ubifs_log_post_commit()
H A Dsb.c659 * fixup_leb - fixup/unmap an LEB containing free space.
678 dbg_mnt("unmap empty LEB %d", lnum); fixup_leb()
/linux-4.1.27/arch/ia64/sn/pci/
H A Dpci_dma.c232 * sn_dma_unmap_sg - unmap a DMA scatterlist
233 * @dev: device to unmap
234 * @sg: scatterlist to unmap
/linux-4.1.27/sound/pci/emu10k1/
H A Dmemory.c164 * unmap the block
275 /* no enough page - try to unmap some blocks */ snd_emu10k1_memblk_map()
347 blk->map_locked = 1; /* do not unmap this block! */ snd_emu10k1_alloc_pages()
/linux-4.1.27/drivers/ssb/
H A Ddriver_pcicore.c131 goto unmap; ssb_extpci_read_config()
149 unmap: ssb_extpci_read_config()
177 goto unmap; ssb_extpci_write_config()
198 unmap: ssb_extpci_write_config()
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/
H A Ddma.h24 /* map/unmap direction */
/linux-4.1.27/drivers/dma/ioat/
H A Ddma_v2.h114 * @len: total transaction length for unmap
/linux-4.1.27/arch/x86/kernel/
H A Ddevicetree.c229 .unmap = mp_irqdomain_unmap,
/linux-4.1.27/arch/xtensa/include/asm/
H A Dinitialize_mmu.h109 /* Step 3: unmap everything other than current area.
/linux-4.1.27/block/
H A Dblk-map.c137 * blk_rq_unmap_user - unmap a request with user data
/linux-4.1.27/arch/um/os-Linux/skas/
H A Dmem.c184 int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len, unmap() function
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/bar/
H A Dbase.c74 bar->unmap(bar, &barobj->vma); nvkm_barobj_dtor()
H A Dgf100.c165 priv->base.unmap = gf100_bar_unmap; gf100_bar_ctor()
H A Dnv50.c202 priv->base.unmap = nv50_bar_unmap; nv50_bar_ctor()
/linux-4.1.27/drivers/bcma/
H A Ddriver_pci_host.c124 goto unmap; bcma_extpci_read_config()
141 unmap: bcma_extpci_read_config()
189 goto unmap; bcma_extpci_write_config()
223 unmap: bcma_extpci_write_config()
/linux-4.1.27/arch/mips/include/asm/
H A Dmsa.h159 __BUILD_MSA_CTL_REG(unmap, 7)
/linux-4.1.27/include/rdma/
H A Dib_umem_odp.h61 * umem, allowing only a single thread to map/unmap pages. The mutex
/linux-4.1.27/include/uapi/mtd/
H A Dubi-user.h112 * Logical eraseblock unmap
115 * To unmap a logical eraseblock to a physical eraseblock, the %UBI_IOCEBUNMAP
120 * after the unmap ioctl returns, you may find the LEB mapped again to the same
414 * @lnum: logical eraseblock number to unmap
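
The ubi-user.h hits describe UBI_IOCEBUNMAP: the erase is lazy, so right after the ioctl the LEB may briefly appear mapped to the same PEB. A userspace sketch, assuming 'fd' is an open UBI volume node such as /dev/ubi0_0 (demo_leb_unmap is a made-up name):

#include <stdio.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int demo_leb_unmap(int fd, __s32 lnum)
{
	if (ioctl(fd, UBI_IOCEBUNMAP, &lnum)) {	/* schedules a lazy erase */
		perror("UBI_IOCEBUNMAP");
		return -1;
	}
	return 0;
}
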
/linux-4.1.27/arch/s390/kernel/
H A Dmachine_kexec.c138 * Map or unmap crashkernel memory
/linux-4.1.27/arch/m68k/mm/
H A Dinit.c160 /* insert also pointer table that we used to unmap the zero page */ init_pointer_tables()
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dfmr_ops.c160 /* After a disconnect, unmap all FMRs.
/linux-4.1.27/tools/power/acpi/os_specific/service_layers/
H A Dosunixmap.c134 * length - How much memory to unmap
/linux-4.1.27/drivers/s390/kvm/
H A Dkvm_virtio.c218 goto unmap; kvm_find_vq()
229 unmap: kvm_find_vq()
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_mr.c386 * ipath_unmap_fmr - unmap fast memory regions
387 * @fmr_list: the list of fast memory regions to unmap
H A Dipath_sdma.c744 goto unmap; ipath_sdma_verbs_send()
804 unmap: ipath_sdma_verbs_send()
/linux-4.1.27/arch/metag/kernel/
H A Dprocess.c405 * So we first map the 'big' image - and unmap the remainder at __metag_elf_map()
406 * the end. (which unmap is needed for ELF images with holes.) __metag_elf_map()
/linux-4.1.27/arch/x86/mm/
H A Dmpx.c376 * directory here means that we do not have to do xsave in the unmap mpx_enable_management()
689 * Someone else raced with us to unmap the table. unmap_single_bt()
716 * not shared, unmap this whole bounds table. Otherwise, only free
835 /* Success, or no tables to unmap */ mpx_unmap_tables()
844 * Only unmap the bounds table that are mpx_unmap_tables()
/linux-4.1.27/net/rds/
H A Diw_rdma.c503 * For FMR based MRs, the mappings on the unmap list are rds_iw_flush_mr_pool()
508 * will be destroyed by the unmap function. rds_iw_flush_mr_pool()
833 * Why do we do it this way, even though we could pipeline unmap rds_iw_unmap_fastreg_list()
840 * requires that a MR goes through an explicit unmap operation before rds_iw_unmap_fastreg_list()
860 /* Move all laundered mappings back to the unmap list.
/linux-4.1.27/drivers/net/ethernet/natsemi/
H A Dsonic.c125 /* unmap and free skbs that haven't been transmitted */ sonic_close()
137 /* unmap and free the receive buffers */ sonic_close()
327 /* and unmap DMA buffer */ sonic_interrupt()
/linux-4.1.27/drivers/misc/mic/card/
H A Dmic_virtio.c268 goto unmap; mic_find_vq()
275 goto unmap; mic_find_vq()
306 unmap: mic_find_vq()
/linux-4.1.27/drivers/xen/
H A Dgntdev.c357 pr_debug("unmap handle=%d st=%d\n", __unmap_grant_pages()
369 pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); unmap_grant_pages()
372 * already unmapped some of the grants. Only unmap valid ranges. unmap_grant_pages()
H A Dxen-scsiback.c115 * call to map/unmap grants. Don't choose it too large, as there are arrays
280 struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH]; scsiback_fast_flush_area() local
297 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), scsiback_fast_flush_area()
305 err = gnttab_unmap_refs(unmap, NULL, pages, invcount); scsiback_fast_flush_area()
311 err = gnttab_unmap_refs(unmap, NULL, pages, invcount); scsiback_fast_flush_area()
/linux-4.1.27/drivers/lguest/
H A Dcore.c330 goto unmap; init()
345 unmap: init()
/linux-4.1.27/drivers/mmc/host/
H A Dtoshsd.c655 goto unmap; toshsd_probe()
666 unmap: toshsd_probe()
/linux-4.1.27/drivers/staging/android/ion/
H A Dion_priv.h97 * @unmap_dma unmap the memory for dma
99 * @unmap_kernel unmap memory to the kernel
H A Dion.h173 * @handle: handle to unmap
/linux-4.1.27/drivers/pcmcia/
H A Dvrc4173_cardu.c511 goto unmap; vrc4173_cardu_probe()
526 unmap: vrc4173_cardu_probe()
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_mr.c478 * qib_unmap_fmr - unmap fast memory regions
479 * @fmr_list: the list of fast memory regions to unmap
H A Dqib_sdma.c372 /* if desc is part of this txp, unmap if needed */ qib_sdma_make_progress()
601 goto unmap; qib_sdma_verbs_send()
655 unmap: qib_sdma_verbs_send()
/linux-4.1.27/drivers/clk/ti/
H A Dfapll.c592 goto unmap; ti_fapll_setup()
652 unmap: ti_fapll_setup()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/
H A Dioctl.c339 nv_ioctl(object, "unmap size %d\n", size); nvkm_ioctl_unmap()
341 nv_ioctl(object, "unmap\n"); nvkm_ioctl_unmap()
/linux-4.1.27/arch/parisc/kernel/
H A Dpci-dma.c327 panic("%s: pcxl_free_range() Too many pages to unmap.\n", pcxl_free_range()
470 * simple map/unmap case. However, it IS necessary if if pa11_dma_unmap_single()
/linux-4.1.27/arch/arm/common/
H A Ddmabounce.c4 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
578 MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
/linux-4.1.27/sound/pci/asihpi/
H A Dhpioctl.c384 /* unmap previously mapped pci mem space */ asihpi_adapter_probe()
543 /* unmap PCI memory space, mapped during device init. */ asihpi_adapter_remove()
/linux-4.1.27/drivers/media/v4l2-core/
H A Dvideobuf-vmalloc.c96 in order to do memory unmap. videobuf_vm_close()
H A Dvideobuf-dma-contig.c108 in order to do memory unmap. videobuf_vm_close()
H A Dvideobuf2-vmalloc.c287 /* stealing dmabuf mutex to serialize map/unmap operations */ vb2_vmalloc_dmabuf_ops_map()
/linux-4.1.27/drivers/net/wireless/prism54/
H A Dislpci_hotplug.c245 /* free the PCI memory and unmap the remapped page */ prism54_remove()
/linux-4.1.27/drivers/pci/
H A Drom.c175 * pci_unmap_rom - unmap the ROM from kernel space
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_gem.h177 /* unmap sgt from dma region. */
H A Dexynos_drm_g2d.c850 * commands in run_cmdlist have been completed so unmap all gem g2d_free_runqueue_node()
1381 * unmap all gem objects not completed. g2d_close()
1384 * there may be some commands in inuse_cmdlist so unmap g2d_close()
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dgem.c163 * does most of the work for us including the actual map/unmap calls
/linux-4.1.27/drivers/cpufreq/
H A Dpasemi-cpufreq.c226 * We don't support CPU hotplug. Don't unmap after the system pas_cpufreq_cpu_exit()
/linux-4.1.27/arch/x86/kvm/
H A Diommu.c121 * important because we unmap and unpin in 4kb steps later. kvm_iommu_map_pages()
/linux-4.1.27/arch/x86/platform/efi/
H A Dquirks.c254 * Once setup is done earlier, unmap the EFI memory map on mismatched efi_apply_memmap_quirks()
/linux-4.1.27/arch/sparc/include/asm/
H A Ddma.h121 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
/linux-4.1.27/drivers/block/
H A Dz2ram.c321 * FIXME: unmap memory z2_release()
H A Dnvme-core.c1790 goto unmap; nvme_submit_io()
1798 goto unmap; nvme_submit_io()
1803 goto unmap; nvme_submit_io()
1823 unmap: nvme_submit_io()
2379 goto unmap; nvme_dev_map()
2389 goto unmap; nvme_dev_map()
2399 unmap: nvme_dev_map()
2827 goto unmap; nvme_dev_start()
2867 unmap: nvme_dev_start()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_mn.c119 * unmap them by move them into system domain again.
/linux-4.1.27/drivers/platform/x86/
H A Dibm_rtl.c320 /* unmap, unlink and remove all traces */ ibm_rtl_exit()
/linux-4.1.27/include/uapi/linux/
H A Dkd.h51 #define KDUNMAPDISP 0x4B3D /* unmap display from address space */
/linux-4.1.27/arch/s390/mm/
H A Dpgtable.c350 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
366 * gmap_unmap_segment - unmap segment from the guest address space
369 * @len: length of the memory area to unmap
371 * Returns 0 if the unmap succeeded, -EINVAL if not.
/linux-4.1.27/arch/powerpc/kernel/
H A Disa-bridge.c193 * isa_bridge_remove - Remove/unmap an ISA bridge
H A Dpci_64.c117 /* This is a PHB, we fully unmap the IO area */ pcibios_unmap_io_space()
/linux-4.1.27/kernel/irq/
H A Dirqdomain.c164 * @ops: map/unmap domain callbacks
256 if (domain->ops->unmap) irq_domain_disassociate()
257 domain->ops->unmap(domain, irq); irq_domain_disassociate()
519 * @virq: linux irq number of the interrupt to unmap
/linux-4.1.27/include/drm/
H A Ddrm_vma_manager.h221 * @file_mapping: Address space to unmap @node from
/linux-4.1.27/drivers/net/ethernet/ti/
H A Dnetcp_core.c559 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); netcp_free_rx_desc_chain()
588 dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", netcp_empty_rx_queue()
616 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); netcp_process_one_rx_packet()
649 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); netcp_process_one_rx_packet()
739 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); netcp_free_rx_buf()
928 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); netcp_free_tx_desc_chain()
949 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); netcp_process_tx_compl_packets()
/linux-4.1.27/arch/arm/mm/
H A Ddma-mapping.c92 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
926 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
929 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1637 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1640 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1653 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1656 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
H A Dsge.c308 * deferred_unmap_destructor - unmap a packet when it is freed
342 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), unmap_sgl()
349 goto unmap; unmap_sgl()
385 * @unmap: whether the buffers should be unmapped for DMA
391 unsigned int n, bool unmap) free_tx_desc()
400 if (unmap) free_tx_desc()
428 * @unmap: whether the buffers should be unmapped for DMA
435 bool unmap) reclaim_completed_tx()
447 free_tx_desc(adap, q, avail, unmap); reclaim_completed_tx()
510 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
390 free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap) free_tx_desc() argument
434 reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, bool unmap) reclaim_completed_tx() argument
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/
H A Dsge.c327 unmap: unmap_sgl()
335 goto unmap; unmap_sgl()
372 * @unmap: whether the buffers should be unmapped for DMA
378 unsigned int n, bool unmap) free_tx_desc()
384 const int need_unmap = need_skb_unmap() && unmap; free_tx_desc()
390 * unmap it from PCI DMA space (if required) and free it. free_tx_desc()
424 * @unmap: whether the buffers should be unmapped for DMA
432 bool unmap) reclaim_completed_tx()
444 free_tx_desc(adapter, tq, avail, unmap); reclaim_completed_tx()
491 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
377 free_tx_desc(struct adapter *adapter, struct sge_txq *tq, unsigned int n, bool unmap) free_tx_desc() argument
430 reclaim_completed_tx(struct adapter *adapter, struct sge_txq *tq, bool unmap) reclaim_completed_tx() argument
/linux-4.1.27/drivers/atm/
H A Dfirestream.c1715 goto unmap; fs_init()
1728 goto unmap; fs_init()
1808 goto unmap; fs_init()
1818 goto unmap; fs_init()
1845 goto unmap; fs_init()
1895 unmap: fs_init()
H A Deni.c1744 goto unmap; eni_do_init()
1772 goto unmap; eni_do_init()
1776 goto unmap; eni_do_init()
1786 goto unmap; eni_do_init()
1789 unmap: eni_do_init()
/linux-4.1.27/drivers/isdn/hardware/eicon/
H A Dos_pri.c280 Step 1: unmap all BAR's, if any was mapped diva_pri_cleanup_adapter()
546 ** Stop Adapter, but do not unmap/unregister - adapter
/linux-4.1.27/drivers/misc/
H A Dhpilo.c812 goto unmap; ilo_probe()
838 unmap: ilo_probe()
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_memfree.c512 * To make our bookkeeping simpler, we don't unmap DB mthca_unmap_user_db()
691 /* XXX may be able to unmap more pages now */ mthca_free_db()
H A Dmthca_cmd.h290 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
/linux-4.1.27/drivers/video/fbdev/
H A Damba-clcd.c524 goto unmap; clcdfb_register()
543 unmap: clcdfb_register()
/linux-4.1.27/include/uapi/linux/genwqe/
H A Dgenwqe_card.h456 * @size: size of the area pin/dma-map/unmap
464 * unmap to get the DMA addresses.
/linux-4.1.27/drivers/mtd/nand/
H A Dcs553x_nand.c342 /* unmap physical address */ cs553x_cleanup()
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
H A Dpci.c265 ah->iobase = mem; /* So we can unmap it on detach */ ath5k_pci_probe()
/linux-4.1.27/drivers/mfd/
H A Dtc3589x.c238 .unmap = tc3589x_irq_unmap,
/linux-4.1.27/drivers/staging/i2o/
H A Dpci.c52 * Remove all allocated DMA memory and unmap memory IO regions. If MTRR
/linux-4.1.27/drivers/pci/hotplug/
H A Drpadlpar_core.c388 printk(KERN_ERR "%s: failed to unmap bus range\n", dlpar_remove_pci_slot()
/linux-4.1.27/arch/tile/mm/
H A Dhighmem.c192 * those platforms, we do have to take a global lock to map and unmap
/linux-4.1.27/arch/um/include/asm/
H A Dpgtable.h252 * fix_range knows to unmap it. _PAGE_NEWPROT is specific to set_pte()
/linux-4.1.27/arch/um/include/shared/
H A Dos.h258 extern int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
/linux-4.1.27/arch/unicore32/kernel/
H A Dpci.c136 * We use GFP_DMA to allocate safe buffers to do map/unmap.
/linux-4.1.27/drivers/char/agp/
H A Dnvidia-agp.c179 /* unmap aperture */ nvidia_cleanup()
/linux-4.1.27/arch/mips/cavium-octeon/
H A Docteon-irq.c1235 .unmap = octeon_irq_free_cd,
1241 .unmap = octeon_irq_free_cd,
1925 .unmap = octeon_irq_free_cd,
2178 .unmap = octeon_irq_free_cd,
/linux-4.1.27/drivers/acpi/acpica/
H A Dtbutils.c325 * so unmap the RSDP here before mapping other tables acpi_tb_parse_root_table()
/linux-4.1.27/arch/m68k/include/asm/
H A Doplib.h51 /* Map and unmap devices in IO space at virtual addresses. Note that the
/linux-4.1.27/arch/arm/include/asm/
H A Ddma-mapping.h235 * Free (and unmap) a DMA buffer previously allocated by
/linux-4.1.27/drivers/net/ethernet/freescale/fs_enet/
H A Dfs_enet-main.c280 /* unmap */ fs_enet_tx_napi()
456 /* unmap */ fs_cleanup_bds()
471 /* unmap */ fs_cleanup_bds()
/linux-4.1.27/drivers/media/pci/sta2x11/
H A Dsta2x11_vip.c1072 goto unmap; sta2x11_vip_init_one()
1128 unmap: sta2x11_vip_init_one()
1156 * unmap ioadresses
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duislib.c661 /* you must disable channel interrupts BEFORE you unmap the channel, destroy_device()
662 * because if you unmap first, there may still be some activity going destroy_device()
668 /* unmap the channel memory for the device. */ destroy_device()
/linux-4.1.27/drivers/scsi/mpt2sas/
H A Dmpt2sas_transport.c1983 goto unmap; _transport_smp_handler()
1992 goto unmap; _transport_smp_handler()
2004 goto unmap; _transport_smp_handler()
2021 goto unmap; _transport_smp_handler()
2136 unmap: _transport_smp_handler()
/linux-4.1.27/drivers/scsi/mpt3sas/
H A Dmpt3sas_transport.c1966 goto unmap; _transport_smp_handler()
1975 goto unmap; _transport_smp_handler()
1987 goto unmap; _transport_smp_handler()
2004 goto unmap; _transport_smp_handler()
2097 unmap: _transport_smp_handler()
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_queue_pair.c121 * New-style VMX'en will also unmap guest memory, if the guest is
347 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
407 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
2151 * When a guest endpoint detaches, it will unmap and unregister the guest
2228 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", vmci_qp_broker_detach()
2456 * On hosted, when we unmap queue pairs, the VMX will also vmci_qp_broker_unmap()
2457 * unmap the guest memory, so we invalidate the previously vmci_qp_broker_unmap()

Completed in 4437 milliseconds
