#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>

#include <asm/bootinfo.h>
#include <boot_param.h>
#include <dma-coherence.h>

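/*
 * Coherent allocation: mask off any caller-supplied zone specifiers,
 * then re-derive the GFP zone from the device's coherent DMA mask
 * (e.g. a device whose coherent_dma_mask is below DMA_BIT_MASK(32)
 * must allocate from ZONE_DMA).  The mb() orders the CPU's writes to
 * the buffer before any subsequent device access.
 */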
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev->coherent_dma_mask < DMA_BIT_MASK(40))
		gfp |= __GFP_DMA32;
	else
#endif
	;
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
	mb();
	return ret;
}

static void loongson_dma_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

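/*
 * The wrappers below add a full memory barrier after each
 * CPU-to-device operation so that CPU writes (including any
 * bounce-buffer copies done inside swiotlb) reach memory before the
 * device is told to start DMA.
 */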
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
					dir, attrs);
	mb();
	return daddr;
}

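/* Note: the caller's attrs are not forwarded here; NULL is passed. */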
static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
	mb();

	return r;
}

static void loongson_dma_sync_single_for_device(struct device *dev,
				dma_addr_t dma_handle, size_t size,
				enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
	mb();
}

static void loongson_dma_sync_sg_for_device(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir)
{
	swiotlb_sync_sg_for_device(dev, sg, nents, dir);
	mb();
}

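/*
 * Clamp the DMA mask to what the platform supports: if the caller
 * asks for more address bits than loongson_sysconf.dma_mask_bits
 * provides (e.g. DMA_BIT_MASK(64) on a 40-bit platform), the mask is
 * limited to the platform width and -EIO is returned.
 */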
static int loongson_dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
		*dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
		return -EIO;
	}

	*dev->dma_mask = mask;

	return 0;
}

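/*
 * With CONFIG_PHYS48_TO_HT40, Loongson-3 keeps the node id in the
 * high bits of its 48-bit physical address, but the HT bus only
 * carries 40-bit DMA addresses, so the node id is relocated to bits
 * 37~38.  Worked example (values are illustrative): physical address
 * 0x1000_0000_1000 on node 1 (bit 44 set) translates to DMA address
 * 0x20_0000_1000 (bit 37 set), and dma_to_phys() is the exact
 * inverse.
 */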
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	long nid;
#ifdef CONFIG_PHYS48_TO_HT40
	/* We extract the 2-bit node id (bits 44~47 are reserved for it,
	 * but only bits 44~45 are used now) from Loongson-3's 48-bit
	 * physical address space and embed it into the 40-bit DMA space */
	nid = (paddr >> 44) & 0x3;
	paddr = ((nid << 44) ^ paddr) | (nid << 37);
#endif
	return paddr;
}

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	long nid;
#ifdef CONFIG_PHYS48_TO_HT40
	/* We extract the 2-bit node id (bits 37~38 here) from the 40-bit
	 * DMA address and move it back up to bits 44~45 of the 48-bit
	 * physical address space */
	nid = (daddr >> 37) & 0x3;
	daddr = ((nid << 37) ^ daddr) | (nid << 44);
#endif
	return daddr;
}

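/*
 * Device-direction operations go through the loongson_* wrappers so
 * they pick up the write barrier; CPU-direction syncs and the unmap
 * paths use the generic swiotlb implementations directly.
 */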
static struct dma_map_ops loongson_dma_map_ops = {
	.alloc = loongson_dma_alloc_coherent,
	.free = loongson_dma_free_coherent,
	.map_page = loongson_dma_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = loongson_dma_map_sg,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = loongson_dma_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = loongson_dma_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.dma_supported = swiotlb_dma_supported,
	.set_dma_mask = loongson_dma_set_mask
};

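/*
 * Boot-time setup: swiotlb_init(1) sets up the bounce-buffer pool,
 * then the Loongson ops are installed as the platform-wide MIPS DMA
 * ops.
 */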
void __init plat_swiotlb_setup(void)
{
	swiotlb_init(1);
	mips_dma_map_ops = &loongson_dma_map_ops;
}