#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
				   resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_wc(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
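
/*
 * Typical use of the managed mapping above, sketched from a hypothetical
 * platform driver's probe routine ("pdev" and "res" come from that driver,
 * not from this file). The mapping is undone automatically on driver
 * detach, so no explicit devm_iounmap() is needed on the normal path:
 *
 *	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;
 */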

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory region
 * and ioremaps it either as cacheable or as non-cacheable memory depending on
 * the resource's flags. All operations are managed and will be undone on
 * driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	if (res->flags & IORESOURCE_CACHEABLE)
		dest_ptr = devm_ioremap(dev, res->start, size);
	else
		dest_ptr = devm_ioremap_nocache(dev, res->start, size);

	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
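
/*
 * Hypothetical example of devm_ioport_map() from a driver's probe routine;
 * "port", "nr_ports" and "FOO_REG_STATUS" are driver-defined here. The
 * returned cookie is used with ioread*()/iowrite*() and is unmapped
 * automatically on driver detach:
 *
 *	base = devm_ioport_map(&pdev->dev, port, nr_ports);
 *	if (!base)
 *		return -ENOMEM;
 *	status = ioread8(base + FOO_REG_STATUS);
 */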

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
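
/*
 * Hypothetical PCI driver usage of pcim_iomap_regions(), assuming the
 * driver has already called pcim_enable_device() and that "DRV_NAME" and
 * "priv->regs" are driver-defined. BAR 0 is requested and mapped; both
 * the region and the mapping are released on driver detach:
 *
 *	rc = pcim_iomap_regions(pdev, 1 << 0, DRV_NAME);
 *	if (rc)
 *		return rc;
 *	priv->regs = pcim_iomap_table(pdev)[0];
 */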

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
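
/*
 * Hypothetical probe-time usage of pcim_iomap_regions_request_all()
 * (CONFIG_PCI only), with "DRV_NAME" driver-defined: all BARs are
 * requested, only BARs 0 and 2 are mapped, and everything is undone on
 * driver detach or explicitly via
 * pcim_iounmap_regions(pdev, (1 << 0) | (1 << 2)):
 *
 *	rc = pcim_iomap_regions_request_all(pdev, (1 << 0) | (1 << 2),
 *					    DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 *	mmio = pcim_iomap_table(pdev)[2];
 */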