/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>

#include <generated/utsrelease.h>

#include "base.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}

static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif

enum {
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORT,
};

static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}

/* firmware behavior options */
#define FW_OPT_UEVENT	(1U << 0)
#define FW_OPT_NOWAIT	(1U << 1)
#ifdef CONFIG_FW_LOADER_USER_HELPER
#define FW_OPT_USERHELPER	(1U << 2)
#else
#define FW_OPT_USERHELPER	0
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
#define FW_OPT_FALLBACK		FW_OPT_USERHELPER
#else
#define FW_OPT_FALLBACK		0
#endif
#define FW_OPT_NO_WARN	(1U << 3)
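/*
 * Summary of how the request paths below combine these flags (see the
 * corresponding functions): request_firmware() passes
 * FW_OPT_UEVENT | FW_OPT_FALLBACK, request_firmware_direct() passes
 * FW_OPT_UEVENT | FW_OPT_NO_WARN, and request_firmware_nowait() passes
 * FW_OPT_NOWAIT | FW_OPT_FALLBACK plus either FW_OPT_UEVENT or
 * FW_OPT_USERHELPER depending on its 'uevent' argument.
 */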
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct firmware_buf {
	struct kref ref;
	struct list_head list;
	struct completion completion;
	struct firmware_cache *fwc;
	unsigned long status;
	void *data;
	size_t size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;
	bool need_uevent;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;
#endif
	char fw_id[];
};

struct fw_cache_entry {
	struct list_head list;
	char name[];
};

struct fw_name_devm {
	unsigned long magic;
	char name[];
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)

#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;

static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);

	if (!buf)
		return buf;

	kref_init(&buf->ref);
	strcpy(buf->fw_id, fw_name);
	buf->fwc = fwc;
	init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&buf->pending_list);
#endif

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}

static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}

static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
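/*
 * Note: a second request for an image that is still being loaded gets the
 * existing firmware_buf back from fw_lookup_and_allocate_buf() (return
 * value 1) with its refcount raised; _request_firmware_prepare() then
 * waits for that shared buf in sync_cached_firmware_buf() instead of
 * loading the image a second time.
 */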
static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
#endif
		vfree(buf->data);
	kfree(buf);
}

static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}

/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is generally built into the
 * kernel instead of being a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
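/*
 * Illustrative sketch (the directory below is a made-up example): booting
 * with 'firmware_class.path=/opt/fw' makes fw_get_filesystem_firmware()
 * try, in order,
 *
 *	/opt/fw/<name>
 *	/lib/firmware/updates/<UTS_RELEASE>/<name>
 *	/lib/firmware/updates/<name>
 *	/lib/firmware/<UTS_RELEASE>/<name>
 *	/lib/firmware/<name>
 *
 * When the parameter is unset, the empty fw_path_para entry is skipped.
 */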
static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
	int size;
	char *buf;
	int rc;

	if (!S_ISREG(file_inode(file)->i_mode))
		return -EINVAL;
	size = i_size_read(file_inode(file));
	if (size <= 0)
		return -EINVAL;
	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;
	rc = kernel_read(file, 0, buf, size);
	if (rc != size) {
		if (rc > 0)
			rc = -EIO;
		goto fail;
	}
	rc = security_kernel_fw_from_file(file, buf, size);
	if (rc)
		goto fail;
	fw_buf->data = buf;
	fw_buf->size = size;
	return 0;
fail:
	vfree(buf);
	return rc;
}

static int fw_get_filesystem_firmware(struct device *device,
				      struct firmware_buf *buf)
{
	int i;
	int rc = -ENOENT;
	char *path = __getname();

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		struct file *file;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);

		file = filp_open(path, O_RDONLY, 0);
		if (IS_ERR(file))
			continue;
		rc = fw_read_file_contents(file, buf);
		fput(file);
		if (rc)
			dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
				 path, rc);
		else
			break;
	}
	__putname(path);

	if (!rc) {
		dev_dbg(device, "firmware: direct-loading firmware %s\n",
			buf->fw_id);
		mutex_lock(&fw_lock);
		set_bit(FW_STATUS_DONE, &buf->status);
		complete_all(&buf->completion);
		mutex_unlock(&fw_lock);
	}

	return rc;
}

/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}

/* store the pages buffer into firmware from buf */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}

#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
}

static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}

/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
			   strlen(name) + 1, GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;

	fwn->magic = (unsigned long)&fw_cache;
	strcpy(fwn->name, name);
	devres_add(dev, fwn);

	return 0;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif


/*
 * user-mode helper code
 */
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct firmware_priv {
	bool nowait;
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}

static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which the user can write to 'loading'
	 * between loading done and disappearance of 'loading'.
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}

static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);

	/* avoid user action after loading abort */
	fw_priv->buf = NULL;
}

#define is_fw_load_aborted(buf)	\
	test_bit(FW_STATUS_ABORT, &(buf)->status)

static LIST_HEAD(pending_fw_head);

/* reboot notifier to avoid a deadlock with the usermodehelper lock */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	mutex_lock(&fw_lock);
	while (!list_empty(&pending_fw_head))
		__fw_load_abort(list_first_entry(&pending_fw_head,
						 struct firmware_buf,
						 pending_list));
	mutex_unlock(&fw_lock);
	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
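/*
 * Example (the sysfs path follows from the class name "firmware" below):
 * 'echo 30 > /sys/class/firmware/timeout' gives the user-mode helper 30
 * seconds per image, and writing 0 means wait forever.
 */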
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}

/**
 * timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware.  Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}

static struct class_attribute firmware_class_attrs[] = {
	__ATTR_RW(timeout),
	__ATTR_NULL
};

static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);
}

static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		err = do_firmware_uevent(fw_priv, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}

/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif

/* A paged buffer should be mapped/unmapped only once. */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}

/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *  1: Start a load, discarding any previous partial load.
 *  0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (!fw_buf)
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			kfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			int rc;

			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * the same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = fw_map_pages_buf(fw_buf);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_fw_from_file(NULL,
						fw_buf->data, fw_buf->size);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_buf->pending_list);
			if (rc) {
				set_bit(FW_STATUS_ABORT, &fw_buf->status);
				written = rc;
			}
			complete_all(&fw_buf->completion);
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = kmalloc(new_array_size * sizeof(void *),
				    GFP_KERNEL);
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		kfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}

/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
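/*
 * Minimal userspace-side sketch of the 'loading'/'data' protocol above
 * (the device path and image location are placeholders; a real helper is
 * normally driven by the $DEVPATH and $FIRMWARE variables of the uevent):
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading	(or -1 to abort on error)
 */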
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_priv->fw = firmware;
	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_priv;
}

/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	buf->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = wait_for_completion_interruptible_timeout(&buf->completion,
							   timeout);
	if (retval == -ERESTARTSYS || !retval) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_priv);
		mutex_unlock(&fw_lock);
	} else if (retval > 0) {
		retval = 0;
	}

	if (is_fw_load_aborted(buf))
		retval = -EAGAIN;
	else if (!buf->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags, long timeout)
{
	struct firmware_priv *fw_priv;

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv))
		return PTR_ERR(fw_priv);

	fw_priv->buf = firmware->priv;
	return _request_firmware_load(fw_priv, opt_flags, timeout);
}

#ifdef CONFIG_PM_SLEEP
/* kill pending requests without uevent to avoid blocking suspend */
static void kill_requests_without_uevent(void)
{
	struct firmware_buf *buf;
	struct firmware_buf *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
		if (!buf->need_uevent)
			__fw_load_abort(buf);
	}
	mutex_unlock(&fw_lock);
}
#endif

#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags,
			 long timeout)
{
	return -ENOENT;
}

/* No abort during direct loading */
#define is_fw_load_aborted(buf) false

#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif

#endif /* CONFIG_FW_LOADER_USER_HELPER */


/* wait until the shared firmware_buf becomes ready (or error) */
static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
	int ret = 0;

	mutex_lock(&fw_lock);
	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
		if (is_fw_load_aborted(buf)) {
			ret = -ENOENT;
			break;
		}
		mutex_unlock(&fw_lock);
		ret = wait_for_completion_interruptible(&buf->completion);
		mutex_lock(&fw_lock);
	}
	mutex_unlock(&fw_lock);
	return ret;
}

/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name)) {
		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		ret = sync_cached_firmware_buf(buf);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}

static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       unsigned int opt_flags)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || is_fw_load_aborted(buf)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * The device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT))
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (buf->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
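/*
 * Overall flow of _request_firmware() below: prepare the struct firmware
 * (built-in image or a shared/new firmware_buf), try direct loading from
 * the filesystem paths in fw_path[], optionally fall back to the user-mode
 * helper when FW_OPT_USERHELPER is set, and finally hand the buffer to the
 * caller via assign_firmware_buf().
 */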
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, unsigned int opt_flags)
{
	struct firmware *fw;
	long timeout;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0')
		return -EINVAL;

	ret = _request_firmware_prepare(&fw, name, device);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = 0;
	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			ret = -EBUSY;
			goto out;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			goto out;
		}
	}

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (opt_flags & FW_OPT_USERHELPER) {
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags, timeout);
		}
	}

	if (!ret)
		ret = assign_firmware_buf(fw, device, opt_flags);

	usermodehelper_read_unlock();

 out:
	if (ret < 0) {
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}

/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device,
				FW_OPT_UEVENT | FW_OPT_FALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
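/*
 * Illustrative driver-side sketch (the device, image name and foo_*
 * helpers below are made up, not part of this file):
 *
 *	static int foo_load_firmware(struct foo_device *foo)
 *	{
 *		const struct firmware *fw;
 *		int err;
 *
 *		err = request_firmware(&fw, "foo/foo-fw.bin", foo->dev);
 *		if (err)
 *			return err;
 *
 *		err = foo_download(foo, fw->data, fw->size);
 *		release_firmware(fw);
 *		return err;
 *	}
 */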
/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * fall back to the usermode helper even if the firmware couldn't be loaded
 * directly from the filesystem.  Hence it's useful for loading optional
 * firmware which isn't always present, without the extra long timeouts of
 * udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);

/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);

/* Async support */
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	unsigned int opt_flags;
};

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree(fw_work);
}

/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero, else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 *	Caller must hold the reference count of @device.
 *
 *	Asynchronous variant of request_firmware() for user contexts:
 *		- sleep for as small periods as possible since it may
 *		  increase kernel boot time of built-in device drivers
 *		  requesting firmware in their ->probe() methods, if
 *		  @gfp is GFP_KERNEL.
 *
 *		- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = name;
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);

	if (!try_module_get(module)) {
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
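/*
 * Illustrative asynchronous sketch (all foo_* identifiers and the image
 * name are made up, not part of this file):
 *
 *	static void foo_fw_ready(const struct firmware *fw, void *context)
 *	{
 *		struct foo_device *foo = context;
 *
 *		if (!fw)
 *			return;		/- request failed, @fw is NULL -/
 *		foo_download(foo, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 *	err = request_firmware_nowait(THIS_MODULE, true, "foo/foo-fw.bin",
 *				      foo->dev, GFP_KERNEL, foo,
 *				      foo_fw_ready);
 */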
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interaction
 * with userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}

static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}

/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}

static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
	if (!fce)
		goto exit;

	strcpy(fce->name, name);
exit:
	return fce;
}

static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}

static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}

/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}

static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}

/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded in the
 * device's devres list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmware images for the device,
 * and the device driver can then load its firmware easily at a
 * time when the system is not ready to complete firmware loading.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, and the system is ready for
	 * completing firmware loading now.  The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}

/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmware images which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay - uncache devices' firmware after a delay
 * @delay: number of milliseconds to wait before uncaching device firmware
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
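/*
 * PM flow implemented below: on PM_HIBERNATION_PREPARE, PM_SUSPEND_PREPARE
 * and PM_RESTORE_PREPARE the notifier aborts uevent-less pending requests
 * and caches every image recorded in the devices' devres lists; on the
 * corresponding PM_POST_* events the cache is dropped again roughly ten
 * seconds later via device_uncache_fw_images_delay(10 * MSEC_PER_SEC).
 */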
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		kill_requests_without_uevent();
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case the system sleep failed and syscore_suspend was
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}

/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif

static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}

static int __init firmware_class_init(void)
{
	fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
	register_reboot_notifier(&fw_shutdown_nb);
	return class_register(&firmware_class);
#else
	return 0;
#endif
}

static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	unregister_reboot_notifier(&fw_shutdown_nb);
	class_unregister(&firmware_class);
#endif
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);