root/drivers/acpi/ec.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. acpi_ec_started
  2. acpi_ec_event_enabled
  3. acpi_ec_flushed
  4. acpi_ec_read_status
  5. acpi_ec_read_data
  6. acpi_ec_write_cmd
  7. acpi_ec_write_data
  8. acpi_ec_cmd_string
  9. acpi_ec_is_gpe_raised
  10. acpi_ec_enable_gpe
  11. acpi_ec_disable_gpe
  12. acpi_ec_clear_gpe
  13. acpi_ec_submit_request
  14. acpi_ec_complete_request
  15. acpi_ec_mask_gpe
  16. acpi_ec_unmask_gpe
  17. acpi_ec_submit_flushable_request
  18. acpi_ec_submit_query
  19. acpi_ec_complete_query
  20. __acpi_ec_enable_event
  21. __acpi_ec_disable_event
  22. acpi_ec_clear
  23. acpi_ec_enable_event
  24. __acpi_ec_flush_work
  25. acpi_ec_disable_event
  26. acpi_ec_flush_work
  27. acpi_ec_guard_event
  28. ec_transaction_polled
  29. ec_transaction_completed
  30. ec_transaction_transition
  31. advance_transaction
  32. start_transaction
  33. ec_guard
  34. ec_poll
  35. acpi_ec_transaction_unlocked
  36. acpi_ec_transaction
  37. acpi_ec_burst_enable
  38. acpi_ec_burst_disable
  39. acpi_ec_read
  40. acpi_ec_write
  41. ec_read
  42. ec_write
  43. ec_transaction
  44. ec_get_handle
  45. acpi_ec_start
  46. acpi_ec_stopped
  47. acpi_ec_stop
  48. acpi_ec_enter_noirq
  49. acpi_ec_leave_noirq
  50. acpi_ec_block_transactions
  51. acpi_ec_unblock_transactions
  52. acpi_ec_get_query_handler
  53. acpi_ec_get_query_handler_by_value
  54. acpi_ec_query_handler_release
  55. acpi_ec_put_query_handler
  56. acpi_ec_add_query_handler
  57. acpi_ec_remove_query_handlers
  58. acpi_ec_remove_query_handler
  59. acpi_ec_create_query
  60. acpi_ec_delete_query
  61. acpi_ec_event_processor
  62. acpi_ec_query
  63. acpi_ec_check_event
  64. acpi_ec_event_handler
  65. acpi_ec_gpe_handler
  66. acpi_ec_space_handler
  67. acpi_ec_free
  68. acpi_ec_alloc
  69. acpi_ec_register_query_methods
  70. ec_parse_device
  71. ec_install_handlers
  72. ec_remove_handlers
  73. acpi_ec_setup
  74. acpi_ec_ecdt_get_handle
  75. acpi_ec_add
  76. acpi_ec_remove
  77. ec_parse_io_ports
  78. acpi_ec_dsdt_probe
  79. acpi_ec_ecdt_start
  80. ec_flag_query_handshake
  81. ec_clear_on_resume
  82. ec_correct_ecdt
  83. ec_honor_ecdt_gpe
  84. acpi_ec_ecdt_probe
  85. acpi_ec_suspend
  86. acpi_ec_suspend_noirq
  87. acpi_ec_resume_noirq
  88. acpi_ec_resume
  89. acpi_ec_mark_gpe_for_wake
  90. acpi_ec_set_gpe_wake_mask
  91. acpi_ec_dispatch_gpe
  92. param_set_event_clearing
  93. param_get_event_clearing
  94. acpi_ec_destroy_workqueues
  95. acpi_ec_init_workqueues
  96. acpi_ec_init
  97. acpi_ec_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  *  ec.c - ACPI Embedded Controller Driver (v3)
   4  *
   5  *  Copyright (C) 2001-2015 Intel Corporation
   6  *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
   7  *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
   8  *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
   9  *            2004       Luming Yu <luming.yu@intel.com>
  10  *            2001, 2002 Andy Grover <andrew.grover@intel.com>
  11  *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  12  *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
  13  */
  14 
  15 /* Uncomment next line to get verbose printout */
  16 /* #define DEBUG */
  17 #define pr_fmt(fmt) "ACPI: EC: " fmt
  18 
  19 #include <linux/kernel.h>
  20 #include <linux/module.h>
  21 #include <linux/init.h>
  22 #include <linux/types.h>
  23 #include <linux/delay.h>
  24 #include <linux/interrupt.h>
  25 #include <linux/list.h>
  26 #include <linux/spinlock.h>
  27 #include <linux/slab.h>
  28 #include <linux/suspend.h>
  29 #include <linux/acpi.h>
  30 #include <linux/dmi.h>
  31 #include <asm/io.h>
  32 
  33 #include "internal.h"
  34 
  35 #define ACPI_EC_CLASS                   "embedded_controller"
  36 #define ACPI_EC_DEVICE_NAME             "Embedded Controller"
  37 #define ACPI_EC_FILE_INFO               "info"
  38 
  39 /* EC status register */
  40 #define ACPI_EC_FLAG_OBF        0x01    /* Output buffer full */
  41 #define ACPI_EC_FLAG_IBF        0x02    /* Input buffer full */
  42 #define ACPI_EC_FLAG_CMD        0x08    /* Input buffer contains a command */
  43 #define ACPI_EC_FLAG_BURST      0x10    /* burst mode */
  44 #define ACPI_EC_FLAG_SCI        0x20    /* EC-SCI occurred */
  45 
  46 /*
  47  * The SCI_EVT clearing timing is not defined by the ACPI specification.
  48  * This leads to lots of practical timing issues for the host EC driver.
  49  * The following variations are defined (from the target EC firmware's
  50  * perspective):
  51  * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
  52  *         target can clear SCI_EVT at any time so long as the host can see
  53  *         the indication by reading the status register (EC_SC). So the
  54  *         host should re-check SCI_EVT after the first time the SCI_EVT
  55  *         indication is seen, which is the same time the query request
  56  *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
  57  *         at any later time could indicate another event. Normally such
  58  *         kind of EC firmware has implemented an event queue and will
  59  *         return 0x00 to indicate "no outstanding event".
  60  * QUERY: After seeing the query request (QR_EC) written to the command
  61  *        register (EC_CMD) by the host and having prepared the responding
  62  *        event value in the data register (EC_DATA), the target can safely
  63  *        clear SCI_EVT because the target can confirm that the current
  64  *        event is being handled by the host. The host then should check
  65  *        SCI_EVT right after reading the event response from the data
  66  *        register (EC_DATA).
  67  * EVENT: After seeing the event response read from the data register
  68  *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
  69  *        target requires time to notice the change in the data register
  70  *        (EC_DATA), the host may be required to wait additional guarding
  71  *        time before checking the SCI_EVT again. Such guarding may not be
  72  *        necessary if the host is notified via another IRQ.
  73  */
  74 #define ACPI_EC_EVT_TIMING_STATUS       0x00
  75 #define ACPI_EC_EVT_TIMING_QUERY        0x01
  76 #define ACPI_EC_EVT_TIMING_EVENT        0x02
  77 
  78 /* EC commands */
  79 enum ec_command {
  80         ACPI_EC_COMMAND_READ = 0x80,
  81         ACPI_EC_COMMAND_WRITE = 0x81,
  82         ACPI_EC_BURST_ENABLE = 0x82,
  83         ACPI_EC_BURST_DISABLE = 0x83,
  84         ACPI_EC_COMMAND_QUERY = 0x84,
  85 };
  86 
  87 #define ACPI_EC_DELAY           500     /* Wait 500ms max. during EC ops */
  88 #define ACPI_EC_UDELAY_GLK      1000    /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL     550     /* Wait 550us for EC transaction polling */
  90 #define ACPI_EC_CLEAR_MAX       100     /* Maximum number of events to query
  91                                          * when trying to clear the EC */
  92 #define ACPI_EC_MAX_QUERIES     16      /* Maximum number of parallel queries */
  93 
  94 enum {
  95         EC_FLAGS_QUERY_ENABLED,         /* Query is enabled */
  96         EC_FLAGS_QUERY_PENDING,         /* Query is pending */
  97         EC_FLAGS_QUERY_GUARDING,        /* Guard for SCI_EVT check */
  98         EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
  99         EC_FLAGS_EC_HANDLER_INSTALLED,  /* OpReg handler installed */
 100         EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
 101         EC_FLAGS_STARTED,               /* Driver is started */
 102         EC_FLAGS_STOPPED,               /* Driver is stopped */
 103         EC_FLAGS_GPE_MASKED,            /* GPE masked */
 104 };
 105 
 106 #define ACPI_EC_COMMAND_POLL            0x01 /* Available for command byte */
 107 #define ACPI_EC_COMMAND_COMPLETE        0x02 /* Completed last byte */
 108 
 109 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
 110 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 111 module_param(ec_delay, uint, 0644);
 112 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
 113 
 114 static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
 115 module_param(ec_max_queries, uint, 0644);
 116 MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
 117 
 118 static bool ec_busy_polling __read_mostly;
 119 module_param(ec_busy_polling, bool, 0644);
 120 MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
 121 
 122 static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
 123 module_param(ec_polling_guard, uint, 0644);
 124 MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
 125 
 126 static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
 127 
/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, a GPE storm is assumed to have occurred and the GPE will
 * be disabled for the normal transaction.
 */
 133 static unsigned int ec_storm_threshold  __read_mostly = 8;
 134 module_param(ec_storm_threshold, uint, 0644);
 135 MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
 136 
 137 static bool ec_freeze_events __read_mostly = false;
 138 module_param(ec_freeze_events, bool, 0644);
 139 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
 140 
 141 static bool ec_no_wakeup __read_mostly;
 142 module_param(ec_no_wakeup, bool, 0644);
 143 MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
 144 
 145 struct acpi_ec_query_handler {
 146         struct list_head node;
 147         acpi_ec_query_func func;
 148         acpi_handle handle;
 149         void *data;
 150         u8 query_bit;
 151         struct kref kref;
 152 };
 153 
 154 struct transaction {
 155         const u8 *wdata;
 156         u8 *rdata;
 157         unsigned short irq_count;
 158         u8 command;
 159         u8 wi;
 160         u8 ri;
 161         u8 wlen;
 162         u8 rlen;
 163         u8 flags;
 164 };
 165 
 166 struct acpi_ec_query {
 167         struct transaction transaction;
 168         struct work_struct work;
 169         struct acpi_ec_query_handler *handler;
 170 };
 171 
 172 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
 173 static void advance_transaction(struct acpi_ec *ec);
 174 static void acpi_ec_event_handler(struct work_struct *work);
 175 static void acpi_ec_event_processor(struct work_struct *work);
 176 
 177 struct acpi_ec *first_ec;
 178 EXPORT_SYMBOL(first_ec);
 179 
 180 static struct acpi_ec *boot_ec;
 181 static bool boot_ec_is_ecdt = false;
 182 static struct workqueue_struct *ec_wq;
 183 static struct workqueue_struct *ec_query_wq;
 184 
 185 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
 186 static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
 187 static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
 188 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 189 
 190 /* --------------------------------------------------------------------------
 191  *                           Logging/Debugging
 192  * -------------------------------------------------------------------------- */
 193 
 194 /*
 195  * Splitters used by the developers to track the boundary of the EC
 196  * handling processes.
 197  */
 198 #ifdef DEBUG
 199 #define EC_DBG_SEP      " "
 200 #define EC_DBG_DRV      "+++++"
 201 #define EC_DBG_STM      "====="
 202 #define EC_DBG_REQ      "*****"
 203 #define EC_DBG_EVT      "#####"
 204 #else
 205 #define EC_DBG_SEP      ""
 206 #define EC_DBG_DRV
 207 #define EC_DBG_STM
 208 #define EC_DBG_REQ
 209 #define EC_DBG_EVT
 210 #endif
 211 
 212 #define ec_log_raw(fmt, ...) \
 213         pr_info(fmt "\n", ##__VA_ARGS__)
 214 #define ec_dbg_raw(fmt, ...) \
 215         pr_debug(fmt "\n", ##__VA_ARGS__)
 216 #define ec_log(filter, fmt, ...) \
 217         ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
 218 #define ec_dbg(filter, fmt, ...) \
 219         ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
 220 
 221 #define ec_log_drv(fmt, ...) \
 222         ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
 223 #define ec_dbg_drv(fmt, ...) \
 224         ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
 225 #define ec_dbg_stm(fmt, ...) \
 226         ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
 227 #define ec_dbg_req(fmt, ...) \
 228         ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
 229 #define ec_dbg_evt(fmt, ...) \
 230         ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
 231 #define ec_dbg_ref(ec, fmt, ...) \
 232         ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
 233 
 234 /* --------------------------------------------------------------------------
 235  *                           Device Flags
 236  * -------------------------------------------------------------------------- */
 237 
 238 static bool acpi_ec_started(struct acpi_ec *ec)
 239 {
 240         return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
 241                !test_bit(EC_FLAGS_STOPPED, &ec->flags);
 242 }
 243 
 244 static bool acpi_ec_event_enabled(struct acpi_ec *ec)
 245 {
 246         /*
 247          * There is an OSPM early stage logic. During the early stages
 248          * (boot/resume), OSPMs shouldn't enable the event handling, only
 249          * the EC transactions are allowed to be performed.
 250          */
 251         if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
 252                 return false;
 253         /*
 254          * However, disabling the event handling is experimental for late
 255          * stage (suspend), and is controlled by the boot parameter of
 256          * "ec_freeze_events":
 257          * 1. true:  The EC event handling is disabled before entering
 258          *           the noirq stage.
 259          * 2. false: The EC event handling is automatically disabled as
 260          *           soon as the EC driver is stopped.
 261          */
 262         if (ec_freeze_events)
 263                 return acpi_ec_started(ec);
 264         else
 265                 return test_bit(EC_FLAGS_STARTED, &ec->flags);
 266 }
 267 
/*
 * All outstanding requests are complete when only the initial reference
 * (taken by acpi_ec_start()'s path via acpi_ec_submit_request()) remains.
 */
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}
 272 
 273 /* --------------------------------------------------------------------------
 274  *                           EC Registers
 275  * -------------------------------------------------------------------------- */
 276 
/* Read the EC status register (EC_SC) and trace the individual flag bits. */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}
 291 
/*
 * Read the EC data register (EC_DATA).  The access timestamp is recorded
 * so that ec_guard() can enforce the polling guard interval.
 */
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}
 300 
/*
 * Write a command byte to the EC command register (EC_SC/EC_CMD) and
 * record the access timestamp for the guard interval.
 */
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}
 307 
/*
 * Write a data byte to the EC data register (EC_DATA) and record the
 * access timestamp for the guard interval.
 */
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}
 314 
 315 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 316 static const char *acpi_ec_cmd_string(u8 cmd)
 317 {
 318         switch (cmd) {
 319         case 0x80:
 320                 return "RD_EC";
 321         case 0x81:
 322                 return "WR_EC";
 323         case 0x82:
 324                 return "BE_EC";
 325         case 0x83:
 326                 return "BD_EC";
 327         case 0x84:
 328                 return "QR_EC";
 329         }
 330         return "UNKNOWN";
 331 }
 332 #else
 333 #define acpi_ec_cmd_string(cmd)         "UNDEF"
 334 #endif
 335 
 336 /* --------------------------------------------------------------------------
 337  *                           GPE Registers
 338  * -------------------------------------------------------------------------- */
 339 
 340 static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
 341 {
 342         acpi_event_status gpe_status = 0;
 343 
 344         (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
 345         return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
 346 }
 347 
/*
 * Enable the EC GPE.  @open selects the reference-counted ACPICA enable
 * (acpi_enable_gpe) vs. the raw unmask (acpi_set_gpe); the raw form may
 * only be used while a driver reference is held (see BUG_ON below).
 */
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software need to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}
 366 
 367 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
 368 {
 369         if (close)
 370                 acpi_disable_gpe(NULL, ec->gpe);
 371         else {
 372                 BUG_ON(ec->reference_count < 1);
 373                 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
 374         }
 375 }
 376 
 377 static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
 378 {
 379         /*
 380          * GPE STS is a W1C register, which means:
 381          * 1. Software can clear it without worrying about clearing other
 382          *    GPEs' STS bits when the hardware sets them in parallel.
 383          * 2. As long as software can ensure only clearing it when it is
 384          *    set, hardware won't set it in parallel.
 385          * So software can clear GPE in any contexts.
 386          * Warning: do not move the check into advance_transaction() as the
 387          * EC commands will be sent without GPE raised.
 388          */
 389         if (!acpi_ec_is_gpe_raised(ec))
 390                 return;
 391         acpi_clear_gpe(NULL, ec->gpe);
 392 }
 393 
 394 /* --------------------------------------------------------------------------
 395  *                           Transaction Management
 396  * -------------------------------------------------------------------------- */
 397 
 398 static void acpi_ec_submit_request(struct acpi_ec *ec)
 399 {
 400         ec->reference_count++;
 401         if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
 402             ec->reference_count == 1)
 403                 acpi_ec_enable_gpe(ec, true);
 404 }
 405 
 406 static void acpi_ec_complete_request(struct acpi_ec *ec)
 407 {
 408         bool flushed = false;
 409 
 410         ec->reference_count--;
 411         if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
 412             ec->reference_count == 0)
 413                 acpi_ec_disable_gpe(ec, true);
 414         flushed = acpi_ec_flushed(ec);
 415         if (flushed)
 416                 wake_up(&ec->wait);
 417 }
 418 
 419 static void acpi_ec_mask_gpe(struct acpi_ec *ec)
 420 {
 421         if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
 422                 acpi_ec_disable_gpe(ec, false);
 423                 ec_dbg_drv("Polling enabled");
 424                 set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
 425         }
 426 }
 427 
 428 static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
 429 {
 430         if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
 431                 clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
 432                 acpi_ec_enable_gpe(ec, false);
 433                 ec_dbg_drv("Polling disabled");
 434         }
 435 }
 436 
 437 /*
 438  * acpi_ec_submit_flushable_request() - Increase the reference count unless
 439  *                                      the flush operation is not in
 440  *                                      progress
 441  * @ec: the EC device
 442  *
 443  * This function must be used before taking a new action that should hold
 444  * the reference count.  If this function returns false, then the action
 445  * must be discarded or it will prevent the flush operation from being
 446  * completed.
 447  */
 448 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
 449 {
 450         if (!acpi_ec_started(ec))
 451                 return false;
 452         acpi_ec_submit_request(ec);
 453         return true;
 454 }
 455 
/*
 * Schedule _Qxx query processing for a pending SCI_EVT.  The GPE is
 * masked first; the event work is queued at most once per pending query
 * (guarded by EC_FLAGS_QUERY_PENDING).
 */
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	acpi_ec_mask_gpe(ec);
	/* Event handling may still be blocked during boot/resume. */
	if (!acpi_ec_event_enabled(ec))
		return;
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		queue_work(ec_wq, &ec->work);
	}
}
 468 
 469 static void acpi_ec_complete_query(struct acpi_ec *ec)
 470 {
 471         if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
 472                 ec_dbg_evt("Command(%s) unblocked",
 473                            acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
 474         acpi_ec_unmask_gpe(ec);
 475 }
 476 
/*
 * Unblock event handling.  Callers in this file hold ec->lock when
 * invoking this.
 */
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}
 487 
 488 static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
 489 {
 490         if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
 491                 ec_log_drv("event blocked");
 492 }
 493 
 494 /*
 495  * Process _Q events that might have accumulated in the EC.
 496  * Run with locked ec mutex.
 497  */
 498 static void acpi_ec_clear(struct acpi_ec *ec)
 499 {
 500         int i, status;
 501         u8 value = 0;
 502 
 503         for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
 504                 status = acpi_ec_query(ec, &value);
 505                 if (status || !value)
 506                         break;
 507         }
 508         if (unlikely(i == ACPI_EC_CLEAR_MAX))
 509                 pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
 510         else
 511                 pr_info("%d stale EC events cleared\n", i);
 512 }
 513 
/*
 * Enable event handling if the driver is started, then drain stale
 * events when the EC_FLAGS_CLEAR_ON_RESUME quirk is in effect.
 */
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}
 527 
 528 #ifdef CONFIG_PM_SLEEP
/*
 * Flush all outstanding EC work: ec_wq is drained before ec_query_wq is
 * flushed (the order here is deliberate; do not swap the two calls).
 */
static void __acpi_ec_flush_work(void)
{
	drain_workqueue(ec_wq); /* flush ec->work */
	flush_workqueue(ec_query_wq); /* flush queries */
}
 534 
/* Block event handling and flush any work already in flight. */
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/*
	 * When ec_freeze_events is true, we need to flush events in
	 * the proper position before entering the noirq stage.
	 */
	__acpi_ec_flush_work();
}
 549 
 550 void acpi_ec_flush_work(void)
 551 {
 552         /* Without ec_wq there is nothing to flush. */
 553         if (!ec_wq)
 554                 return;
 555 
 556         __acpi_ec_flush_work();
 557 }
 558 #endif /* CONFIG_PM_SLEEP */
 559 
/*
 * Decide whether an additional guard period is required before
 * re-checking SCI_EVT (only relevant for the "event" clearing timing).
 */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}
 588 
 589 static int ec_transaction_polled(struct acpi_ec *ec)
 590 {
 591         unsigned long flags;
 592         int ret = 0;
 593 
 594         spin_lock_irqsave(&ec->lock, flags);
 595         if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
 596                 ret = 1;
 597         spin_unlock_irqrestore(&ec->lock, flags);
 598         return ret;
 599 }
 600 
 601 static int ec_transaction_completed(struct acpi_ec *ec)
 602 {
 603         unsigned long flags;
 604         int ret = 0;
 605 
 606         spin_lock_irqsave(&ec->lock, flags);
 607         if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
 608                 ret = 1;
 609         spin_unlock_irqrestore(&ec->lock, flags);
 610         return ret;
 611 }
 612 
/*
 * Move ec->curr into the given state (ACPI_EC_COMMAND_POLL or
 * ACPI_EC_COMMAND_COMPLETE).  For QR_EC transactions, the point at which
 * the next query may be submitted depends on ec_event_clearing:
 * - STATUS: as soon as QR_EC is accepted (POLL),
 * - QUERY:  once the response byte has been read (COMPLETE),
 * - EVENT:  COMPLETE additionally arms the guard (QUERY_GUARDING).
 * ec->curr must be non-NULL when this is called.
 */
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
 628 
/*
 * Core EC transaction state machine.  Invoked from both the GPE handler
 * (IRQ context) and from polling paths (task context) to push the
 * current transaction forward by one step based on the EC status
 * register.  Also detects SCI_EVT and GPE storms as side effects.
 */
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		/* Command byte already sent: move write/read bytes along. */
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			/* OBF is bit 0 (0x01), so "== 1" means "OBF set". */
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			/* Write-only transaction: all bytes consumed. */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		/*
		 * Command byte not yet sent.  With the QUERY_HANDSHAKE
		 * quirk, a QR_EC seeing SCI_EVT already cleared is
		 * completed in software with a "no event" (0x00) response.
		 */
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If SCI bit is set, then don't think it's a false IRQ
	 * otherwise will take a not handled IRQ as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_mask_gpe(ec);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
 720 
 721 static void start_transaction(struct acpi_ec *ec)
 722 {
 723         ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
 724         ec->curr->flags = 0;
 725 }
 726 
/*
 * Wait for the current transaction to complete while honoring the
 * guarding interval between consecutive EC accesses.
 *
 * Returns 0 as soon as the transaction is seen completed, or -ETIME
 * once the deadline (last access timestamp + guard interval) passes.
 */
static int ec_guard(struct acpi_ec *ec)
{
        /* ec->polling_guard is in microseconds */
        unsigned long guard = usecs_to_jiffies(ec->polling_guard);
        unsigned long timeout = ec->timestamp + guard;

        /* Ensure guarding period before polling EC status */
        do {
                if (ec->busy_polling) {
                        /* Perform busy polling */
                        if (ec_transaction_completed(ec))
                                return 0;
                        udelay(jiffies_to_usecs(guard));
                } else {
                        /*
                         * Perform wait polling
                         * 1. Wait the transaction to be completed by the
                         *    GPE handler after the transaction enters
                         *    ACPI_EC_COMMAND_POLL state.
                         * 2. A special guarding logic is also required
                         *    for event clearing mode "event" before the
                         *    transaction enters ACPI_EC_COMMAND_POLL
                         *    state.
                         */
                        if (!ec_transaction_polled(ec) &&
                            !acpi_ec_guard_event(ec))
                                break;
                        if (wait_event_timeout(ec->wait,
                                               ec_transaction_completed(ec),
                                               guard))
                                return 0;
                }
        } while (time_before(jiffies, timeout));
        return -ETIME;
}
 761 
/*
 * Poll the EC until the current transaction (ec->curr) completes.
 *
 * If the transaction does not complete within ec_delay milliseconds it
 * is restarted, up to 5 times (assuming the controller was reset),
 * before giving up with -ETIME.  Returns 0 on completion.
 */
static int ec_poll(struct acpi_ec *ec)
{
        unsigned long flags;
        int repeat = 5; /* number of command restarts */

        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
                do {
                        if (!ec_guard(ec))
                                return 0;
                        /* Advance the state machine by hand in case the GPE was lost */
                        spin_lock_irqsave(&ec->lock, flags);
                        advance_transaction(ec);
                        spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
                spin_unlock_irqrestore(&ec->lock, flags);
        }
        return -ETIME;
}
 784 
/*
 * Execute transaction @t on @ec.  The caller must already hold
 * ec->mutex (and the ACPI global lock when required); ec->lock is taken
 * here only around the state transitions, and dropped while polling so
 * the GPE handler can make progress.
 *
 * Returns 0 on success, -EINVAL when the EC is stopped and the request
 * is not flushable, or the ec_poll() error code.
 */
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
                                        struct transaction *t)
{
        unsigned long tmp;
        int ret = 0;

        /* start transaction */
        spin_lock_irqsave(&ec->lock, tmp);
        /* Enable GPE for command processing (IBF=0/OBF=1) */
        if (!acpi_ec_submit_flushable_request(ec)) {
                ret = -EINVAL;
                goto unlock;
        }
        ec_dbg_ref(ec, "Increase command");
        /* following two actions should be kept atomic */
        ec->curr = t;
        ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
        start_transaction(ec);
        spin_unlock_irqrestore(&ec->lock, tmp);

        ret = ec_poll(ec);

        spin_lock_irqsave(&ec->lock, tmp);
        /* Undo the storm-threshold GPE masking applied in advance_transaction() */
        if (t->irq_count == ec_storm_threshold)
                acpi_ec_unmask_gpe(ec);
        ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
        ec->curr = NULL;
        /* Disable GPE for command processing (IBF=0/OBF=1) */
        acpi_ec_complete_request(ec);
        ec_dbg_ref(ec, "Decrease command");
unlock:
        spin_unlock_irqrestore(&ec->lock, tmp);
        return ret;
}
 819 
/*
 * Validate @t, serialize on ec->mutex (plus the ACPI global lock when
 * the EC declares _GLK), and run the transaction.
 *
 * t->rdata is zeroed up front so callers always see deterministic data
 * even when the transaction fails.  Returns 0 on success or a negative
 * error code.
 */
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
        int status;
        u32 glk;

        if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
                return -EINVAL;
        if (t->rdata)
                memset(t->rdata, 0, t->rlen);

        mutex_lock(&ec->mutex);
        if (ec->global_lock) {
                status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
                if (ACPI_FAILURE(status)) {
                        status = -ENODEV;
                        goto unlock;
                }
        }

        status = acpi_ec_transaction_unlocked(ec, t);

        if (ec->global_lock)
                acpi_release_global_lock(glk);
unlock:
        mutex_unlock(&ec->mutex);
        return status;
}
 847 
 848 static int acpi_ec_burst_enable(struct acpi_ec *ec)
 849 {
 850         u8 d;
 851         struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
 852                                 .wdata = NULL, .rdata = &d,
 853                                 .wlen = 0, .rlen = 1};
 854 
 855         return acpi_ec_transaction(ec, &t);
 856 }
 857 
 858 static int acpi_ec_burst_disable(struct acpi_ec *ec)
 859 {
 860         struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
 861                                 .wdata = NULL, .rdata = NULL,
 862                                 .wlen = 0, .rlen = 0};
 863 
 864         return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
 865                                 acpi_ec_transaction(ec, &t) : 0;
 866 }
 867 
 868 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
 869 {
 870         int result;
 871         u8 d;
 872         struct transaction t = {.command = ACPI_EC_COMMAND_READ,
 873                                 .wdata = &address, .rdata = &d,
 874                                 .wlen = 1, .rlen = 1};
 875 
 876         result = acpi_ec_transaction(ec, &t);
 877         *data = d;
 878         return result;
 879 }
 880 
 881 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
 882 {
 883         u8 wdata[2] = { address, data };
 884         struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
 885                                 .wdata = wdata, .rdata = NULL,
 886                                 .wlen = 2, .rlen = 0};
 887 
 888         return acpi_ec_transaction(ec, &t);
 889 }
 890 
 891 int ec_read(u8 addr, u8 *val)
 892 {
 893         int err;
 894         u8 temp_data;
 895 
 896         if (!first_ec)
 897                 return -ENODEV;
 898 
 899         err = acpi_ec_read(first_ec, addr, &temp_data);
 900 
 901         if (!err) {
 902                 *val = temp_data;
 903                 return 0;
 904         }
 905         return err;
 906 }
 907 EXPORT_SYMBOL(ec_read);
 908 
 909 int ec_write(u8 addr, u8 val)
 910 {
 911         int err;
 912 
 913         if (!first_ec)
 914                 return -ENODEV;
 915 
 916         err = acpi_ec_write(first_ec, addr, val);
 917 
 918         return err;
 919 }
 920 EXPORT_SYMBOL(ec_write);
 921 
 922 int ec_transaction(u8 command,
 923                    const u8 *wdata, unsigned wdata_len,
 924                    u8 *rdata, unsigned rdata_len)
 925 {
 926         struct transaction t = {.command = command,
 927                                 .wdata = wdata, .rdata = rdata,
 928                                 .wlen = wdata_len, .rlen = rdata_len};
 929 
 930         if (!first_ec)
 931                 return -ENODEV;
 932 
 933         return acpi_ec_transaction(first_ec, &t);
 934 }
 935 EXPORT_SYMBOL(ec_transaction);
 936 
 937 /* Get the handle to the EC device */
 938 acpi_handle ec_get_handle(void)
 939 {
 940         if (!first_ec)
 941                 return NULL;
 942         return first_ec->handle;
 943 }
 944 EXPORT_SYMBOL(ec_get_handle);
 945 
 946 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 947 {
 948         unsigned long flags;
 949 
 950         spin_lock_irqsave(&ec->lock, flags);
 951         if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
 952                 ec_dbg_drv("Starting EC");
 953                 /* Enable GPE for event processing (SCI_EVT=1) */
 954                 if (!resuming) {
 955                         acpi_ec_submit_request(ec);
 956                         ec_dbg_ref(ec, "Increase driver");
 957                 }
 958                 ec_log_drv("EC started");
 959         }
 960         spin_unlock_irqrestore(&ec->lock, flags);
 961 }
 962 
 963 static bool acpi_ec_stopped(struct acpi_ec *ec)
 964 {
 965         unsigned long flags;
 966         bool flushed;
 967 
 968         spin_lock_irqsave(&ec->lock, flags);
 969         flushed = acpi_ec_flushed(ec);
 970         spin_unlock_irqrestore(&ec->lock, flags);
 971         return flushed;
 972 }
 973 
/*
 * Stop the EC: block new requests, wait for in-flight ones to flush and
 * then clear the started/stopped state.  When @suspending, the driver's
 * reference is kept so the EC can be restarted cheaply on resume.
 */
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
        unsigned long flags;

        spin_lock_irqsave(&ec->lock, flags);
        if (acpi_ec_started(ec)) {
                ec_dbg_drv("Stopping EC");
                set_bit(EC_FLAGS_STOPPED, &ec->flags);
                /* Drop the lock while waiting for requests to drain */
                spin_unlock_irqrestore(&ec->lock, flags);
                wait_event(ec->wait, acpi_ec_stopped(ec));
                spin_lock_irqsave(&ec->lock, flags);
                /* Disable GPE for event processing (SCI_EVT=1) */
                if (!suspending) {
                        acpi_ec_complete_request(ec);
                        ec_dbg_ref(ec, "Decrease driver");
                } else if (!ec_freeze_events)
                        __acpi_ec_disable_event(ec);
                clear_bit(EC_FLAGS_STARTED, &ec->flags);
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
                ec_log_drv("EC stopped");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
}
 997 
 998 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
 999 {
1000         unsigned long flags;
1001 
1002         spin_lock_irqsave(&ec->lock, flags);
1003         ec->busy_polling = true;
1004         ec->polling_guard = 0;
1005         ec_log_drv("interrupt blocked");
1006         spin_unlock_irqrestore(&ec->lock, flags);
1007 }
1008 
1009 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
1010 {
1011         unsigned long flags;
1012 
1013         spin_lock_irqsave(&ec->lock, flags);
1014         ec->busy_polling = ec_busy_polling;
1015         ec->polling_guard = ec_polling_guard;
1016         ec_log_drv("interrupt unblocked");
1017         spin_unlock_irqrestore(&ec->lock, flags);
1018 }
1019 
1020 void acpi_ec_block_transactions(void)
1021 {
1022         struct acpi_ec *ec = first_ec;
1023 
1024         if (!ec)
1025                 return;
1026 
1027         mutex_lock(&ec->mutex);
1028         /* Prevent transactions from being carried out */
1029         acpi_ec_stop(ec, true);
1030         mutex_unlock(&ec->mutex);
1031 }
1032 
1033 void acpi_ec_unblock_transactions(void)
1034 {
1035         /*
1036          * Allow transactions to happen again (this function is called from
1037          * atomic context during wakeup, so we don't need to acquire the mutex).
1038          */
1039         if (first_ec)
1040                 acpi_ec_start(first_ec, true);
1041 }
1042 
1043 /* --------------------------------------------------------------------------
1044                                 Event Management
1045    -------------------------------------------------------------------------- */
1046 static struct acpi_ec_query_handler *
1047 acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
1048 {
1049         if (handler)
1050                 kref_get(&handler->kref);
1051         return handler;
1052 }
1053 
1054 static struct acpi_ec_query_handler *
1055 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
1056 {
1057         struct acpi_ec_query_handler *handler;
1058         bool found = false;
1059 
1060         mutex_lock(&ec->mutex);
1061         list_for_each_entry(handler, &ec->list, node) {
1062                 if (value == handler->query_bit) {
1063                         found = true;
1064                         break;
1065                 }
1066         }
1067         mutex_unlock(&ec->mutex);
1068         return found ? acpi_ec_get_query_handler(handler) : NULL;
1069 }
1070 
1071 static void acpi_ec_query_handler_release(struct kref *kref)
1072 {
1073         struct acpi_ec_query_handler *handler =
1074                 container_of(kref, struct acpi_ec_query_handler, kref);
1075 
1076         kfree(handler);
1077 }
1078 
1079 static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
1080 {
1081         kref_put(&handler->kref, acpi_ec_query_handler_release);
1082 }
1083 
1084 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
1085                               acpi_handle handle, acpi_ec_query_func func,
1086                               void *data)
1087 {
1088         struct acpi_ec_query_handler *handler =
1089             kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
1090 
1091         if (!handler)
1092                 return -ENOMEM;
1093 
1094         handler->query_bit = query_bit;
1095         handler->handle = handle;
1096         handler->func = func;
1097         handler->data = data;
1098         mutex_lock(&ec->mutex);
1099         kref_init(&handler->kref);
1100         list_add(&handler->node, &ec->list);
1101         mutex_unlock(&ec->mutex);
1102         return 0;
1103 }
1104 EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
1105 
1106 static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
1107                                           bool remove_all, u8 query_bit)
1108 {
1109         struct acpi_ec_query_handler *handler, *tmp;
1110         LIST_HEAD(free_list);
1111 
1112         mutex_lock(&ec->mutex);
1113         list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1114                 if (remove_all || query_bit == handler->query_bit) {
1115                         list_del_init(&handler->node);
1116                         list_add(&handler->node, &free_list);
1117                 }
1118         }
1119         mutex_unlock(&ec->mutex);
1120         list_for_each_entry_safe(handler, tmp, &free_list, node)
1121                 acpi_ec_put_query_handler(handler);
1122 }
1123 
1124 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
1125 {
1126         acpi_ec_remove_query_handlers(ec, false, query_bit);
1127 }
1128 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
1129 
1130 static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
1131 {
1132         struct acpi_ec_query *q;
1133         struct transaction *t;
1134 
1135         q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
1136         if (!q)
1137                 return NULL;
1138         INIT_WORK(&q->work, acpi_ec_event_processor);
1139         t = &q->transaction;
1140         t->command = ACPI_EC_COMMAND_QUERY;
1141         t->rdata = pval;
1142         t->rlen = 1;
1143         return q;
1144 }
1145 
1146 static void acpi_ec_delete_query(struct acpi_ec_query *q)
1147 {
1148         if (q) {
1149                 if (q->handler)
1150                         acpi_ec_put_query_handler(q->handler);
1151                 kfree(q);
1152         }
1153 }
1154 
1155 static void acpi_ec_event_processor(struct work_struct *work)
1156 {
1157         struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
1158         struct acpi_ec_query_handler *handler = q->handler;
1159 
1160         ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
1161         if (handler->func)
1162                 handler->func(handler->data);
1163         else if (handler->handle)
1164                 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
1165         ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
1166         acpi_ec_delete_query(q);
1167 }
1168 
1169 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1170 {
1171         u8 value = 0;
1172         int result;
1173         struct acpi_ec_query *q;
1174 
1175         q = acpi_ec_create_query(&value);
1176         if (!q)
1177                 return -ENOMEM;
1178 
1179         /*
1180          * Query the EC to find out which _Qxx method we need to evaluate.
1181          * Note that successful completion of the query causes the ACPI_EC_SCI
1182          * bit to be cleared (and thus clearing the interrupt source).
1183          */
1184         result = acpi_ec_transaction(ec, &q->transaction);
1185         if (!value)
1186                 result = -ENODATA;
1187         if (result)
1188                 goto err_exit;
1189 
1190         q->handler = acpi_ec_get_query_handler_by_value(ec, value);
1191         if (!q->handler) {
1192                 result = -ENODATA;
1193                 goto err_exit;
1194         }
1195 
1196         /*
1197          * It is reported that _Qxx are evaluated in a parallel way on
1198          * Windows:
1199          * https://bugzilla.kernel.org/show_bug.cgi?id=94411
1200          *
1201          * Put this log entry before schedule_work() in order to make
1202          * it appearing before any other log entries occurred during the
1203          * work queue execution.
1204          */
1205         ec_dbg_evt("Query(0x%02x) scheduled", value);
1206         if (!queue_work(ec_query_wq, &q->work)) {
1207                 ec_dbg_evt("Query(0x%02x) overlapped", value);
1208                 result = -EBUSY;
1209         }
1210 
1211 err_exit:
1212         if (result)
1213                 acpi_ec_delete_query(q);
1214         if (data)
1215                 *data = value;
1216         return result;
1217 }
1218 
1219 static void acpi_ec_check_event(struct acpi_ec *ec)
1220 {
1221         unsigned long flags;
1222 
1223         if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
1224                 if (ec_guard(ec)) {
1225                         spin_lock_irqsave(&ec->lock, flags);
1226                         /*
1227                          * Take care of the SCI_EVT unless no one else is
1228                          * taking care of it.
1229                          */
1230                         if (!ec->curr)
1231                                 advance_transaction(ec);
1232                         spin_unlock_irqrestore(&ec->lock, flags);
1233                 }
1234         }
1235 }
1236 
/*
 * Work item draining ec->nr_pending_queries: each pending SCI_EVT is
 * turned into a QR_EC query via acpi_ec_query().  ec->lock is dropped
 * around the query because acpi_ec_query() sleeps.
 */
static void acpi_ec_event_handler(struct work_struct *work)
{
        unsigned long flags;
        struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

        ec_dbg_evt("Event started");

        spin_lock_irqsave(&ec->lock, flags);
        while (ec->nr_pending_queries) {
                spin_unlock_irqrestore(&ec->lock, flags);
                (void)acpi_ec_query(ec, NULL);
                spin_lock_irqsave(&ec->lock, flags);
                ec->nr_pending_queries--;
                /*
                 * Before exit, make sure that this work item can be
                 * scheduled again. There might be QR_EC failures, leaving
                 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
                 * item from being scheduled again.
                 */
                if (!ec->nr_pending_queries) {
                        if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
                            ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
                                acpi_ec_complete_query(ec);
                }
        }
        spin_unlock_irqrestore(&ec->lock, flags);

        ec_dbg_evt("Event stopped");

        acpi_ec_check_event(ec);
}
1268 
1269 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
1270         u32 gpe_number, void *data)
1271 {
1272         unsigned long flags;
1273         struct acpi_ec *ec = data;
1274 
1275         spin_lock_irqsave(&ec->lock, flags);
1276         advance_transaction(ec);
1277         spin_unlock_irqrestore(&ec->lock, flags);
1278         return ACPI_INTERRUPT_HANDLED;
1279 }
1280 
1281 /* --------------------------------------------------------------------------
1282  *                           Address Space Management
1283  * -------------------------------------------------------------------------- */
1284 
/*
 * ACPI EmbeddedControl operation region handler: translate a read or
 * write of @bits bits at EC offset @address into byte-wise EC
 * transactions.  Burst mode bounds the per-byte latency while polling
 * or for multi-byte accesses.
 */
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
                      u32 bits, u64 *value64,
                      void *handler_context, void *region_context)
{
        struct acpi_ec *ec = handler_context;
        int result = 0, i, bytes = bits / 8;
        u8 *value = (u8 *)value64;

        /* The EC address space is a single byte-addressable 0x00-0xFF page */
        if ((address > 0xFF) || !value || !handler_context)
                return AE_BAD_PARAMETER;

        if (function != ACPI_READ && function != ACPI_WRITE)
                return AE_BAD_PARAMETER;

        if (ec->busy_polling || bits > 8)
                acpi_ec_burst_enable(ec);

        /*
         * NOTE(review): only the status of the last byte survives in
         * 'result'; an earlier per-byte failure followed by a success is
         * silently reported as AE_OK.
         */
        for (i = 0; i < bytes; ++i, ++address, ++value)
                result = (function == ACPI_READ) ?
                        acpi_ec_read(ec, address, value) :
                        acpi_ec_write(ec, address, *value);

        if (ec->busy_polling || bits > 8)
                acpi_ec_burst_disable(ec);

        /* Map Linux error codes onto ACPI status values */
        switch (result) {
        case -EINVAL:
                return AE_BAD_PARAMETER;
        case -ENODEV:
                return AE_NOT_FOUND;
        case -ETIME:
                return AE_TIME;
        default:
                return AE_OK;
        }
}
1322 
1323 /* --------------------------------------------------------------------------
1324  *                             Driver Interface
1325  * -------------------------------------------------------------------------- */
1326 
1327 static acpi_status
1328 ec_parse_io_ports(struct acpi_resource *resource, void *context);
1329 
1330 static void acpi_ec_free(struct acpi_ec *ec)
1331 {
1332         if (first_ec == ec)
1333                 first_ec = NULL;
1334         if (boot_ec == ec)
1335                 boot_ec = NULL;
1336         kfree(ec);
1337 }
1338 
1339 static struct acpi_ec *acpi_ec_alloc(void)
1340 {
1341         struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
1342 
1343         if (!ec)
1344                 return NULL;
1345         mutex_init(&ec->mutex);
1346         init_waitqueue_head(&ec->wait);
1347         INIT_LIST_HEAD(&ec->list);
1348         spin_lock_init(&ec->lock);
1349         INIT_WORK(&ec->work, acpi_ec_event_handler);
1350         ec->timestamp = jiffies;
1351         ec->busy_polling = true;
1352         ec->polling_guard = 0;
1353         return ec;
1354 }
1355 
1356 static acpi_status
1357 acpi_ec_register_query_methods(acpi_handle handle, u32 level,
1358                                void *context, void **return_value)
1359 {
1360         char node_name[5];
1361         struct acpi_buffer buffer = { sizeof(node_name), node_name };
1362         struct acpi_ec *ec = context;
1363         int value = 0;
1364         acpi_status status;
1365 
1366         status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
1367 
1368         if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
1369                 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
1370         return AE_OK;
1371 }
1372 
/*
 * Namespace-walk callback filling @ec from an EC device node: I/O ports
 * from _CRS, the GPE number from _GPE (or, for quirky platforms, from
 * the boot ECDT EC), and the _GLK global-lock requirement.
 *
 * Returns AE_CTRL_TERMINATE once a fully usable EC is found, AE_OK to
 * keep walking, or the ACPICA error.
 */
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
        acpi_status status;
        unsigned long long tmp = 0;
        struct acpi_ec *ec = context;

        /* clear addr values, ec_parse_io_ports depend on it */
        ec->command_addr = ec->data_addr = 0;

        status = acpi_walk_resources(handle, METHOD_NAME__CRS,
                                     ec_parse_io_ports, ec);
        if (ACPI_FAILURE(status))
                return status;
        /* Keep walking if this node lacks usable I/O ports */
        if (ec->data_addr == 0 || ec->command_addr == 0)
                return AE_OK;

        if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
                /*
                 * Always inherit the GPE number setting from the ECDT
                 * EC.
                 */
                ec->gpe = boot_ec->gpe;
        } else {
                /* Get GPE bit assignment (EC events). */
                /* TODO: Add support for _GPE returning a package */
                status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return status;
                ec->gpe = tmp;
        }
        /* Use the global lock for all EC transactions? */
        tmp = 0;
        acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
        ec->global_lock = tmp;
        ec->handle = handle;
        return AE_CTRL_TERMINATE;
}
1411 
1412 /*
1413  * Note: This function returns an error code only when the address space
1414  *       handler is not installed, which means "not able to handle
1415  *       transactions".
1416  */
1417 static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
1418 {
1419         acpi_status status;
1420 
1421         acpi_ec_start(ec, false);
1422 
1423         if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1424                 acpi_ec_enter_noirq(ec);
1425                 status = acpi_install_address_space_handler(ec->handle,
1426                                                             ACPI_ADR_SPACE_EC,
1427                                                             &acpi_ec_space_handler,
1428                                                             NULL, ec);
1429                 if (ACPI_FAILURE(status)) {
1430                         if (status == AE_NOT_FOUND) {
1431                                 /*
1432                                  * Maybe OS fails in evaluating the _REG
1433                                  * object. The AE_NOT_FOUND error will be
1434                                  * ignored and OS * continue to initialize
1435                                  * EC.
1436                                  */
1437                                 pr_err("Fail in evaluating the _REG object"
1438                                         " of EC device. Broken bios is suspected.\n");
1439                         } else {
1440                                 acpi_ec_stop(ec, false);
1441                                 return -ENODEV;
1442                         }
1443                 }
1444                 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1445         }
1446 
1447         if (!handle_events)
1448                 return 0;
1449 
1450         if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
1451                 /* Find and register all query methods */
1452                 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
1453                                     acpi_ec_register_query_methods,
1454                                     NULL, ec, NULL);
1455                 set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
1456         }
1457         if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
1458                 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
1459                                           ACPI_GPE_EDGE_TRIGGERED,
1460                                           &acpi_ec_gpe_handler, ec);
1461                 /* This is not fatal as we can poll EC events */
1462                 if (ACPI_SUCCESS(status)) {
1463                         set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
1464                         acpi_ec_leave_noirq(ec);
1465                         if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1466                             ec->reference_count >= 1)
1467                                 acpi_ec_enable_gpe(ec, true);
1468                 }
1469         }
1470         /* EC is fully operational, allow queries */
1471         acpi_ec_enable_event(ec);
1472 
1473         return 0;
1474 }
1475 
/*
 * Tear down in strict reverse dependency order: operation region
 * handler first, then stop transactions, then the GPE handler and
 * query handlers.  See the inline comment for why the ordering matters.
 */
static void ec_remove_handlers(struct acpi_ec *ec)
{
        if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                        ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
                        pr_err("failed to remove space handler\n");
                clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
        }

        /*
         * Stops handling the EC transactions after removing the operation
         * region handler. This is required because _REG(DISCONNECT)
         * invoked during the removal can result in new EC transactions.
         *
         * Flushes the EC requests and thus disables the GPE before
         * removing the GPE handler. This is required by the current ACPICA
         * GPE core. ACPICA GPE core will automatically disable a GPE when
         * it is indicated but there is no way to handle it. So the drivers
         * must disable the GPEs prior to removing the GPE handlers.
         */
        acpi_ec_stop(ec, false);

        if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                        &acpi_ec_gpe_handler)))
                        pr_err("failed to remove gpe handler\n");
                clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
        }
        if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
                acpi_ec_remove_query_handlers(ec, true, 0);
                clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
        }
}
1509 
1510 static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
1511 {
1512         int ret;
1513 
1514         ret = ec_install_handlers(ec, handle_events);
1515         if (ret)
1516                 return ret;
1517 
1518         /* First EC capable of handling transactions */
1519         if (!first_ec) {
1520                 first_ec = ec;
1521                 acpi_handle_info(first_ec->handle, "Used as first EC\n");
1522         }
1523 
1524         acpi_handle_info(ec->handle,
1525                          "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
1526                          ec->gpe, ec->command_addr, ec->data_addr);
1527         return ret;
1528 }
1529 
1530 static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
1531 {
1532         struct acpi_table_ecdt *ecdt_ptr;
1533         acpi_status status;
1534         acpi_handle handle;
1535 
1536         status = acpi_get_table(ACPI_SIG_ECDT, 1,
1537                                 (struct acpi_table_header **)&ecdt_ptr);
1538         if (ACPI_FAILURE(status))
1539                 return false;
1540 
1541         status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
1542         if (ACPI_FAILURE(status))
1543                 return false;
1544 
1545         *phandle = handle;
1546         return true;
1547 }
1548 
/*
 * acpi_ec_add() - ACPI driver ->add() callback for EC devices.
 *
 * Handles two cases: the early ECDT EC device registered by
 * acpi_ec_ecdt_start() (matched via ACPI_ECDT_HID, reuses boot_ec), and a
 * namespace (PNP0C09) EC, for which a fresh context is parsed from the
 * device's resources.  Returns 0 on success or a negative errno.
 */
static int acpi_ec_add(struct acpi_device *device)
{
        struct acpi_ec *ec = NULL;
        bool dep_update = true;
        acpi_status status;
        int ret;

        strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);

        if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
                /* Early ECDT EC device: reuse the boot EC context. */
                boot_ec_is_ecdt = true;
                ec = boot_ec;
                /* No namespace enumeration here, so no _DEP walk either. */
                dep_update = false;
        } else {
                ec = acpi_ec_alloc();
                if (!ec)
                        return -ENOMEM;

                /*
                 * ec_parse_device() signals success by terminating the
                 * walk with AE_CTRL_TERMINATE; anything else means the
                 * device lacks usable EC resources.
                 */
                status = ec_parse_device(device->handle, 0, ec, NULL);
                if (status != AE_CTRL_TERMINATE) {
                        ret = -EINVAL;
                        goto err_alloc;
                }

                if (boot_ec && ec->command_addr == boot_ec->command_addr &&
                    ec->data_addr == boot_ec->data_addr) {
                        boot_ec_is_ecdt = false;
                        /*
                         * Trust PNP0C09 namespace location rather than
                         * ECDT ID. But trust ECDT GPE rather than _GPE
                         * because of ASUS quirks, so do not change
                         * boot_ec->gpe to ec->gpe.
                         */
                        boot_ec->handle = ec->handle;
                        acpi_handle_debug(ec->handle, "duplicated.\n");
                        acpi_ec_free(ec);
                        ec = boot_ec;
                }
        }

        /* (Re-)install the handlers, this time with event handling on. */
        ret = acpi_ec_setup(ec, true);
        if (ret)
                goto err_query;

        if (ec == boot_ec)
                acpi_handle_info(boot_ec->handle,
                                 "Boot %s EC used to handle transactions and events\n",
                                 boot_ec_is_ecdt ? "ECDT" : "DSDT");

        device->driver_data = ec;

        /* Failure to reserve the I/O ports is noisy but not fatal. */
        ret = !!request_region(ec->data_addr, 1, "EC data");
        WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

        if (dep_update) {
                /* Reprobe devices depending on the EC */
                acpi_walk_dep_device_list(ec->handle);
        }
        acpi_handle_debug(ec->handle, "enumerated.\n");
        return 0;

err_query:
        /* The boot EC keeps its state across probe failures. */
        if (ec != boot_ec)
                acpi_ec_remove_query_handlers(ec, true, 0);
err_alloc:
        if (ec != boot_ec)
                acpi_ec_free(ec);
        return ret;
}
1621 
1622 static int acpi_ec_remove(struct acpi_device *device)
1623 {
1624         struct acpi_ec *ec;
1625 
1626         if (!device)
1627                 return -EINVAL;
1628 
1629         ec = acpi_driver_data(device);
1630         release_region(ec->data_addr, 1);
1631         release_region(ec->command_addr, 1);
1632         device->driver_data = NULL;
1633         if (ec != boot_ec) {
1634                 ec_remove_handlers(ec);
1635                 acpi_ec_free(ec);
1636         }
1637         return 0;
1638 }
1639 
1640 static acpi_status
1641 ec_parse_io_ports(struct acpi_resource *resource, void *context)
1642 {
1643         struct acpi_ec *ec = context;
1644 
1645         if (resource->type != ACPI_RESOURCE_TYPE_IO)
1646                 return AE_OK;
1647 
1648         /*
1649          * The first address region returned is the data port, and
1650          * the second address region returned is the status/command
1651          * port.
1652          */
1653         if (ec->data_addr == 0)
1654                 ec->data_addr = resource->data.io.minimum;
1655         else if (ec->command_addr == 0)
1656                 ec->command_addr = resource->data.io.minimum;
1657         else
1658                 return AE_CTRL_TERMINATE;
1659 
1660         return AE_OK;
1661 }
1662 
/*
 * IDs matched by acpi_ec_driver: PNP0C09 is the standard namespace EC HID;
 * ACPI_ECDT_HID is matched explicitly in acpi_ec_add() for the early ECDT
 * EC device (see acpi_bus_register_early_device() in acpi_ec_ecdt_start()).
 */
static const struct acpi_device_id ec_device_ids[] = {
        {"PNP0C09", 0},
        {ACPI_ECDT_HID, 0},
        {"", 0},
};
1668 
/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{
        struct acpi_ec *ec;
        acpi_status status;
        int ret;

        /*
         * If a platform has ECDT, there is no need to proceed as the
         * following probe is not a part of the ACPI device enumeration,
         * executing _STA is not safe, and thus this probe may risk of
         * picking up an invalid EC device.
         */
        if (boot_ec)
                return;

        ec = acpi_ec_alloc();
        if (!ec)
                return;

        /*
         * At this point, the namespace is initialized, so start to find
         * the namespace objects.
         */
        status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
        if (ACPI_FAILURE(status) || !ec->handle) {
                /* No usable PNP0C09 device was found. */
                acpi_ec_free(ec);
                return;
        }

        /*
         * When the DSDT EC is available, always re-configure boot EC to
         * have _REG evaluated. _REG can only be evaluated after the
         * namespace initialization.
         * At this point, the GPE is not fully initialized, so do not
         * handle the events.
         */
        ret = acpi_ec_setup(ec, false);
        if (ret) {
                acpi_ec_free(ec);
                return;
        }

        /* This EC now serves early (transaction-only) accesses. */
        boot_ec = ec;

        acpi_handle_info(ec->handle,
                         "Boot DSDT EC used to handle transactions\n");
}
1721 
1722 /*
1723  * If the DSDT EC is not functioning, we still need to prepare a fully
1724  * functioning ECDT EC first in order to handle the events.
1725  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
1726  */
/*
 * Promote the ECDT boot EC to a full ACPI device so that its events can be
 * handled.  Returns -ENODEV when there is no ECDT boot EC (or it was
 * already taken over by a namespace EC), otherwise the result of the early
 * device registration.
 */
static int __init acpi_ec_ecdt_start(void)
{
        acpi_handle handle;

        if (!boot_ec)
                return -ENODEV;
        /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
        if (!boot_ec_is_ecdt)
                return -ENODEV;

        /*
         * At this point, the namespace and the GPE is initialized, so
         * start to find the namespace objects and handle the events.
         *
         * Note: ec->handle can be valid if this function is called after
         * acpi_ec_add(), hence the fast path.
         */
        if (boot_ec->handle == ACPI_ROOT_OBJECT) {
                /*
                 * ACPI_ROOT_OBJECT is the placeholder installed by
                 * acpi_ec_ecdt_probe(); resolve the real handle from the
                 * ECDT id now that the namespace exists.
                 */
                if (!acpi_ec_ecdt_get_handle(&handle))
                        return -ENODEV;
                boot_ec->handle = handle;
        }

        /* Register to ACPI bus with PM ops attached */
        return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
1753 
#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set, for which case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 *
 * NOTE: compiled out — this DMI callback is not referenced by any entry in
 * ec_dmi_table below; kept for reference only.
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
        pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
        EC_FLAGS_QUERY_HANDSHAKE = 1;
        return 0;
}
#endif
1769 
1770 /*
1771  * On some hardware it is necessary to clear events accumulated by the EC during
1772  * sleep. These ECs stop reporting GPEs until they are manually polled, if too
1773  * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
1774  *
1775  * https://bugzilla.kernel.org/show_bug.cgi?id=44161
1776  *
1777  * Ideally, the EC should also be instructed NOT to accumulate events during
1778  * sleep (which Windows seems to do somehow), but the interface to control this
1779  * behaviour is not known at this time.
1780  *
1781  * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
1782  * however it is very likely that other Samsung models are affected.
1783  *
1784  * On systems which don't accumulate _Q events during sleep, this extra check
1785  * should be harmless.
1786  */
1787 static int ec_clear_on_resume(const struct dmi_system_id *id)
1788 {
1789         pr_debug("Detected system needing EC poll on resume.\n");
1790         EC_FLAGS_CLEAR_ON_RESUME = 1;
1791         ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
1792         return 0;
1793 }
1794 
1795 /*
1796  * Some ECDTs contain wrong register addresses.
1797  * MSI MS-171F
1798  * https://bugzilla.kernel.org/show_bug.cgi?id=12461
1799  */
1800 static int ec_correct_ecdt(const struct dmi_system_id *id)
1801 {
1802         pr_debug("Detected system needing ECDT address correction.\n");
1803         EC_FLAGS_CORRECT_ECDT = 1;
1804         return 0;
1805 }
1806 
1807 /*
1808  * Some DSDTs contain wrong GPE setting.
1809  * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
1810  * https://bugzilla.kernel.org/show_bug.cgi?id=195651
1811  */
1812 static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
1813 {
1814         pr_debug("Detected system needing ignore DSDT GPE setting.\n");
1815         EC_FLAGS_IGNORE_DSDT_GPE = 1;
1816         return 0;
1817 }
1818 
/*
 * DMI quirk table consulted by acpi_ec_ecdt_probe(): systems with swapped
 * ECDT register addresses, DSDT GPE numbers that must be ignored in favor
 * of the ECDT one, or EC event queues that must be drained on resume.
 */
static const struct dmi_system_id ec_dmi_table[] __initconst = {
        {
        ec_correct_ecdt, "MSI MS-171F", {
        DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
        DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
        {
        ec_honor_ecdt_gpe, "ASUS FX502VD", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
        DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
        {
        ec_honor_ecdt_gpe, "ASUS FX502VE", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
        DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
        {
        ec_honor_ecdt_gpe, "ASUS GL702VMK", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
        DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
        {
        ec_honor_ecdt_gpe, "ASUS X550VXK", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
        DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
        {
        ec_honor_ecdt_gpe, "ASUS X580VD", {
        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
        DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
        {
        /* Matches the whole vendor: many Samsung models are affected. */
        ec_clear_on_resume, "Samsung hardware", {
        DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
        {},
};
1849 
1850 void __init acpi_ec_ecdt_probe(void)
1851 {
1852         struct acpi_table_ecdt *ecdt_ptr;
1853         struct acpi_ec *ec;
1854         acpi_status status;
1855         int ret;
1856 
1857         /* Generate a boot ec context. */
1858         dmi_check_system(ec_dmi_table);
1859         status = acpi_get_table(ACPI_SIG_ECDT, 1,
1860                                 (struct acpi_table_header **)&ecdt_ptr);
1861         if (ACPI_FAILURE(status))
1862                 return;
1863 
1864         if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
1865                 /*
1866                  * Asus X50GL:
1867                  * https://bugzilla.kernel.org/show_bug.cgi?id=11880
1868                  */
1869                 return;
1870         }
1871 
1872         ec = acpi_ec_alloc();
1873         if (!ec)
1874                 return;
1875 
1876         if (EC_FLAGS_CORRECT_ECDT) {
1877                 ec->command_addr = ecdt_ptr->data.address;
1878                 ec->data_addr = ecdt_ptr->control.address;
1879         } else {
1880                 ec->command_addr = ecdt_ptr->control.address;
1881                 ec->data_addr = ecdt_ptr->data.address;
1882         }
1883         ec->gpe = ecdt_ptr->gpe;
1884         ec->handle = ACPI_ROOT_OBJECT;
1885 
1886         /*
1887          * At this point, the namespace is not initialized, so do not find
1888          * the namespace objects, or handle the events.
1889          */
1890         ret = acpi_ec_setup(ec, false);
1891         if (ret) {
1892                 acpi_ec_free(ec);
1893                 return;
1894         }
1895 
1896         boot_ec = ec;
1897         boot_ec_is_ecdt = true;
1898 
1899         pr_info("Boot ECDT EC used to handle transactions\n");
1900 }
1901 
1902 #ifdef CONFIG_PM_SLEEP
1903 static int acpi_ec_suspend(struct device *dev)
1904 {
1905         struct acpi_ec *ec =
1906                 acpi_driver_data(to_acpi_device(dev));
1907 
1908         if (!pm_suspend_no_platform() && ec_freeze_events)
1909                 acpi_ec_disable_event(ec);
1910         return 0;
1911 }
1912 
/*
 * ->suspend_noirq() callback: when EC wakeup is disabled (ec_no_wakeup),
 * mask the EC GPE at the hardware level first, then let the EC enter the
 * no-IRQ state.  The reference_count >= 1 check presumably means the GPE
 * is currently enabled — see acpi_ec_submit_request(); TODO confirm.
 */
static int acpi_ec_suspend_noirq(struct device *dev)
{
        struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

        /*
         * The SCI handler doesn't run at this point, so the GPE can be
         * masked at the low level without side effects.
         */
        if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
            ec->reference_count >= 1)
                acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

        acpi_ec_enter_noirq(ec);

        return 0;
}
1929 
/*
 * ->resume_noirq() callback: leave the no-IRQ state first, then re-enable
 * the EC GPE if acpi_ec_suspend_noirq() masked it — the exact mirror of
 * the suspend ordering.
 */
static int acpi_ec_resume_noirq(struct device *dev)
{
        struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

        acpi_ec_leave_noirq(ec);

        if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
            ec->reference_count >= 1)
                acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

        return 0;
}
1942 
/* ->resume() callback: re-enable EC event handling after sleep. */
static int acpi_ec_resume(struct device *dev)
{
        struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

        acpi_ec_enable_event(ec);

        return 0;
}
1951 
1952 void acpi_ec_mark_gpe_for_wake(void)
1953 {
1954         if (first_ec && !ec_no_wakeup)
1955                 acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
1956 }
1957 EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);
1958 
1959 void acpi_ec_set_gpe_wake_mask(u8 action)
1960 {
1961         if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
1962                 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
1963 }
1964 
/*
 * acpi_ec_dispatch_gpe - Check GPE status and dispatch the EC GPE in-band.
 *
 * Returns true when a wakeup should be reported: a status bit is set for
 * any enabled GPE other than the EC's (with no EC known, for any GPE at
 * all).  The EC GPE itself never counts as a wakeup source — it is
 * dispatched here and its work flushed so the caller can evaluate the
 * resulting events afterwards.
 */
bool acpi_ec_dispatch_gpe(void)
{
        u32 ret;

        /* No EC: check all GPEs (U32_MAX excludes none from the scan). */
        if (!first_ec)
                return acpi_any_gpe_status_set(U32_MAX);

        /*
         * Report wakeup if the status bit is set for any enabled GPE other
         * than the EC one.
         */
        if (acpi_any_gpe_status_set(first_ec->gpe))
                return true;

        /* EC wakeup disabled: do not even dispatch the EC GPE. */
        if (ec_no_wakeup)
                return false;

        /*
         * Dispatch the EC GPE in-band, but do not report wakeup in any case
         * to allow the caller to process events properly after that.
         */
        ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
        if (ret == ACPI_INTERRUPT_HANDLED) {
                pm_pr_dbg("EC GPE dispatched\n");

                /* Flush the event and query workqueues. */
                acpi_ec_flush_work();
        }

        return false;
}
1996 #endif /* CONFIG_PM_SLEEP */
1997 
/*
 * PM callbacks: the noirq pair handles low-level GPE masking and the EC
 * no-IRQ state; the regular pair disables/enables EC event handling.
 */
static const struct dev_pm_ops acpi_ec_pm = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
        SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
2002 
2003 static int param_set_event_clearing(const char *val,
2004                                     const struct kernel_param *kp)
2005 {
2006         int result = 0;
2007 
2008         if (!strncmp(val, "status", sizeof("status") - 1)) {
2009                 ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
2010                 pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
2011         } else if (!strncmp(val, "query", sizeof("query") - 1)) {
2012                 ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
2013                 pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
2014         } else if (!strncmp(val, "event", sizeof("event") - 1)) {
2015                 ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
2016                 pr_info("Assuming SCI_EVT clearing on event reads\n");
2017         } else
2018                 result = -EINVAL;
2019         return result;
2020 }
2021 
2022 static int param_get_event_clearing(char *buffer,
2023                                     const struct kernel_param *kp)
2024 {
2025         switch (ec_event_clearing) {
2026         case ACPI_EC_EVT_TIMING_STATUS:
2027                 return sprintf(buffer, "status");
2028         case ACPI_EC_EVT_TIMING_QUERY:
2029                 return sprintf(buffer, "query");
2030         case ACPI_EC_EVT_TIMING_EVENT:
2031                 return sprintf(buffer, "event");
2032         default:
2033                 return sprintf(buffer, "invalid");
2034         }
2035         return 0;
2036 }
2037 
/*
 * "ec_event_clearing" parameter (0644): selects the assumed SCI_EVT
 * clearing timing; accepted values are handled in
 * param_set_event_clearing() above.
 */
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
                  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
2041 
/* ACPI driver binding for EC devices matched via ec_device_ids. */
static struct acpi_driver acpi_ec_driver = {
        .name = "ec",
        .class = ACPI_EC_CLASS,
        .ids = ec_device_ids,
        .ops = {
                .add = acpi_ec_add,
                .remove = acpi_ec_remove,
                },
        .drv.pm = &acpi_ec_pm,
};
2052 
/*
 * Tear down the workqueues created by acpi_ec_init_workqueues().  Safe to
 * call when allocation was partial or never happened; each pointer is
 * cleared so a later acpi_ec_init_workqueues() can re-create them.
 */
static void acpi_ec_destroy_workqueues(void)
{
        if (ec_wq) {
                destroy_workqueue(ec_wq);
                ec_wq = NULL;
        }
        if (ec_query_wq) {
                destroy_workqueue(ec_query_wq);
                ec_query_wq = NULL;
        }
}
2064 
2065 static int acpi_ec_init_workqueues(void)
2066 {
2067         if (!ec_wq)
2068                 ec_wq = alloc_ordered_workqueue("kec", 0);
2069 
2070         if (!ec_query_wq)
2071                 ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
2072 
2073         if (!ec_wq || !ec_query_wq) {
2074                 acpi_ec_destroy_workqueues();
2075                 return -ENODEV;
2076         }
2077         return 0;
2078 }
2079 
/*
 * Systems on which EC wakeup from suspend-to-idle is disabled; consulted
 * in acpi_ec_init().  Both the "Thinkpad" and "ThinkPad" entries are
 * intentional: DMI_MATCH() is case-sensitive and presumably both product
 * family spellings ship in BIOS strings — do not "deduplicate" them.
 */
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
        {
                .ident = "Thinkpad X1 Carbon 6th",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
                },
        },
        {
                .ident = "ThinkPad X1 Carbon 6th",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
                },
        },
        {
                .ident = "ThinkPad X1 Yoga 3rd",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
                },
        },
        { },
};
2104 
/*
 * acpi_ec_init() - Set up EC workqueues, register the EC driver and start
 * the ECDT EC when needed.
 *
 * Returns 0 if either the namespace (DSDT) driver registration or the
 * ECDT EC start succeeds; -ENODEV only if both fail.
 */
int __init acpi_ec_init(void)
{
        int result;
        int ecdt_fail, dsdt_fail;

        result = acpi_ec_init_workqueues();
        if (result)
                return result;

        /*
         * Disable EC wakeup on following systems to prevent periodic
         * wakeup from EC GPE.
         */
        if (dmi_check_system(acpi_ec_no_wakeup)) {
                ec_no_wakeup = true;
                pr_debug("Disabling EC wakeup on suspend-to-idle\n");
        }

        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
        /*
         * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
         * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
         * settings but invalid DSDT settings.
         * https://bugzilla.kernel.org/show_bug.cgi?id=196847
         */
        ecdt_fail = acpi_ec_ecdt_start();
        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
2134 
/* EC driver currently not unloadable */
#if 0
/*
 * Compiled-out module exit path: would unregister the EC driver and tear
 * down the EC workqueues if the driver ever became unloadable.
 */
static void __exit acpi_ec_exit(void)
{

        acpi_bus_unregister_driver(&acpi_ec_driver);
        acpi_ec_destroy_workqueues();
}
#endif  /* 0 */

/* [<][>][^][v][top][bottom][index][help] */