This source file includes the following definitions.
- ufshcd_dump_regs
- ufs_get_pm_lvl_to_dev_pwr_mode
- ufs_get_pm_lvl_to_link_pwr_state
- ufs_get_desired_pm_lvl_for_dev_link_state
- ufshcd_valid_tag
- ufshcd_enable_irq
- ufshcd_disable_irq
- ufshcd_scsi_unblock_requests
- ufshcd_scsi_block_requests
- ufshcd_add_cmd_upiu_trace
- ufshcd_add_query_upiu_trace
- ufshcd_add_tm_upiu_trace
- ufshcd_add_command_trace
- ufshcd_print_clk_freqs
- ufshcd_print_err_hist
- ufshcd_print_host_regs
- ufshcd_print_trs
- ufshcd_print_tmrs
- ufshcd_print_host_state
- ufshcd_print_pwr_info
- ufshcd_wait_for_register
- ufshcd_get_intr_mask
- ufshcd_get_ufs_version
- ufshcd_is_device_present
- ufshcd_get_tr_ocs
- ufshcd_get_tm_free_slot
- ufshcd_put_tm_slot
- ufshcd_utrl_clear
- ufshcd_utmrl_clear
- ufshcd_outstanding_req_clear
- ufshcd_get_lists_status
- ufshcd_get_uic_cmd_result
- ufshcd_get_dme_attr_val
- ufshcd_get_req_rsp
- ufshcd_get_rsp_upiu_result
- ufshcd_get_rsp_upiu_data_seg_len
- ufshcd_is_exception_event
- ufshcd_reset_intr_aggr
- ufshcd_config_intr_aggr
- ufshcd_disable_intr_aggr
- ufshcd_enable_run_stop_reg
- ufshcd_hba_start
- ufshcd_is_hba_active
- ufshcd_get_local_unipro_ver
- ufshcd_is_unipro_pa_params_tuning_req
- ufshcd_scale_clks
- ufshcd_is_devfreq_scaling_required
- ufshcd_wait_for_doorbell_clr
- ufshcd_scale_gear
- ufshcd_clock_scaling_prepare
- ufshcd_clock_scaling_unprepare
- ufshcd_devfreq_scale
- ufshcd_clk_scaling_suspend_work
- ufshcd_clk_scaling_resume_work
- ufshcd_devfreq_target
- ufshcd_devfreq_get_dev_status
- ufshcd_devfreq_init
- ufshcd_devfreq_remove
- __ufshcd_suspend_clkscaling
- ufshcd_suspend_clkscaling
- ufshcd_resume_clkscaling
- ufshcd_clkscale_enable_show
- ufshcd_clkscale_enable_store
- ufshcd_clkscaling_init_sysfs
- ufshcd_ungate_work
- ufshcd_hold
- ufshcd_gate_work
- __ufshcd_release
- ufshcd_release
- ufshcd_clkgate_delay_show
- ufshcd_clkgate_delay_store
- ufshcd_clkgate_enable_show
- ufshcd_clkgate_enable_store
- ufshcd_init_clk_scaling
- ufshcd_exit_clk_scaling
- ufshcd_init_clk_gating
- ufshcd_exit_clk_gating
- ufshcd_clk_scaling_start_busy
- ufshcd_clk_scaling_update_busy
- ufshcd_send_command
- ufshcd_copy_sense_data
- ufshcd_copy_query_response
- ufshcd_hba_capabilities
- ufshcd_ready_for_uic_cmd
- ufshcd_get_upmcrs
- ufshcd_dispatch_uic_cmd
- ufshcd_wait_for_uic_cmd
- __ufshcd_send_uic_cmd
- ufshcd_send_uic_cmd
- ufshcd_map_sg
- ufshcd_enable_intr
- ufshcd_disable_intr
- ufshcd_prepare_req_desc_hdr
- ufshcd_prepare_utp_scsi_cmd_upiu
- ufshcd_prepare_utp_query_req_upiu
- ufshcd_prepare_utp_nop_upiu
- ufshcd_comp_devman_upiu
- ufshcd_comp_scsi_upiu
- ufshcd_upiu_wlun_to_scsi_wlun
- ufshcd_queuecommand
- ufshcd_compose_dev_cmd
- ufshcd_clear_cmd
- ufshcd_check_query_response
- ufshcd_dev_cmd_completion
- ufshcd_wait_for_dev_cmd
- ufshcd_get_dev_cmd_tag
- ufshcd_put_dev_cmd_tag
- ufshcd_exec_dev_cmd
- ufshcd_init_query
- ufshcd_query_flag_retry
- ufshcd_query_flag
- ufshcd_query_attr
- ufshcd_query_attr_retry
- __ufshcd_query_descriptor
- ufshcd_query_descriptor_retry
- ufshcd_read_desc_length
- ufshcd_map_desc_id_to_length
- ufshcd_read_desc_param
- ufshcd_read_desc
- ufshcd_read_power_desc
- ufshcd_read_device_desc
- ufshcd_remove_non_printable
- ufshcd_read_string_desc
- ufshcd_read_unit_desc_param
- ufshcd_memory_alloc
- ufshcd_host_memory_configure
- ufshcd_dme_link_startup
- ufshcd_dme_reset
- ufshcd_dme_enable
- ufshcd_add_delay_before_dme_cmd
- ufshcd_dme_set_attr
- ufshcd_dme_get_attr
- ufshcd_uic_pwr_ctrl
- ufshcd_uic_change_pwr_mode
- ufshcd_link_recovery
- __ufshcd_uic_hibern8_enter
- ufshcd_uic_hibern8_enter
- ufshcd_uic_hibern8_exit
- ufshcd_auto_hibern8_enable
- ufshcd_init_pwr_info
- ufshcd_get_max_pwr_mode
- ufshcd_change_power_mode
- ufshcd_config_pwr_mode
- ufshcd_complete_dev_init
- ufshcd_make_hba_operational
- ufshcd_hba_stop
- ufshcd_hba_execute_hce
- ufshcd_hba_enable
- ufshcd_disable_tx_lcc
- ufshcd_disable_device_tx_lcc
- ufshcd_update_reg_hist
- ufshcd_link_startup
- ufshcd_verify_dev_init
- ufshcd_set_queue_depth
- ufshcd_get_lu_wp
- ufshcd_get_lu_power_on_wp_status
- ufshcd_slave_alloc
- ufshcd_change_queue_depth
- ufshcd_slave_configure
- ufshcd_slave_destroy
- ufshcd_scsi_cmd_status
- ufshcd_transfer_rsp_status
- ufshcd_uic_cmd_compl
- __ufshcd_transfer_req_compl
- ufshcd_transfer_req_compl
- ufshcd_disable_ee
- ufshcd_enable_ee
- ufshcd_enable_auto_bkops
- ufshcd_disable_auto_bkops
- ufshcd_force_reset_auto_bkops
- ufshcd_get_bkops_status
- ufshcd_bkops_ctrl
- ufshcd_urgent_bkops
- ufshcd_get_ee_status
- ufshcd_bkops_exception_event_handler
- ufshcd_exception_event_handler
- ufshcd_complete_requests
- ufshcd_quirk_dl_nac_errors
- ufshcd_err_handler
- ufshcd_update_uic_error
- ufshcd_is_auto_hibern8_error
- ufshcd_check_errors
- ufshcd_tmc_handler
- ufshcd_sl_intr
- ufshcd_intr
- ufshcd_clear_tm_cmd
- __ufshcd_issue_tm_cmd
- ufshcd_issue_tm_cmd
- ufshcd_issue_devman_upiu_cmd
- ufshcd_exec_raw_upiu_cmd
- ufshcd_eh_device_reset_handler
- ufshcd_set_req_abort_skip
- ufshcd_abort
- ufshcd_host_reset_and_restore
- ufshcd_reset_and_restore
- ufshcd_eh_host_reset_handler
- ufshcd_get_max_icc_level
- ufshcd_find_max_sup_active_icc_level
- ufshcd_init_icc_levels
- ufshcd_scsi_add_wlus
- ufs_get_device_desc
- ufs_put_device_desc
- ufs_fixup_device_setup
- ufshcd_tune_pa_tactivate
- ufshcd_tune_pa_hibern8time
- ufshcd_quirk_tune_host_pa_tactivate
- ufshcd_tune_unipro_params
- ufshcd_clear_dbg_ufs_stats
- ufshcd_init_desc_sizes
- ufs_get_bref_clk_from_hz
- ufshcd_parse_dev_ref_clk_freq
- ufshcd_set_dev_ref_clk
- ufshcd_probe_hba
- ufshcd_async_scan
- ufshcd_eh_timed_out
- ufshcd_config_vreg_load
- ufshcd_config_vreg_lpm
- ufshcd_config_vreg_hpm
- ufshcd_config_vreg
- ufshcd_enable_vreg
- ufshcd_disable_vreg
- ufshcd_setup_vreg
- ufshcd_setup_hba_vreg
- ufshcd_get_vreg
- ufshcd_init_vreg
- ufshcd_init_hba_vreg
- __ufshcd_setup_clocks
- ufshcd_setup_clocks
- ufshcd_init_clocks
- ufshcd_variant_hba_init
- ufshcd_variant_hba_exit
- ufshcd_hba_init
- ufshcd_hba_exit
- ufshcd_send_request_sense
- ufshcd_set_dev_pwr_mode
- ufshcd_link_state_transition
- ufshcd_vreg_set_lpm
- ufshcd_vreg_set_hpm
- ufshcd_hba_vreg_set_lpm
- ufshcd_hba_vreg_set_hpm
- ufshcd_suspend
- ufshcd_resume
- ufshcd_system_suspend
- ufshcd_system_resume
- ufshcd_runtime_suspend
- ufshcd_runtime_resume
- ufshcd_runtime_idle
- ufshcd_shutdown
- ufshcd_remove
- ufshcd_dealloc_host
- ufshcd_set_dma_mask
- ufshcd_alloc_host
- ufshcd_init
  40 #include <linux/async.h>
  41 #include <linux/devfreq.h>
  42 #include <linux/nls.h>
  43 #include <linux/of.h>
  44 #include <linux/bitfield.h>
  45 #include "ufshcd.h"
  46 #include "ufs_quirks.h"
  47 #include "unipro.h"
  48 #include "ufs-sysfs.h"
  49 #include "ufs_bsg.h"
  50 
  51 #define CREATE_TRACE_POINTS
  52 #include <trace/events/ufs.h>
  53 
  54 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
  55                                  UTP_TASK_REQ_COMPL |\
  56                                  UFSHCD_ERROR_MASK)
   57 /* UIC command timeout, unit: ms */
   58 #define UIC_CMD_TIMEOUT 500
   59 
   60 /* NOP OUT retries waiting for NOP IN response */
   61 #define NOP_OUT_RETRIES    10
   62 /* Timeout after 30 msecs if NOP OUT hangs without response */
   63 #define NOP_OUT_TIMEOUT    30 /* msecs */
   64 
   65 /* Query request retries */
   66 #define QUERY_REQ_RETRIES 3
   67 /* Query request timeout */
   68 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
   69 
   70 /* Task management command timeout */
   71 #define TM_CMD_TIMEOUT  100 /* msecs */
   72 
   73 /* maximum number of retries for a general UIC command */
   74 #define UFS_UIC_COMMAND_RETRIES 3
   75 
   76 /* maximum number of link-startup retries */
   77 #define DME_LINKSTARTUP_RETRIES 3
   78 
   79 /* Maximum retries for Hibern8 enter */
   80 #define UIC_HIBERN8_ENTER_RETRIES 3
   81 
   82 /* maximum number of reset retries before giving up */
   83 #define MAX_HOST_RESET_RETRIES 5
   84 
   85 /* Expose the flag value from utp_upiu_query.value */
   86 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
   87 
   88 /* Interrupt aggregation default timeout, unit: 40us */
   89 #define INT_AGGR_DEF_TO 0x02
  90 
  91 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
  92         ({                                                              \
  93                 int _ret;                                               \
  94                 if (_on)                                                \
  95                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
  96                 else                                                    \
  97                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
  98                 _ret;                                                   \
  99         })
 100 
 101 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
 102         size_t __len = (len);                                            \
 103         print_hex_dump(KERN_ERR, prefix_str,                             \
 104                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
 105                        16, 4, buf, __len, false);                        \
 106 } while (0)
 107 
 108 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 109                      const char *prefix)
 110 {
 111         u32 *regs;
 112         size_t pos;
 113 
 114         if (offset % 4 != 0 || len % 4 != 0) 
 115                 return -EINVAL;
 116 
 117         regs = kzalloc(len, GFP_KERNEL);
 118         if (!regs)
 119                 return -ENOMEM;
 120 
 121         for (pos = 0; pos < len; pos += 4)
 122                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
 123 
 124         ufshcd_hex_dump(prefix, regs, len);
 125         kfree(regs);
 126 
 127         return 0;
 128 }
 129 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 130 
 131 enum {
 132         UFSHCD_MAX_CHANNEL      = 0,
 133         UFSHCD_MAX_ID           = 1,
 134         UFSHCD_CMD_PER_LUN      = 32,
 135         UFSHCD_CAN_QUEUE        = 32,
 136 };
 137 
  138 /* UFSHCD states */
 139 enum {
 140         UFSHCD_STATE_RESET,
 141         UFSHCD_STATE_ERROR,
 142         UFSHCD_STATE_OPERATIONAL,
 143         UFSHCD_STATE_EH_SCHEDULED,
 144 };
 145 
  146 /* UFSHCD error handling flags */
 147 enum {
 148         UFSHCD_EH_IN_PROGRESS = (1 << 0),
 149 };
 150 
  151 /* UFSHCD UIC layer error flags */
  152 enum {
  153         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
  154         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
  155         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
  156         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
  157         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
  158         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
  159 };
 160 
 161 #define ufshcd_set_eh_in_progress(h) \
 162         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
 163 #define ufshcd_eh_in_progress(h) \
 164         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
 165 #define ufshcd_clear_eh_in_progress(h) \
 166         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 167 
 168 #define ufshcd_set_ufs_dev_active(h) \
 169         ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
 170 #define ufshcd_set_ufs_dev_sleep(h) \
 171         ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
 172 #define ufshcd_set_ufs_dev_poweroff(h) \
 173         ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
 174 #define ufshcd_is_ufs_dev_active(h) \
 175         ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
 176 #define ufshcd_is_ufs_dev_sleep(h) \
 177         ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
 178 #define ufshcd_is_ufs_dev_poweroff(h) \
 179         ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
 180 
 181 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 182         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 183         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 184         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
 185         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 186         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 187         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
 188 };
 189 
 190 static inline enum ufs_dev_pwr_mode
 191 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
 192 {
 193         return ufs_pm_lvl_states[lvl].dev_state;
 194 }
 195 
 196 static inline enum uic_link_state
 197 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
 198 {
 199         return ufs_pm_lvl_states[lvl].link_state;
 200 }
 201 
 202 static inline enum ufs_pm_level
 203 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 204                                         enum uic_link_state link_state)
 205 {
 206         enum ufs_pm_level lvl;
 207 
 208         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
 209                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
 210                         (ufs_pm_lvl_states[lvl].link_state == link_state))
 211                         return lvl;
 212         }
 213 
  214         /* if no match found, return the level 0 */
 215         return UFS_PM_LVL_0;
 216 }
 217 
 218 static struct ufs_dev_fix ufs_fixups[] = {
  219         /* UFS cards deviations table */
 220         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 221                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 222         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 223                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 224         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 225                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 226         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 227                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 228         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
 229                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
 230         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
 231                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
 232         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
 233                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
 234         UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" ,
 235                 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
 236 
 237         END_FIX
 238 };
 239 
 240 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 241 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 242 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 243 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 244 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 245 static void ufshcd_hba_exit(struct ufs_hba *hba);
 246 static int ufshcd_probe_hba(struct ufs_hba *hba);
 247 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 248                                  bool skip_ref_clk);
 249 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 250 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 251 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 252 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 253 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 254 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 255 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 256 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 257 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 258 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 259 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 260                              struct ufs_pa_layer_attr *pwr_mode);
 261 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 262 {
 263         return tag >= 0 && tag < hba->nutrs;
 264 }
 265 
 266 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 267 {
 268         int ret = 0;
 269 
 270         if (!hba->is_irq_enabled) {
 271                 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
 272                                 hba);
 273                 if (ret)
 274                         dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
 275                                 __func__, ret);
 276                 hba->is_irq_enabled = true;
 277         }
 278 
 279         return ret;
 280 }
 281 
 282 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 283 {
 284         if (hba->is_irq_enabled) {
 285                 free_irq(hba->irq, hba);
 286                 hba->is_irq_enabled = false;
 287         }
 288 }
 289 
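/*
 * Block/unblock SCSI request processing. A reference count
 * (scsi_block_reqs_cnt) lets nested callers block the host on the first
 * request and unblock it only when the last blocker releases it.
 */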
 290 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
 291 {
 292         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
 293                 scsi_unblock_requests(hba->host);
 294 }
 295 
 296 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
 297 {
 298         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
 299                 scsi_block_requests(hba->host);
 300 }
 301 
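/*
 * Tracing helpers: emit ufshcd_upiu trace events for SCSI command, query
 * and task management UPIUs using the request header stored in the
 * corresponding descriptor.
 */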
 302 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 303                 const char *str)
 304 {
 305         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
 306 
 307         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
 308 }
 309 
 310 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 311                 const char *str)
 312 {
 313         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
 314 
 315         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
 316 }
 317 
 318 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 319                 const char *str)
 320 {
 321         int off = (int)tag - hba->nutrs;
 322         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
 323 
 324         trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
 325                         &descp->input_param1);
 326 }
 327 
 328 static void ufshcd_add_command_trace(struct ufs_hba *hba,
 329                 unsigned int tag, const char *str)
 330 {
 331         sector_t lba = -1;
 332         u8 opcode = 0;
 333         u32 intr, doorbell;
 334         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 335         int transfer_len = -1;
 336 
 337         if (!trace_ufshcd_command_enabled()) {
 338                 
 339                 if (lrbp->cmd)
 340                         ufshcd_add_cmd_upiu_trace(hba, tag, str);
 341                 return;
 342         }
 343 
 344         if (lrbp->cmd) { 
 345                 
 346                 ufshcd_add_cmd_upiu_trace(hba, tag, str);
 347                 opcode = (u8)(*lrbp->cmd->cmnd);
 348                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
 349                         
 350 
 351 
 352 
 353                         if (lrbp->cmd->request && lrbp->cmd->request->bio)
 354                                 lba =
 355                                   lrbp->cmd->request->bio->bi_iter.bi_sector;
 356                         transfer_len = be32_to_cpu(
 357                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 358                 }
 359         }
 360 
 361         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 362         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 363         trace_ufshcd_command(dev_name(hba->dev), str, tag,
 364                                 doorbell, transfer_len, intr, lba, opcode);
 365 }
 366 
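/* Dump the current frequency of every controller clock that has scaling limits */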
 367 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 368 {
 369         struct ufs_clk_info *clki;
 370         struct list_head *head = &hba->clk_list_head;
 371 
 372         if (list_empty(head))
 373                 return;
 374 
 375         list_for_each_entry(clki, head, list) {
 376                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
 377                                 clki->max_freq)
 378                         dev_err(hba->dev, "clk: %s, rate: %u\n",
 379                                         clki->name, clki->curr_freq);
 380         }
 381 }
 382 
 383 static void ufshcd_print_err_hist(struct ufs_hba *hba,
 384                                   struct ufs_err_reg_hist *err_hist,
 385                                   char *err_name)
 386 {
 387         int i;
 388         bool found = false;
 389 
 390         for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
 391                 int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
 392 
 393                 if (err_hist->reg[p] == 0)
 394                         continue;
 395                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
 396                         err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
 397                 found = true;
 398         }
 399 
 400         if (!found)
 401                 dev_err(hba->dev, "No record of %s errors\n", err_name);
 402 }
 403 
 404 static void ufshcd_print_host_regs(struct ufs_hba *hba)
 405 {
 406         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 407         dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
 408                 hba->ufs_version, hba->capabilities);
 409         dev_err(hba->dev,
 410                 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
 411                 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
 412         dev_err(hba->dev,
 413                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
 414                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 415                 hba->ufs_stats.hibern8_exit_cnt);
 416 
 417         ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
 418         ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
 419         ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
 420         ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
 421         ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
 422         ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
 423                               "auto_hibern8_err");
 424         ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
 425         ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
 426                               "link_startup_fail");
 427         ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
 428         ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
 429                               "suspend_fail");
 430         ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
 431         ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
 432         ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
 433 
 434         ufshcd_print_clk_freqs(hba);
 435 
 436         if (hba->vops && hba->vops->dbg_register_dump)
 437                 hba->vops->dbg_register_dump(hba);
 438 }
 439 
 440 static
 441 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 442 {
 443         struct ufshcd_lrb *lrbp;
 444         int prdt_length;
 445         int tag;
 446 
 447         for_each_set_bit(tag, &bitmap, hba->nutrs) {
 448                 lrbp = &hba->lrb[tag];
 449 
 450                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
 451                                 tag, ktime_to_us(lrbp->issue_time_stamp));
 452                 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
 453                                 tag, ktime_to_us(lrbp->compl_time_stamp));
 454                 dev_err(hba->dev,
 455                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
 456                         tag, (u64)lrbp->utrd_dma_addr);
 457 
 458                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 459                                 sizeof(struct utp_transfer_req_desc));
 460                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
 461                         (u64)lrbp->ucd_req_dma_addr);
 462                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
 463                                 sizeof(struct utp_upiu_req));
 464                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
 465                         (u64)lrbp->ucd_rsp_dma_addr);
 466                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 467                                 sizeof(struct utp_upiu_rsp));
 468 
 469                 prdt_length = le16_to_cpu(
 470                         lrbp->utr_descriptor_ptr->prd_table_length);
 471                 dev_err(hba->dev,
 472                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
 473                         tag, prdt_length,
 474                         (u64)lrbp->ucd_prdt_dma_addr);
 475 
 476                 if (pr_prdt)
 477                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 478                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
 479         }
 480 }
 481 
 482 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 483 {
 484         int tag;
 485 
 486         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
 487                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
 488 
 489                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
 490                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
 491         }
 492 }
 493 
 494 static void ufshcd_print_host_state(struct ufs_hba *hba)
 495 {
 496         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 497         dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
 498                 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
 499         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 500                 hba->saved_err, hba->saved_uic_err);
 501         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
 502                 hba->curr_dev_pwr_mode, hba->uic_link_state);
 503         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
 504                 hba->pm_op_in_progress, hba->is_sys_suspended);
 505         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
 506                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
 507         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
 508         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 509                 hba->eh_flags, hba->req_abort_count);
 510         dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
 511                 hba->capabilities, hba->caps);
 512         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
 513                 hba->dev_quirks);
 514 }
 515 
  516 /**
  517  * ufshcd_print_pwr_info - print power params as saved in hba
  518  * power info
  519  * @hba: per-adapter instance
  520  */
 521 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
 522 {
 523         static const char * const names[] = {
 524                 "INVALID MODE",
 525                 "FAST MODE",
 526                 "SLOW_MODE",
 527                 "INVALID MODE",
 528                 "FASTAUTO_MODE",
 529                 "SLOWAUTO_MODE",
 530                 "INVALID MODE",
 531         };
 532 
 533         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
 534                  __func__,
 535                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
 536                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
 537                  names[hba->pwr_info.pwr_rx],
 538                  names[hba->pwr_info.pwr_tx],
 539                  hba->pwr_info.hs_rate);
 540 }
 541 
  542 /*
  543  * ufshcd_wait_for_register - wait for register value to change
  544  * @hba - per-adapter interface
  545  * @reg - mmio register offset
  546  * @mask - mask to apply to read register value
  547  * @val - wait condition
  548  * @interval_us - polling interval in microseconds
  549  * @timeout_ms - timeout in milliseconds
  550  * @can_sleep - perform sleep or just spin
  551  *
  552  * Returns -ETIMEDOUT on error, zero on success
  553  */
 554 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 555                                 u32 val, unsigned long interval_us,
 556                                 unsigned long timeout_ms, bool can_sleep)
 557 {
 558         int err = 0;
 559         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 560 
  561         /* ignore bits that we don't intend to wait on */
 562         val = val & mask;
 563 
 564         while ((ufshcd_readl(hba, reg) & mask) != val) {
 565                 if (can_sleep)
 566                         usleep_range(interval_us, interval_us + 50);
 567                 else
 568                         udelay(interval_us);
 569                 if (time_after(jiffies, timeout)) {
 570                         if ((ufshcd_readl(hba, reg) & mask) != val)
 571                                 err = -ETIMEDOUT;
 572                         break;
 573                 }
 574         }
 575 
 576         return err;
 577 }
 578 
  579 /**
  580  * ufshcd_get_intr_mask - Get the interrupt bit mask
  581  * @hba: Pointer to adapter instance
  582  *
  583  * Returns interrupt bit mask per version
  584  */
 585 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 586 {
 587         u32 intr_mask = 0;
 588 
 589         switch (hba->ufs_version) {
 590         case UFSHCI_VERSION_10:
 591                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
 592                 break;
 593         case UFSHCI_VERSION_11:
 594         case UFSHCI_VERSION_20:
 595                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
 596                 break;
 597         case UFSHCI_VERSION_21:
 598         default:
 599                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
 600                 break;
 601         }
 602 
 603         return intr_mask;
 604 }
 605 
  606 /**
  607  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
  608  * @hba: Pointer to adapter instance
  609  *
  610  * Returns UFSHCI version supported by the controller
  611  */
 612 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 613 {
 614         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 615                 return ufshcd_vops_get_ufs_hci_version(hba);
 616 
 617         return ufshcd_readl(hba, REG_UFS_VERSION);
 618 }
 619 
  620 /**
  621  * ufshcd_is_device_present - Check if any device connected to
  622  *                            the host controller
  623  * @hba: pointer to adapter instance
  624  *
  625  * Returns true if device present, false if no device detected
  626  */
 627 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
 628 {
 629         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 630                                                 DEVICE_PRESENT) ? true : false;
 631 }
 632 
  633 /**
  634  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
  635  * @lrbp: pointer to local command reference block
  636  *
  637  * This function is used to get the OCS field from UTRD
  638  * Returns the OCS field in the UTRD
  639  */
 640 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 641 {
 642         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 643 }
 644 
 645 
 646 
 647 
 648 
 649 
 650 
 651 
 652 
 653 
 654 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 655 {
 656         int tag;
 657         bool ret = false;
 658 
 659         if (!free_slot)
 660                 goto out;
 661 
 662         do {
 663                 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 664                 if (tag >= hba->nutmrs)
 665                         goto out;
 666         } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 667 
 668         *free_slot = tag;
 669         ret = true;
 670 out:
 671         return ret;
 672 }
 673 
 674 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 675 {
 676         clear_bit_unlock(slot, &hba->tm_slots_in_use);
 677 }
 678 
 679 
 680 
 681 
 682 
 683 
 684 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 685 {
 686         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
 687                 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 688         else
 689                 ufshcd_writel(hba, ~(1 << pos),
 690                                 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 691 }
 692 
 693 
 694 
 695 
 696 
 697 
 698 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
 699 {
 700         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
 701                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 702         else
 703                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 704 }
 705 
 706 
 707 
 708 
 709 
 710 
 711 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 712 {
 713         __clear_bit(tag, &hba->outstanding_reqs);
 714 }
 715 
 716 
 717 
 718 
 719 
 720 
 721 
 722 static inline int ufshcd_get_lists_status(u32 reg)
 723 {
 724         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
 725 }
 726 
 727 
 728 
 729 
 730 
 731 
 732 
 733 
 734 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 735 {
 736         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 737                MASK_UIC_COMMAND_RESULT;
 738 }
 739 
 740 
 741 
 742 
 743 
 744 
 745 
 746 
 747 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 748 {
 749         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 750 }
 751 
 752 
 753 
 754 
 755 
 756 static inline int
 757 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 758 {
 759         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 760 }
 761 
 762 
 763 
 764 
 765 
 766 
 767 
 768 
 769 static inline int
 770 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 771 {
 772         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 773 }
 774 
 775 
 776 
 777 
 778 
 779 
 780 
 781 
 782 static inline unsigned int
 783 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 784 {
 785         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 786                 MASK_RSP_UPIU_DATA_SEG_LEN;
 787 }
 788 
 789 
 790 
 791 
 792 
 793 
 794 
 795 
 796 
 797 
 798 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 799 {
 800         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 801                         MASK_RSP_EXCEPTION_EVENT ? true : false;
 802 }
 803 
 804 
 805 
 806 
 807 
 808 static inline void
 809 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 810 {
 811         ufshcd_writel(hba, INT_AGGR_ENABLE |
 812                       INT_AGGR_COUNTER_AND_TIMER_RESET,
 813                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 814 }
 815 
 816 
 817 
 818 
 819 
 820 
 821 
 822 static inline void
 823 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 824 {
 825         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 826                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
 827                       INT_AGGR_TIMEOUT_VAL(tmout),
 828                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 829 }
 830 
 831 
 832 
 833 
 834 
 835 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 836 {
 837         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 838 }
 839 
 840 
 841 
 842 
 843 
 844 
 845 
 846 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 847 {
 848         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 849                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
 850         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 851                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 852 }
 853 
 854 
 855 
 856 
 857 
 858 static inline void ufshcd_hba_start(struct ufs_hba *hba)
 859 {
 860         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
 861 }
 862 
 863 
 864 
 865 
 866 
 867 
 868 
 869 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
 870 {
 871         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
 872                 ? false : true;
 873 }
 874 
 875 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 876 {
 877         
 878         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
 879             (hba->ufs_version == UFSHCI_VERSION_11))
 880                 return UFS_UNIPRO_VER_1_41;
 881         else
 882                 return UFS_UNIPRO_VER_1_6;
 883 }
 884 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
 885 
 886 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 887 {
 888         
 889 
 890 
 891 
 892 
 893 
 894 
 895 
 896 
 897         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
 898                 return true;
 899         else
 900                 return false;
 901 }
 902 
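/*
 * Scale all controller clocks to their max (scale_up) or min frequency and
 * notify the variant driver before and after the change.
 */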
 903 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 904 {
 905         int ret = 0;
 906         struct ufs_clk_info *clki;
 907         struct list_head *head = &hba->clk_list_head;
 908         ktime_t start = ktime_get();
 909         bool clk_state_changed = false;
 910 
 911         if (list_empty(head))
 912                 goto out;
 913 
 914         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 915         if (ret)
 916                 return ret;
 917 
 918         list_for_each_entry(clki, head, list) {
 919                 if (!IS_ERR_OR_NULL(clki->clk)) {
 920                         if (scale_up && clki->max_freq) {
 921                                 if (clki->curr_freq == clki->max_freq)
 922                                         continue;
 923 
 924                                 clk_state_changed = true;
 925                                 ret = clk_set_rate(clki->clk, clki->max_freq);
 926                                 if (ret) {
 927                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 928                                                 __func__, clki->name,
 929                                                 clki->max_freq, ret);
 930                                         break;
 931                                 }
 932                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
 933                                                 "scaled up", clki->name,
 934                                                 clki->curr_freq,
 935                                                 clki->max_freq);
 936 
 937                                 clki->curr_freq = clki->max_freq;
 938 
 939                         } else if (!scale_up && clki->min_freq) {
 940                                 if (clki->curr_freq == clki->min_freq)
 941                                         continue;
 942 
 943                                 clk_state_changed = true;
 944                                 ret = clk_set_rate(clki->clk, clki->min_freq);
 945                                 if (ret) {
 946                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 947                                                 __func__, clki->name,
 948                                                 clki->min_freq, ret);
 949                                         break;
 950                                 }
 951                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
 952                                                 "scaled down", clki->name,
 953                                                 clki->curr_freq,
 954                                                 clki->min_freq);
 955                                 clki->curr_freq = clki->min_freq;
 956                         }
 957                 }
 958                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 959                                 clki->name, clk_get_rate(clki->clk));
 960         }
 961 
 962         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 963 
 964 out:
 965         if (clk_state_changed)
 966                 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 967                         (scale_up ? "up" : "down"),
 968                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 969         return ret;
 970 }
 971 
 972 
 973 
 974 
 975 
 976 
 977 
 978 
 979 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
 980                                                bool scale_up)
 981 {
 982         struct ufs_clk_info *clki;
 983         struct list_head *head = &hba->clk_list_head;
 984 
 985         if (list_empty(head))
 986                 return false;
 987 
 988         list_for_each_entry(clki, head, list) {
 989                 if (!IS_ERR_OR_NULL(clki->clk)) {
 990                         if (scale_up && clki->max_freq) {
 991                                 if (clki->curr_freq == clki->max_freq)
 992                                         continue;
 993                                 return true;
 994                         } else if (!scale_up && clki->min_freq) {
 995                                 if (clki->curr_freq == clki->min_freq)
 996                                         continue;
 997                                 return true;
 998                         }
 999                 }
1000         }
1001 
1002         return false;
1003 }
1004 
1005 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1006                                         u64 wait_timeout_us)
1007 {
1008         unsigned long flags;
1009         int ret = 0;
1010         u32 tm_doorbell;
1011         u32 tr_doorbell;
1012         bool timeout = false, do_last_check = false;
1013         ktime_t start;
1014 
1015         ufshcd_hold(hba, false);
1016         spin_lock_irqsave(hba->host->host_lock, flags);
 1017         /*
 1018          * Wait for all the outstanding tasks/transfer requests.
 1019          * Verify by checking the doorbell registers are clear.
 1020          */
1021         start = ktime_get();
1022         do {
1023                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1024                         ret = -EBUSY;
1025                         goto out;
1026                 }
1027 
1028                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1029                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1030                 if (!tm_doorbell && !tr_doorbell) {
1031                         timeout = false;
1032                         break;
1033                 } else if (do_last_check) {
1034                         break;
1035                 }
1036 
1037                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1038                 schedule();
1039                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1040                     wait_timeout_us) {
1041                         timeout = true;
1042                         
1043 
1044 
1045 
1046 
1047                         do_last_check = true;
1048                 }
1049                 spin_lock_irqsave(hba->host->host_lock, flags);
1050         } while (tm_doorbell || tr_doorbell);
1051 
1052         if (timeout) {
1053                 dev_err(hba->dev,
1054                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1055                         __func__, tm_doorbell, tr_doorbell);
1056                 ret = -EBUSY;
1057         }
1058 out:
1059         spin_unlock_irqrestore(hba->host->host_lock, flags);
1060         ufshcd_release(hba);
1061         return ret;
1062 }
1063 
1064 
1065 
1066 
1067 
1068 
1069 
1070 
1071 
1072 
1073 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1074 {
1075         #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
1076         int ret = 0;
1077         struct ufs_pa_layer_attr new_pwr_info;
1078 
1079         if (scale_up) {
1080                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1081                        sizeof(struct ufs_pa_layer_attr));
1082         } else {
1083                 memcpy(&new_pwr_info, &hba->pwr_info,
1084                        sizeof(struct ufs_pa_layer_attr));
1085 
1086                 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1087                     || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1088                         
1089                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1090                                 &hba->pwr_info,
1091                                 sizeof(struct ufs_pa_layer_attr));
1092 
1093                         
1094                         new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1095                         new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1096                 }
1097         }
1098 
1099         
1100         ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1101 
1102         if (ret)
1103                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1104                         __func__, ret,
1105                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1106                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1107 
1108         return ret;
1109 }
1110 
1111 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1112 {
1113         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) 
1114         int ret = 0;
1115         
1116 
1117 
1118 
1119         ufshcd_scsi_block_requests(hba);
1120         down_write(&hba->clk_scaling_lock);
1121         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1122                 ret = -EBUSY;
1123                 up_write(&hba->clk_scaling_lock);
1124                 ufshcd_scsi_unblock_requests(hba);
1125         }
1126 
1127         return ret;
1128 }
1129 
1130 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1131 {
1132         up_write(&hba->clk_scaling_lock);
1133         ufshcd_scsi_unblock_requests(hba);
1134 }
1135 
1136 
1137 
1138 
1139 
1140 
1141 
1142 
1143 
1144 
1145 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1146 {
1147         int ret = 0;
1148 
 1149         /* let's not get into low power until clock scaling is completed */
1150         ufshcd_hold(hba, false);
1151 
1152         ret = ufshcd_clock_scaling_prepare(hba);
1153         if (ret)
1154                 return ret;
1155 
 1156         /* scale down the gear before scaling down clocks */
1157         if (!scale_up) {
1158                 ret = ufshcd_scale_gear(hba, false);
1159                 if (ret)
1160                         goto out;
1161         }
1162 
1163         ret = ufshcd_scale_clks(hba, scale_up);
1164         if (ret) {
1165                 if (!scale_up)
1166                         ufshcd_scale_gear(hba, true);
1167                 goto out;
1168         }
1169 
 1170         /* scale up the gear after scaling up clocks */
1171         if (scale_up) {
1172                 ret = ufshcd_scale_gear(hba, true);
1173                 if (ret) {
1174                         ufshcd_scale_clks(hba, false);
1175                         goto out;
1176                 }
1177         }
1178 
1179         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1180 
1181 out:
1182         ufshcd_clock_scaling_unprepare(hba);
1183         ufshcd_release(hba);
1184         return ret;
1185 }
1186 
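/*
 * devfreq suspend/resume work: suspend_work stops clock scaling once there
 * are no active requests; resume_work restarts the devfreq device when
 * scaling is no longer suspended.
 */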
1187 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1188 {
1189         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1190                                            clk_scaling.suspend_work);
1191         unsigned long irq_flags;
1192 
1193         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1194         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1195                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1196                 return;
1197         }
1198         hba->clk_scaling.is_suspended = true;
1199         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1200 
1201         __ufshcd_suspend_clkscaling(hba);
1202 }
1203 
1204 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1205 {
1206         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1207                                            clk_scaling.resume_work);
1208         unsigned long irq_flags;
1209 
1210         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1211         if (!hba->clk_scaling.is_suspended) {
1212                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1213                 return;
1214         }
1215         hba->clk_scaling.is_suspended = false;
1216         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1217 
1218         devfreq_resume_device(hba->devfreq);
1219 }
1220 
1221 static int ufshcd_devfreq_target(struct device *dev,
1222                                 unsigned long *freq, u32 flags)
1223 {
1224         int ret = 0;
1225         struct ufs_hba *hba = dev_get_drvdata(dev);
1226         ktime_t start;
1227         bool scale_up, sched_clk_scaling_suspend_work = false;
1228         struct list_head *clk_list = &hba->clk_list_head;
1229         struct ufs_clk_info *clki;
1230         unsigned long irq_flags;
1231 
1232         if (!ufshcd_is_clkscaling_supported(hba))
1233                 return -EINVAL;
1234 
1235         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1236         if (ufshcd_eh_in_progress(hba)) {
1237                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1238                 return 0;
1239         }
1240 
1241         if (!hba->clk_scaling.active_reqs)
1242                 sched_clk_scaling_suspend_work = true;
1243 
1244         if (list_empty(clk_list)) {
1245                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1246                 goto out;
1247         }
1248 
1249         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1250         scale_up = (*freq == clki->max_freq) ? true : false;
1251         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1252                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1253                 ret = 0;
1254                 goto out; 
1255         }
1256         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1257 
1258         start = ktime_get();
1259         ret = ufshcd_devfreq_scale(hba, scale_up);
1260 
1261         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1262                 (scale_up ? "up" : "down"),
1263                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1264 
1265 out:
1266         if (sched_clk_scaling_suspend_work)
1267                 queue_work(hba->clk_scaling.workq,
1268                            &hba->clk_scaling.suspend_work);
1269 
1270         return ret;
1271 }
1272 
1273 
1274 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1275                 struct devfreq_dev_status *stat)
1276 {
1277         struct ufs_hba *hba = dev_get_drvdata(dev);
1278         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1279         unsigned long flags;
1280 
1281         if (!ufshcd_is_clkscaling_supported(hba))
1282                 return -EINVAL;
1283 
1284         memset(stat, 0, sizeof(*stat));
1285 
1286         spin_lock_irqsave(hba->host->host_lock, flags);
1287         if (!scaling->window_start_t)
1288                 goto start_window;
1289 
1290         if (scaling->is_busy_started)
1291                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1292                                         scaling->busy_start_t));
1293 
1294         stat->total_time = jiffies_to_usecs((long)jiffies -
1295                                 (long)scaling->window_start_t);
1296         stat->busy_time = scaling->tot_busy_t;
1297 start_window:
1298         scaling->window_start_t = jiffies;
1299         scaling->tot_busy_t = 0;
1300 
1301         if (hba->outstanding_reqs) {
1302                 scaling->busy_start_t = ktime_get();
1303                 scaling->is_busy_started = true;
1304         } else {
1305                 scaling->busy_start_t = 0;
1306                 scaling->is_busy_started = false;
1307         }
1308         spin_unlock_irqrestore(hba->host->host_lock, flags);
1309         return 0;
1310 }
1311 
1312 static struct devfreq_dev_profile ufs_devfreq_profile = {
1313         .polling_ms     = 100,
1314         .target         = ufshcd_devfreq_target,
1315         .get_dev_status = ufshcd_devfreq_get_dev_status,
1316 };
1317 
1318 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1319 {
1320         struct list_head *clk_list = &hba->clk_list_head;
1321         struct ufs_clk_info *clki;
1322         struct devfreq *devfreq;
1323         int ret;
1324 
1325         
1326         if (list_empty(clk_list))
1327                 return 0;
1328 
1329         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1330         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1331         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1332 
1333         devfreq = devfreq_add_device(hba->dev,
1334                         &ufs_devfreq_profile,
1335                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1336                         NULL);
1337         if (IS_ERR(devfreq)) {
1338                 ret = PTR_ERR(devfreq);
1339                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1340 
1341                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1342                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1343                 return ret;
1344         }
1345 
1346         hba->devfreq = devfreq;
1347 
1348         return 0;
1349 }
1350 
1351 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1352 {
1353         struct list_head *clk_list = &hba->clk_list_head;
1354         struct ufs_clk_info *clki;
1355 
1356         if (!hba->devfreq)
1357                 return;
1358 
1359         devfreq_remove_device(hba->devfreq);
1360         hba->devfreq = NULL;
1361 
1362         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1363         dev_pm_opp_remove(hba->dev, clki->min_freq);
1364         dev_pm_opp_remove(hba->dev, clki->max_freq);
1365 }
1366 
1367 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1368 {
1369         unsigned long flags;
1370 
1371         devfreq_suspend_device(hba->devfreq);
1372         spin_lock_irqsave(hba->host->host_lock, flags);
1373         hba->clk_scaling.window_start_t = 0;
1374         spin_unlock_irqrestore(hba->host->host_lock, flags);
1375 }
1376 
1377 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1378 {
1379         unsigned long flags;
1380         bool suspend = false;
1381 
1382         if (!ufshcd_is_clkscaling_supported(hba))
1383                 return;
1384 
1385         spin_lock_irqsave(hba->host->host_lock, flags);
1386         if (!hba->clk_scaling.is_suspended) {
1387                 suspend = true;
1388                 hba->clk_scaling.is_suspended = true;
1389         }
1390         spin_unlock_irqrestore(hba->host->host_lock, flags);
1391 
1392         if (suspend)
1393                 __ufshcd_suspend_clkscaling(hba);
1394 }
1395 
1396 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1397 {
1398         unsigned long flags;
1399         bool resume = false;
1400 
1401         if (!ufshcd_is_clkscaling_supported(hba))
1402                 return;
1403 
1404         spin_lock_irqsave(hba->host->host_lock, flags);
1405         if (hba->clk_scaling.is_suspended) {
1406                 resume = true;
1407                 hba->clk_scaling.is_suspended = false;
1408         }
1409         spin_unlock_irqrestore(hba->host->host_lock, flags);
1410 
1411         if (resume)
1412                 devfreq_resume_device(hba->devfreq);
1413 }
1414 
1415 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1416                 struct device_attribute *attr, char *buf)
1417 {
1418         struct ufs_hba *hba = dev_get_drvdata(dev);
1419 
1420         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1421 }
1422 
1423 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1424                 struct device_attribute *attr, const char *buf, size_t count)
1425 {
1426         struct ufs_hba *hba = dev_get_drvdata(dev);
1427         u32 value;
1428         int err;
1429 
1430         if (kstrtou32(buf, 0, &value))
1431                 return -EINVAL;
1432 
1433         value = !!value;
1434         if (value == hba->clk_scaling.is_allowed)
1435                 goto out;
1436 
1437         pm_runtime_get_sync(hba->dev);
1438         ufshcd_hold(hba, false);
1439 
1440         cancel_work_sync(&hba->clk_scaling.suspend_work);
1441         cancel_work_sync(&hba->clk_scaling.resume_work);
1442 
1443         hba->clk_scaling.is_allowed = value;
1444 
1445         if (value) {
1446                 ufshcd_resume_clkscaling(hba);
1447         } else {
1448                 ufshcd_suspend_clkscaling(hba);
1449                 err = ufshcd_devfreq_scale(hba, true);
1450                 if (err)
1451                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1452                                         __func__, err);
1453         }
1454 
1455         ufshcd_release(hba);
1456         pm_runtime_put_sync(hba->dev);
1457 out:
1458         return count;
1459 }
1460 
1461 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1462 {
1463         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1464         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1465         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1466         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1467         hba->clk_scaling.enable_attr.attr.mode = 0644;
1468         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1469                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1470 }
1471 
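/*
 * Ungate work: re-enable the controller clocks and, if the link was put
 * into hibern8 while gating, bring it back to the active state before
 * unblocking SCSI requests.
 */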
1472 static void ufshcd_ungate_work(struct work_struct *work)
1473 {
1474         int ret;
1475         unsigned long flags;
1476         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1477                         clk_gating.ungate_work);
1478 
1479         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1480 
1481         spin_lock_irqsave(hba->host->host_lock, flags);
1482         if (hba->clk_gating.state == CLKS_ON) {
1483                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1484                 goto unblock_reqs;
1485         }
1486 
1487         spin_unlock_irqrestore(hba->host->host_lock, flags);
1488         ufshcd_setup_clocks(hba, true);
1489 
1490         /* Exit from hibern8 */
1491         if (ufshcd_can_hibern8_during_gating(hba)) {
1492                 /* Prevent gating in this path */
1493                 hba->clk_gating.is_suspended = true;
1494                 if (ufshcd_is_link_hibern8(hba)) {
1495                         ret = ufshcd_uic_hibern8_exit(hba);
1496                         if (ret)
1497                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1498                                         __func__, ret);
1499                         else
1500                                 ufshcd_set_link_active(hba);
1501                 }
1502                 hba->clk_gating.is_suspended = false;
1503         }
1504 unblock_reqs:
1505         ufshcd_scsi_unblock_requests(hba);
1506 }
1507 
1508 /**
1509  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1510  * Also, exit from hibern8 mode and set the link as active.
1511  * @hba: per adapter instance
1512  * @async: This indicates whether caller should ungate clocks asynchronously.
1513  */
1514 int ufshcd_hold(struct ufs_hba *hba, bool async)
1515 {
1516         int rc = 0;
1517         unsigned long flags;
1518 
1519         if (!ufshcd_is_clkgating_allowed(hba))
1520                 goto out;
1521         spin_lock_irqsave(hba->host->host_lock, flags);
1522         hba->clk_gating.active_reqs++;
1523 
1524         if (ufshcd_eh_in_progress(hba)) {
1525                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1526                 return 0;
1527         }
1528 
1529 start:
1530         switch (hba->clk_gating.state) {
1531         case CLKS_ON:
1532                 /*
1533                  * Wait for the ungate work to complete if in progress.
1534                  * Though the clocks may be in ON state, the link could
1535                  * still be in hibern8 state if hibern8 is allowed
1536                  * during clock gating.
1537                  * Make sure we exit hibern8 state also in addition to
1538                  * clocks being ON.
1539                  */
1540                 if (ufshcd_can_hibern8_during_gating(hba) &&
1541                     ufshcd_is_link_hibern8(hba)) {
1542                         if (async) {
1543                                 rc = -EAGAIN;
1544                                 hba->clk_gating.active_reqs--;
1545                                 break;
1546                         }
1547                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1548                         flush_work(&hba->clk_gating.ungate_work);
1549                         spin_lock_irqsave(hba->host->host_lock, flags);
1550                         goto start;
1551                 }
1552                 break;
1553         case REQ_CLKS_OFF:
1554                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1555                         hba->clk_gating.state = CLKS_ON;
1556                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1557                                                 hba->clk_gating.state);
1558                         break;
1559                 }
1560                 /*
1561                  * If we are here, it means gating work is either done or
1562                  * currently running. Hence, fall through to cancel gating
1563                  * work and to enable clocks.
1564                  */
1565                 /* fallthrough */
1566         case CLKS_OFF:
1567                 ufshcd_scsi_block_requests(hba);
1568                 hba->clk_gating.state = REQ_CLKS_ON;
1569                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1570                                         hba->clk_gating.state);
1571                 queue_work(hba->clk_gating.clk_gating_workq,
1572                            &hba->clk_gating.ungate_work);
1573                 /*
1574                  * fall through to check if we should wait for this
1575                  * work to be done or not.
1576                  */
1577                 /* fallthrough */
1578         case REQ_CLKS_ON:
1579                 if (async) {
1580                         rc = -EAGAIN;
1581                         hba->clk_gating.active_reqs--;
1582                         break;
1583                 }
1584 
1585                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1586                 flush_work(&hba->clk_gating.ungate_work);
1587                 /* Make sure state is CLKS_ON before returning */
1588                 spin_lock_irqsave(hba->host->host_lock, flags);
1589                 goto start;
1590         default:
1591                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1592                                 __func__, hba->clk_gating.state);
1593                 break;
1594         }
1595         spin_unlock_irqrestore(hba->host->host_lock, flags);
1596 out:
1597         return rc;
1598 }
1599 EXPORT_SYMBOL_GPL(ufshcd_hold);
1600 
1601 static void ufshcd_gate_work(struct work_struct *work)
1602 {
1603         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1604                         clk_gating.gate_work.work);
1605         unsigned long flags;
1606 
1607         spin_lock_irqsave(hba->host->host_lock, flags);
1608         /*
1609          * In case you are here to cancel this work the gating state
1610          * would be marked as REQ_CLKS_ON. In this case save time by
1611          * skipping the gating work and exit after changing the clock
1612          * state to CLKS_ON.
1613          */
1614         if (hba->clk_gating.is_suspended ||
1615                 (hba->clk_gating.state == REQ_CLKS_ON)) {
1616                 hba->clk_gating.state = CLKS_ON;
1617                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1618                                         hba->clk_gating.state);
1619                 goto rel_lock;
1620         }
1621 
1622         if (hba->clk_gating.active_reqs
1623                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1624                 || hba->lrb_in_use || hba->outstanding_tasks
1625                 || hba->active_uic_cmd || hba->uic_async_done)
1626                 goto rel_lock;
1627 
1628         spin_unlock_irqrestore(hba->host->host_lock, flags);
1629 
1630         /* put the link into hibern8 mode before turning off clocks */
1631         if (ufshcd_can_hibern8_during_gating(hba)) {
1632                 if (ufshcd_uic_hibern8_enter(hba)) {
1633                         hba->clk_gating.state = CLKS_ON;
1634                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1635                                                 hba->clk_gating.state);
1636                         goto out;
1637                 }
1638                 ufshcd_set_link_hibern8(hba);
1639         }
1640 
1641         if (!ufshcd_is_link_active(hba))
1642                 ufshcd_setup_clocks(hba, false);
1643         else
1644                 /* If link is active, device ref_clk can't be switched off */
1645                 __ufshcd_setup_clocks(hba, false, true);
1646 
1647         /*
1648          * In case you are here to cancel this work the gating state
1649          * would be marked as REQ_CLKS_ON. In this case keep the state
1650          * as REQ_CLKS_ON which would anyway imply that clocks are off
1651          * and a request to turn them on is pending. By doing this way,
1652          * we keep the state machine intact and this would ultimately
1653          * prevent from doing cancel work multiple times when there are
1654          * new requests arriving before the current cancel work is done.
1655          */
1656         spin_lock_irqsave(hba->host->host_lock, flags);
1657         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1658                 hba->clk_gating.state = CLKS_OFF;
1659                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1660                                         hba->clk_gating.state);
1661         }
1662 rel_lock:
1663         spin_unlock_irqrestore(hba->host->host_lock, flags);
1664 out:
1665         return;
1666 }
1667 
1668 /* host lock must be held before calling this variant */
1669 static void __ufshcd_release(struct ufs_hba *hba)
1670 {
1671         if (!ufshcd_is_clkgating_allowed(hba))
1672                 return;
1673 
1674         hba->clk_gating.active_reqs--;
1675 
1676         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1677                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1678                 || hba->lrb_in_use || hba->outstanding_tasks
1679                 || hba->active_uic_cmd || hba->uic_async_done
1680                 || ufshcd_eh_in_progress(hba))
1681                 return;
1682 
1683         hba->clk_gating.state = REQ_CLKS_OFF;
1684         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1685         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1686                            &hba->clk_gating.gate_work,
1687                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1688 }
1689 
1690 void ufshcd_release(struct ufs_hba *hba)
1691 {
1692         unsigned long flags;
1693 
1694         spin_lock_irqsave(hba->host->host_lock, flags);
1695         __ufshcd_release(hba);
1696         spin_unlock_irqrestore(hba->host->host_lock, flags);
1697 }
1698 EXPORT_SYMBOL_GPL(ufshcd_release);
1699 
1700 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1701                 struct device_attribute *attr, char *buf)
1702 {
1703         struct ufs_hba *hba = dev_get_drvdata(dev);
1704 
1705         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1706 }
1707 
1708 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1709                 struct device_attribute *attr, const char *buf, size_t count)
1710 {
1711         struct ufs_hba *hba = dev_get_drvdata(dev);
1712         unsigned long flags, value;
1713 
1714         if (kstrtoul(buf, 0, &value))
1715                 return -EINVAL;
1716 
1717         spin_lock_irqsave(hba->host->host_lock, flags);
1718         hba->clk_gating.delay_ms = value;
1719         spin_unlock_irqrestore(hba->host->host_lock, flags);
1720         return count;
1721 }
1722 
1723 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1724                 struct device_attribute *attr, char *buf)
1725 {
1726         struct ufs_hba *hba = dev_get_drvdata(dev);
1727 
1728         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1729 }
1730 
1731 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1732                 struct device_attribute *attr, const char *buf, size_t count)
1733 {
1734         struct ufs_hba *hba = dev_get_drvdata(dev);
1735         unsigned long flags;
1736         u32 value;
1737 
1738         if (kstrtou32(buf, 0, &value))
1739                 return -EINVAL;
1740 
1741         value = !!value;
1742         if (value == hba->clk_gating.is_enabled)
1743                 goto out;
1744 
1745         if (value) {
1746                 ufshcd_release(hba);
1747         } else {
1748                 spin_lock_irqsave(hba->host->host_lock, flags);
1749                 hba->clk_gating.active_reqs++;
1750                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1751         }
1752 
1753         hba->clk_gating.is_enabled = value;
1754 out:
1755         return count;
1756 }
1757 
1758 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1759 {
1760         char wq_name[sizeof("ufs_clkscaling_00")];
1761 
1762         if (!ufshcd_is_clkscaling_supported(hba))
1763                 return;
1764 
1765         INIT_WORK(&hba->clk_scaling.suspend_work,
1766                   ufshcd_clk_scaling_suspend_work);
1767         INIT_WORK(&hba->clk_scaling.resume_work,
1768                   ufshcd_clk_scaling_resume_work);
1769 
1770         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1771                  hba->host->host_no);
1772         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1773 
1774         ufshcd_clkscaling_init_sysfs(hba);
1775 }
1776 
1777 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1778 {
1779         if (!ufshcd_is_clkscaling_supported(hba))
1780                 return;
1781 
1782         destroy_workqueue(hba->clk_scaling.workq);
1783         ufshcd_devfreq_remove(hba);
1784 }
1785 
1786 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1787 {
1788         char wq_name[sizeof("ufs_clk_gating_00")];
1789 
1790         if (!ufshcd_is_clkgating_allowed(hba))
1791                 return;
1792 
1793         hba->clk_gating.delay_ms = 150;
1794         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1795         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1796 
1797         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1798                  hba->host->host_no);
1799         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1800                                                            WQ_MEM_RECLAIM);
1801 
1802         hba->clk_gating.is_enabled = true;
1803 
1804         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1805         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1806         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1807         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1808         hba->clk_gating.delay_attr.attr.mode = 0644;
1809         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1810                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1811 
1812         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1813         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1814         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1815         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1816         hba->clk_gating.enable_attr.attr.mode = 0644;
1817         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1818                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1819 }
1820 
1821 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1822 {
1823         if (!ufshcd_is_clkgating_allowed(hba))
1824                 return;
1825         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1826         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1827         cancel_work_sync(&hba->clk_gating.ungate_work);
1828         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1829         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1830 }
1831 
1832 /* Must be called with host lock acquired */
1833 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1834 {
1835         bool queue_resume_work = false;
1836 
1837         if (!ufshcd_is_clkscaling_supported(hba))
1838                 return;
1839 
1840         if (!hba->clk_scaling.active_reqs++)
1841                 queue_resume_work = true;
1842 
1843         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1844                 return;
1845 
1846         if (queue_resume_work)
1847                 queue_work(hba->clk_scaling.workq,
1848                            &hba->clk_scaling.resume_work);
1849 
1850         if (!hba->clk_scaling.window_start_t) {
1851                 hba->clk_scaling.window_start_t = jiffies;
1852                 hba->clk_scaling.tot_busy_t = 0;
1853                 hba->clk_scaling.is_busy_started = false;
1854         }
1855 
1856         if (!hba->clk_scaling.is_busy_started) {
1857                 hba->clk_scaling.busy_start_t = ktime_get();
1858                 hba->clk_scaling.is_busy_started = true;
1859         }
1860 }
1861 
1862 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1863 {
1864         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1865 
1866         if (!ufshcd_is_clkscaling_supported(hba))
1867                 return;
1868 
1869         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1870                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1871                                         scaling->busy_start_t));
1872                 scaling->busy_start_t = 0;
1873                 scaling->is_busy_started = false;
1874         }
1875 }
1876 
1877 /**
1878  * ufshcd_send_command - Send SCSI or device management commands
1879  * @hba: per adapter instance, @task_tag: task tag of the command
1880  */
1881 static inline
1882 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1883 {
1884         hba->lrb[task_tag].issue_time_stamp = ktime_get();
1885         hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1886         ufshcd_clk_scaling_start_busy(hba);
1887         __set_bit(task_tag, &hba->outstanding_reqs);
1888         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1889         /* Make sure that doorbell is committed immediately */
1890         wmb();
1891         ufshcd_add_command_trace(hba, task_tag, "send");
1892 }
1893 
1894 /**
1895  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1896  * @lrbp: pointer to local reference block
1897  */
1898 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1899 {
1900         int len;
1901         if (lrbp->sense_buffer &&
1902             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1903                 int len_to_copy;
1904 
1905                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1906                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1907 
1908                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1909                        len_to_copy);
1910         }
1911 }
1912 
1913 /**
1914  * ufshcd_copy_query_response() - Copy the Query Response and the data
1915  * descriptor
1916  * @hba: per adapter instance
1917  * @lrbp: pointer to local reference block
1918  */
1919 static
1920 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1921 {
1922         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1923 
1924         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1925 
1926         /* Get the descriptor */
1927         if (hba->dev_cmd.query.descriptor &&
1928             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1929                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1930                                 GENERAL_UPIU_REQUEST_SIZE;
1931                 u16 resp_len;
1932                 u16 buf_len;
1933 
1934                 /* data segment length */
1935                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1936                                                 MASK_QUERY_DATA_SEG_LEN;
1937                 buf_len = be16_to_cpu(
1938                                 hba->dev_cmd.query.request.upiu_req.length);
1939                 if (likely(buf_len >= resp_len)) {
1940                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1941                 } else {
1942                         dev_warn(hba->dev,
1943                                 "%s: Response size is bigger than buffer",
1944                                 __func__);
1945                         return -EINVAL;
1946                 }
1947         }
1948 
1949         return 0;
1950 }
1951 
1952 /**
1953  * ufshcd_hba_capabilities - Read controller capabilities
1954  * @hba: per adapter instance
1955  */
1956 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1957 {
1958         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1959 
1960         /* nutrs and nutmrs are 0 based values */
1961         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1962         hba->nutmrs =
1963         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1964 }
1965 
1966 /**
1967  * ufshcd_ready_for_uic_cmd - Check if controller is ready
1968  *                            to accept UIC commands
1969  * @hba: per adapter instance
1970  * Return true on success, else false
1971  */
1972 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1973 {
1974         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1975                 return true;
1976         else
1977                 return false;
1978 }
1979 
1980 /**
1981  * ufshcd_get_upmcrs - Get the power mode change request status
1982  * @hba: Pointer to adapter instance
1983  *
1984  * This function gets the UPMCRS field of HOST_STATUS register
1985  * Returns value of UPMCRS field
1986  */
1987 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1988 {
1989         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1990 }
1991 
1992 /**
1993  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1994  * @hba: per adapter instance
1995  * @uic_cmd: UIC command
1996  *
1997  * Mutex must be held.
1998  */
1999 static inline void
2000 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2001 {
2002         WARN_ON(hba->active_uic_cmd);
2003 
2004         hba->active_uic_cmd = uic_cmd;
2005 
2006         /* Write Args */
2007         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2008         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2009         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2010 
2011         /* Write UIC Cmd */
2012         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2013                       REG_UIC_COMMAND);
2014 }
2015 
2016 /**
2017  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2018  * @hba: per adapter instance
2019  * @uic_cmd: UIC command to wait for
2020  *
2021  * Must be called with mutex held.
2022  * Returns 0 only if success.
2023  */
2024 static int
2025 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2026 {
2027         int ret;
2028         unsigned long flags;
2029 
2030         if (wait_for_completion_timeout(&uic_cmd->done,
2031                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2032                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2033         else
2034                 ret = -ETIMEDOUT;
2035 
2036         spin_lock_irqsave(hba->host->host_lock, flags);
2037         hba->active_uic_cmd = NULL;
2038         spin_unlock_irqrestore(hba->host->host_lock, flags);
2039 
2040         return ret;
2041 }
2042 /**
2043  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2044  * @hba: per adapter instance
2045  * @uic_cmd: UIC command
2046  * @completion: initialize the completion only if this is set to true
2047  *
2048  * Identical to ufshcd_send_uic_cmd() except it does not take the mutex.
2049  * Must be called with mutex held and host_lock locked.
2050  *
2051  * Returns 0 only if success.
2052  */
2053 static int
2054 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2055                       bool completion)
2056 {
2057         if (!ufshcd_ready_for_uic_cmd(hba)) {
2058                 dev_err(hba->dev,
2059                         "Controller not ready to accept UIC commands\n");
2060                 return -EIO;
2061         }
2062 
2063         if (completion)
2064                 init_completion(&uic_cmd->done);
2065 
2066         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2067 
2068         return 0;
2069 }
2070 
2071 /**
2072  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2073  * @hba: per adapter instance
2074  * @uic_cmd: UIC command
2075  *
2076  * Returns 0 only if success.
2077  */
2078 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2079 {
2080         int ret;
2081         unsigned long flags;
2082 
2083         ufshcd_hold(hba, false);
2084         mutex_lock(&hba->uic_cmd_mutex);
2085         ufshcd_add_delay_before_dme_cmd(hba);
2086 
2087         spin_lock_irqsave(hba->host->host_lock, flags);
2088         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2089         spin_unlock_irqrestore(hba->host->host_lock, flags);
2090         if (!ret)
2091                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2092 
2093         mutex_unlock(&hba->uic_cmd_mutex);
2094 
2095         ufshcd_release(hba);
2096         return ret;
2097 }
2098 
2099 /**
2100  * ufshcd_map_sg - Map scatter-gather list to prdt
2101  * @hba: per adapter instance
2102  * @lrbp: pointer to local reference block
2103  *
2104  * Returns 0 in case of success, non-zero value in case of failure
2105  */
2106 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2107 {
2108         struct ufshcd_sg_entry *prd_table;
2109         struct scatterlist *sg;
2110         struct scsi_cmnd *cmd;
2111         int sg_segments;
2112         int i;
2113 
2114         cmd = lrbp->cmd;
2115         sg_segments = scsi_dma_map(cmd);
2116         if (sg_segments < 0)
2117                 return sg_segments;
2118 
2119         if (sg_segments) {
2120                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2121                         lrbp->utr_descriptor_ptr->prd_table_length =
2122                                 cpu_to_le16((u16)(sg_segments *
2123                                         sizeof(struct ufshcd_sg_entry)));
2124                 else
2125                         lrbp->utr_descriptor_ptr->prd_table_length =
2126                                 cpu_to_le16((u16) (sg_segments));
2127 
2128                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2129 
2130                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2131                         prd_table[i].size  =
2132                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2133                         prd_table[i].base_addr =
2134                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2135                         prd_table[i].upper_addr =
2136                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2137                         prd_table[i].reserved = 0;
2138                 }
2139         } else {
2140                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2141         }
2142 
2143         return 0;
2144 }
2145 
2146 /**
2147  * ufshcd_enable_intr - enable interrupts
2148  * @hba: per adapter instance
2149  * @intrs: interrupt bits
2150  */
2151 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2152 {
2153         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2154 
2155         if (hba->ufs_version == UFSHCI_VERSION_10) {
2156                 u32 rw;
2157                 rw = set & INTERRUPT_MASK_RW_VER_10;
2158                 set = rw | ((set ^ intrs) & intrs);
2159         } else {
2160                 set |= intrs;
2161         }
2162 
2163         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2164 }
2165 
2166 /**
2167  * ufshcd_disable_intr - disable interrupts
2168  * @hba: per adapter instance
2169  * @intrs: interrupt bits
2170  */
2171 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2172 {
2173         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2174 
2175         if (hba->ufs_version == UFSHCI_VERSION_10) {
2176                 u32 rw;
2177                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2178                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2179                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2180 
2181         } else {
2182                 set &= ~intrs;
2183         }
2184 
2185         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2186 }
2187 
2188 /**
2189  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2190  * descriptor according to request
2191  * @lrbp: pointer to local reference block
2192  * @upiu_flags: flags required in the header
2193  * @cmd_dir: requests data direction
2194  */
2195 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2196                         u32 *upiu_flags, enum dma_data_direction cmd_dir)
2197 {
2198         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2199         u32 data_direction;
2200         u32 dword_0;
2201 
2202         if (cmd_dir == DMA_FROM_DEVICE) {
2203                 data_direction = UTP_DEVICE_TO_HOST;
2204                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2205         } else if (cmd_dir == DMA_TO_DEVICE) {
2206                 data_direction = UTP_HOST_TO_DEVICE;
2207                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2208         } else {
2209                 data_direction = UTP_NO_DATA_TRANSFER;
2210                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2211         }
2212 
2213         dword_0 = data_direction | (lrbp->command_type
2214                                 << UPIU_COMMAND_TYPE_OFFSET);
2215         if (lrbp->intr_cmd)
2216                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2217 
2218         /* Transfer request descriptor header fields */
2219         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2220         /* dword_1 is reserved, hence it is set to 0 */
2221         req_desc->header.dword_1 = 0;
2222         /*
2223          * assigning invalid value for command status. Controller
2224          * updates OCS on command completion, with the command
2225          * status
2226          */
2227         req_desc->header.dword_2 =
2228                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2229         /* dword_3 is reserved, hence it is set to 0 */
2230         req_desc->header.dword_3 = 0;
2231 
2232         req_desc->prd_table_length = 0;
2233 }
2234 
2235 /**
2236  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2237  * for scsi commands
2238  * @lrbp: local reference block pointer
2239  * @upiu_flags: flags
2240  */
2241 static
2242 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2243 {
2244         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2245         unsigned short cdb_len;
2246 
2247         /* command descriptor fields */
2248         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2249                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2250                                 lrbp->lun, lrbp->task_tag);
2251         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2252                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2253 
2254         /* Total EHS length and Data segment length will be zero */
2255         ucd_req_ptr->header.dword_2 = 0;
2256 
2257         ucd_req_ptr->sc.exp_data_transfer_len =
2258                 cpu_to_be32(lrbp->cmd->sdb.length);
2259 
2260         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
2261         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2262         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2263 
2264         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2265 }
2266 
2267 /**
2268  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2269  * for query requests
2270  * @hba: UFS hba
2271  * @lrbp: local reference block pointer
2272  * @upiu_flags: flags
2273  */
2274 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2275                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2276 {
2277         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2278         struct ufs_query *query = &hba->dev_cmd.query;
2279         u16 len = be16_to_cpu(query->request.upiu_req.length);
2280 
2281         /* Query request header */
2282         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2283                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2284                         lrbp->lun, lrbp->task_tag);
2285         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2286                         0, query->request.query_func, 0, 0);
2287 
2288         /* Data segment length only needed for WRITE_DESC */
2289         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2290                 ucd_req_ptr->header.dword_2 =
2291                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2292         else
2293                 ucd_req_ptr->header.dword_2 = 0;
2294 
2295         /* Copy the Query Request buffer as is */
2296         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2297                         QUERY_OSF_SIZE);
2298 
2299         /* Copy the Descriptor */
2300         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2301                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2302 
2303         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2304 }
2305 
2306 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2307 {
2308         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2309 
2310         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2311 
2312         /* command descriptor fields */
2313         ucd_req_ptr->header.dword_0 =
2314                 UPIU_HEADER_DWORD(
2315                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2316         /* clear rest of the fields of basic header */
2317         ucd_req_ptr->header.dword_1 = 0;
2318         ucd_req_ptr->header.dword_2 = 0;
2319 
2320         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2321 }
2322 
2323 /**
2324  * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2325  *                           for Device Management Purposes
2326  * @hba: per adapter instance
2327  * @lrbp: pointer to local reference block
2328  */
2329 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2330 {
2331         u32 upiu_flags;
2332         int ret = 0;
2333 
2334         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2335             (hba->ufs_version == UFSHCI_VERSION_11))
2336                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2337         else
2338                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2339 
2340         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2341         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2342                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2343         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2344                 ufshcd_prepare_utp_nop_upiu(lrbp);
2345         else
2346                 ret = -EINVAL;
2347 
2348         return ret;
2349 }
2350 
2351 /**
2352  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2353  *                         for SCSI Transfer requests
2354  * @hba: per adapter instance
2355  * @lrbp: pointer to local reference block
2356  */
2357 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2358 {
2359         u32 upiu_flags;
2360         int ret = 0;
2361 
2362         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2363             (hba->ufs_version == UFSHCI_VERSION_11))
2364                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2365         else
2366                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2367 
2368         if (likely(lrbp->cmd)) {
2369                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2370                                                 lrbp->cmd->sc_data_direction);
2371                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2372         } else {
2373                 ret = -EINVAL;
2374         }
2375 
2376         return ret;
2377 }
2378 
2379 /*
2380  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2381  * @upiu_wlun_id: UPIU W-LUN id
2382  *
2383  * Returns SCSI W-LUN id
2384  */
2385 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2386 {
2387         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2388 }
2389 
2390 /**
2391  * ufshcd_queuecommand - main entry point for SCSI requests
2392  * @host: SCSI host pointer
2393  * @cmd: command from SCSI Midlayer
2394  *
2395  * Returns 0 for success, non-zero in case of failure
2396  */
2397 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2398 {
2399         struct ufshcd_lrb *lrbp;
2400         struct ufs_hba *hba;
2401         unsigned long flags;
2402         int tag;
2403         int err = 0;
2404 
2405         hba = shost_priv(host);
2406 
2407         tag = cmd->request->tag;
2408         if (!ufshcd_valid_tag(hba, tag)) {
2409                 dev_err(hba->dev,
2410                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2411                         __func__, tag, cmd, cmd->request);
2412                 BUG();
2413         }
2414 
2415         if (!down_read_trylock(&hba->clk_scaling_lock))
2416                 return SCSI_MLQUEUE_HOST_BUSY;
2417 
2418         spin_lock_irqsave(hba->host->host_lock, flags);
2419         switch (hba->ufshcd_state) {
2420         case UFSHCD_STATE_OPERATIONAL:
2421                 break;
2422         case UFSHCD_STATE_EH_SCHEDULED:
2423         case UFSHCD_STATE_RESET:
2424                 err = SCSI_MLQUEUE_HOST_BUSY;
2425                 goto out_unlock;
2426         case UFSHCD_STATE_ERROR:
2427                 set_host_byte(cmd, DID_ERROR);
2428                 cmd->scsi_done(cmd);
2429                 goto out_unlock;
2430         default:
2431                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2432                                 __func__, hba->ufshcd_state);
2433                 set_host_byte(cmd, DID_BAD_TARGET);
2434                 cmd->scsi_done(cmd);
2435                 goto out_unlock;
2436         }
2437 
2438         /* if error handling is in progress, don't issue commands */
2439         if (ufshcd_eh_in_progress(hba)) {
2440                 set_host_byte(cmd, DID_ERROR);
2441                 cmd->scsi_done(cmd);
2442                 goto out_unlock;
2443         }
2444         spin_unlock_irqrestore(hba->host->host_lock, flags);
2445 
2446         hba->req_abort_count = 0;
2447 
2448         /* acquire the tag to make sure device cmds don't use it */
2449         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2450                 /*
2451                  * Dev manage command in progress, requeue the command.
2452                  * Requeuing the command helps in cases where the request *may*
2453                  * find different tag instead of waiting for dev manage command
2454                  * completion.
2455                  */
2456                 err = SCSI_MLQUEUE_HOST_BUSY;
2457                 goto out;
2458         }
2459 
2460         err = ufshcd_hold(hba, true);
2461         if (err) {
2462                 err = SCSI_MLQUEUE_HOST_BUSY;
2463                 clear_bit_unlock(tag, &hba->lrb_in_use);
2464                 goto out;
2465         }
2466         WARN_ON(hba->clk_gating.state != CLKS_ON);
2467 
2468         lrbp = &hba->lrb[tag];
2469 
2470         WARN_ON(lrbp->cmd);
2471         lrbp->cmd = cmd;
2472         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2473         lrbp->sense_buffer = cmd->sense_buffer;
2474         lrbp->task_tag = tag;
2475         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2476         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2477         lrbp->req_abort_skip = false;
2478 
2479         ufshcd_comp_scsi_upiu(hba, lrbp);
2480 
2481         err = ufshcd_map_sg(hba, lrbp);
2482         if (err) {
2483                 ufshcd_release(hba);
2484                 lrbp->cmd = NULL;
2485                 clear_bit_unlock(tag, &hba->lrb_in_use);
2486                 goto out;
2487         }
2488         /* Make sure descriptors are ready before ringing the doorbell */
2489         wmb();
2490 
2491         /* issue command to the controller */
2492         spin_lock_irqsave(hba->host->host_lock, flags);
2493         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2494         ufshcd_send_command(hba, tag);
2495 out_unlock:
2496         spin_unlock_irqrestore(hba->host->host_lock, flags);
2497 out:
2498         up_read(&hba->clk_scaling_lock);
2499         return err;
2500 }
2501 
2502 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2503                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2504 {
2505         lrbp->cmd = NULL;
2506         lrbp->sense_bufflen = 0;
2507         lrbp->sense_buffer = NULL;
2508         lrbp->task_tag = tag;
2509         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2510         lrbp->intr_cmd = true; /* No interrupt aggregation */
2511         hba->dev_cmd.type = cmd_type;
2512 
2513         return ufshcd_comp_devman_upiu(hba, lrbp);
2514 }
2515 
2516 static int
2517 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2518 {
2519         int err = 0;
2520         unsigned long flags;
2521         u32 mask = 1 << tag;
2522 
2523         /* clear outstanding transaction before retry */
2524         spin_lock_irqsave(hba->host->host_lock, flags);
2525         ufshcd_utrl_clear(hba, tag);
2526         spin_unlock_irqrestore(hba->host->host_lock, flags);
2527 
2528         /*
2529          * wait for h/w to clear corresponding bit in door-bell.
2530          * max. wait is 1 sec.
2531          */
2532         err = ufshcd_wait_for_register(hba,
2533                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2534                         mask, ~mask, 1000, 1000, true);
2535 
2536         return err;
2537 }
2538 
2539 static int
2540 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2541 {
2542         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2543 
2544         /* Get the UPIU response */
2545         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2546                                 UPIU_RSP_CODE_OFFSET;
2547         return query_res->response;
2548 }
2549 
2550 /**
2551  * ufshcd_dev_cmd_completion() - handles device management command responses
2552  * @hba: per adapter instance
2553  * @lrbp: pointer to local reference block
2554  */
2555 static int
2556 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2557 {
2558         int resp;
2559         int err = 0;
2560 
2561         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2562         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2563 
2564         switch (resp) {
2565         case UPIU_TRANSACTION_NOP_IN:
2566                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2567                         err = -EINVAL;
2568                         dev_err(hba->dev, "%s: unexpected response %x\n",
2569                                         __func__, resp);
2570                 }
2571                 break;
2572         case UPIU_TRANSACTION_QUERY_RSP:
2573                 err = ufshcd_check_query_response(hba, lrbp);
2574                 if (!err)
2575                         err = ufshcd_copy_query_response(hba, lrbp);
2576                 break;
2577         case UPIU_TRANSACTION_REJECT_UPIU:
2578                 /* TODO: handle Reject UPIU Response */
2579                 err = -EPERM;
2580                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2581                                 __func__);
2582                 break;
2583         default:
2584                 err = -EINVAL;
2585                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2586                                 __func__, resp);
2587                 break;
2588         }
2589 
2590         return err;
2591 }
2592 
2593 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2594                 struct ufshcd_lrb *lrbp, int max_timeout)
2595 {
2596         int err = 0;
2597         unsigned long time_left;
2598         unsigned long flags;
2599 
2600         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2601                         msecs_to_jiffies(max_timeout));
2602 
2603         
2604         wmb();
2605         spin_lock_irqsave(hba->host->host_lock, flags);
2606         hba->dev_cmd.complete = NULL;
2607         if (likely(time_left)) {
2608                 err = ufshcd_get_tr_ocs(lrbp);
2609                 if (!err)
2610                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2611         }
2612         spin_unlock_irqrestore(hba->host->host_lock, flags);
2613 
2614         if (!time_left) {
2615                 err = -ETIMEDOUT;
2616                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2617                         __func__, lrbp->task_tag);
2618                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2619                         /* successfully cleared the command, retry if needed */
2620                         err = -EAGAIN;
2621                 /*
2622                  * in case of an error, after clearing the doorbell,
2623                  * we also need to clear the outstanding_request
2624                  * field in hba
2625                  */
2626                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2627         }
2628 
2629         return err;
2630 }
2631 
2632 /**
2633  * ufshcd_get_dev_cmd_tag - Get device management command tag
2634  * @hba: per-adapter instance
2635  * @tag_out: pointer to variable with available slot value
2636  *
2637  * Get a free slot and lock it until device management command
2638  * completes.
2639  *
2640  * Returns false if free slot is unavailable for locking, else
2641  * returns true with tag value in @tag_out.
2642  */
2643 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2644 {
2645         int tag;
2646         bool ret = false;
2647         unsigned long tmp;
2648 
2649         if (!tag_out)
2650                 goto out;
2651 
2652         do {
2653                 tmp = ~hba->lrb_in_use;
2654                 tag = find_last_bit(&tmp, hba->nutrs);
2655                 if (tag >= hba->nutrs)
2656                         goto out;
2657         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2658 
2659         *tag_out = tag;
2660         ret = true;
2661 out:
2662         return ret;
2663 }
2664 
2665 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2666 {
2667         clear_bit_unlock(tag, &hba->lrb_in_use);
2668 }
2669 
2670 /**
2671  * ufshcd_exec_dev_cmd - API for sending device management requests
2672  * @hba: UFS hba
2673  * @cmd_type: specifies the type (NOP, Query...)
2674  * @timeout: timeout in milliseconds
2675  *
2676  * NOTE: Since there is only one available tag for device management commands,
2677  * it is expected you hold the hba->dev_cmd.lock mutex.
2678  */
2679 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2680                 enum dev_cmd_type cmd_type, int timeout)
2681 {
2682         struct ufshcd_lrb *lrbp;
2683         int err;
2684         int tag;
2685         struct completion wait;
2686         unsigned long flags;
2687 
2688         down_read(&hba->clk_scaling_lock);
2689 
2690         /*
2691          * Get free slot, sleep if slots are unavailable.
2692          * Even though we use wait_event() which sleeps indefinitely,
2693          * the maximum wait time is bounded by SCSI request timeout.
2694          */
2695         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2696 
2697         init_completion(&wait);
2698         lrbp = &hba->lrb[tag];
2699         WARN_ON(lrbp->cmd);
2700         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2701         if (unlikely(err))
2702                 goto out_put_tag;
2703 
2704         hba->dev_cmd.complete = &wait;
2705 
2706         ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2707         /* Make sure descriptors are ready before ringing the doorbell */
2708         wmb();
2709         spin_lock_irqsave(hba->host->host_lock, flags);
2710         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2711         ufshcd_send_command(hba, tag);
2712         spin_unlock_irqrestore(hba->host->host_lock, flags);
2713 
2714         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2715 
2716         ufshcd_add_query_upiu_trace(hba, tag,
2717                         err ? "query_complete_err" : "query_complete");
2718 
2719 out_put_tag:
2720         ufshcd_put_dev_cmd_tag(hba, tag);
2721         wake_up(&hba->dev_cmd.tag_wq);
2722         up_read(&hba->clk_scaling_lock);
2723         return err;
2724 }
2725 
2726 /**
2727  * ufshcd_init_query() - init the query response and request parameters
2728  * @hba: per-adapter instance
2729  * @request: address of the request pointer to be initialized
2730  * @response: address of the response pointer to be initialized
2731  * @opcode: operation to perform
2732  * @idn: flag idn to access
2733  * @index: LU number to access
2734  * @selector: query/flag/descriptor further identification
2735  */
2736 static inline void ufshcd_init_query(struct ufs_hba *hba,
2737                 struct ufs_query_req **request, struct ufs_query_res **response,
2738                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2739 {
2740         *request = &hba->dev_cmd.query.request;
2741         *response = &hba->dev_cmd.query.response;
2742         memset(*request, 0, sizeof(struct ufs_query_req));
2743         memset(*response, 0, sizeof(struct ufs_query_res));
2744         (*request)->upiu_req.opcode = opcode;
2745         (*request)->upiu_req.idn = idn;
2746         (*request)->upiu_req.index = index;
2747         (*request)->upiu_req.selector = selector;
2748 }
2749 
2750 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2751         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2752 {
2753         int ret;
2754         int retries;
2755 
2756         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2757                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2758                 if (ret)
2759                         dev_dbg(hba->dev,
2760                                 "%s: failed with error %d, retries %d\n",
2761                                 __func__, ret, retries);
2762                 else
2763                         break;
2764         }
2765 
2766         if (ret)
2767                 dev_err(hba->dev,
2768                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2769                         __func__, opcode, idn, ret, retries);
2770         return ret;
2771 }
2772 
2773 /**
2774  * ufshcd_query_flag() - API function for sending flag query requests
2775  * @hba: per-adapter instance
2776  * @opcode: flag query to perform
2777  * @idn: flag idn to access
2778  * @flag_res: the flag value after the query request completes
2779  *
2780  * Returns 0 for success, non-zero in case of failure
2781  */
2782 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2783                         enum flag_idn idn, bool *flag_res)
2784 {
2785         struct ufs_query_req *request = NULL;
2786         struct ufs_query_res *response = NULL;
2787         int err, index = 0, selector = 0;
2788         int timeout = QUERY_REQ_TIMEOUT;
2789 
2790         BUG_ON(!hba);
2791 
2792         ufshcd_hold(hba, false);
2793         mutex_lock(&hba->dev_cmd.lock);
2794         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2795                         selector);
2796 
2797         switch (opcode) {
2798         case UPIU_QUERY_OPCODE_SET_FLAG:
2799         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2800         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2801                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2802                 break;
2803         case UPIU_QUERY_OPCODE_READ_FLAG:
2804                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2805                 if (!flag_res) {
2806                         /* No dummy reads */
2807                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2808                                         __func__);
2809                         err = -EINVAL;
2810                         goto out_unlock;
2811                 }
2812                 break;
2813         default:
2814                 dev_err(hba->dev,
2815                         "%s: Expected query flag opcode but got = %d\n",
2816                         __func__, opcode);
2817                 err = -EINVAL;
2818                 goto out_unlock;
2819         }
2820 
2821         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2822 
2823         if (err) {
2824                 dev_err(hba->dev,
2825                         "%s: Sending flag query for idn %d failed, err = %d\n",
2826                         __func__, idn, err);
2827                 goto out_unlock;
2828         }
2829 
2830         if (flag_res)
2831                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2832                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2833 
2834 out_unlock:
2835         mutex_unlock(&hba->dev_cmd.lock);
2836         ufshcd_release(hba);
2837         return err;
2838 }
2839 
2840 /**
2841  * ufshcd_query_attr - API function for sending attribute requests
2842  * @hba: per-adapter instance
2843  * @opcode: attribute opcode
2844  * @idn: attribute idn to access
2845  * @index: index field
2846  * @selector: selector field
2847  * @attr_val: the attribute value after the query request completes
2848  *
2849  * Returns 0 for success, non-zero in case of failure
2850  */
2851 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2852                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2853 {
2854         struct ufs_query_req *request = NULL;
2855         struct ufs_query_res *response = NULL;
2856         int err;
2857 
2858         BUG_ON(!hba);
2859 
2860         ufshcd_hold(hba, false);
2861         if (!attr_val) {
2862                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2863                                 __func__, opcode);
2864                 err = -EINVAL;
2865                 goto out;
2866         }
2867 
2868         mutex_lock(&hba->dev_cmd.lock);
2869         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2870                         selector);
2871 
2872         switch (opcode) {
2873         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2874                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2875                 request->upiu_req.value = cpu_to_be32(*attr_val);
2876                 break;
2877         case UPIU_QUERY_OPCODE_READ_ATTR:
2878                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2879                 break;
2880         default:
2881                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2882                                 __func__, opcode);
2883                 err = -EINVAL;
2884                 goto out_unlock;
2885         }
2886 
2887         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2888 
2889         if (err) {
2890                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2891                                 __func__, opcode, idn, index, err);
2892                 goto out_unlock;
2893         }
2894 
2895         *attr_val = be32_to_cpu(response->upiu_res.value);
2896 
2897 out_unlock:
2898         mutex_unlock(&hba->dev_cmd.lock);
2899 out:
2900         ufshcd_release(hba);
2901         return err;
2902 }
2903 
2904 /**
2905  * ufshcd_query_attr_retry() - API function for sending query
2906  * attribute with retries
2907  * @hba: per-adapter instance
2908  * @opcode: attribute opcode
2909  * @idn: attribute idn to access
2910  * @index: index field
2911  * @selector: selector field
2912  * @attr_val: the attribute value after the query request
2913  * completes
2914  *
2915  * Returns 0 for success, non-zero in case of failure
2916  */
2917 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2918         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2919         u32 *attr_val)
2920 {
2921         int ret = 0;
2922         u32 retries;
2923 
2924         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2925                 ret = ufshcd_query_attr(hba, opcode, idn, index,
2926                                                 selector, attr_val);
2927                 if (ret)
2928                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2929                                 __func__, ret, retries);
2930                 else
2931                         break;
2932         }
2933 
2934         if (ret)
2935                 dev_err(hba->dev,
2936                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2937                         __func__, idn, ret, QUERY_REQ_RETRIES);
2938         return ret;
2939 }
2940 
2941 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2942                         enum query_opcode opcode, enum desc_idn idn, u8 index,
2943                         u8 selector, u8 *desc_buf, int *buf_len)
2944 {
2945         struct ufs_query_req *request = NULL;
2946         struct ufs_query_res *response = NULL;
2947         int err;
2948 
2949         BUG_ON(!hba);
2950 
2951         ufshcd_hold(hba, false);
2952         if (!desc_buf) {
2953                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2954                                 __func__, opcode);
2955                 err = -EINVAL;
2956                 goto out;
2957         }
2958 
2959         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2960                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2961                                 __func__, *buf_len);
2962                 err = -EINVAL;
2963                 goto out;
2964         }
2965 
2966         mutex_lock(&hba->dev_cmd.lock);
2967         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2968                         selector);
2969         hba->dev_cmd.query.descriptor = desc_buf;
2970         request->upiu_req.length = cpu_to_be16(*buf_len);
2971 
2972         switch (opcode) {
2973         case UPIU_QUERY_OPCODE_WRITE_DESC:
2974                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2975                 break;
2976         case UPIU_QUERY_OPCODE_READ_DESC:
2977                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2978                 break;
2979         default:
2980                 dev_err(hba->dev,
2981                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2982                                 __func__, opcode);
2983                 err = -EINVAL;
2984                 goto out_unlock;
2985         }
2986 
2987         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2988 
2989         if (err) {
2990                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2991                                 __func__, opcode, idn, index, err);
2992                 goto out_unlock;
2993         }
2994 
2995         *buf_len = be16_to_cpu(response->upiu_res.length);
2996 
2997 out_unlock:
2998         hba->dev_cmd.query.descriptor = NULL;
2999         mutex_unlock(&hba->dev_cmd.lock);
3000 out:
3001         ufshcd_release(hba);
3002         return err;
3003 }
3004 
3005 /**
3006  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3007  * @hba: per-adapter instance
3008  * @opcode: attribute opcode
3009  * @idn: attribute idn to access
3010  * @index: index field
3011  * @selector: selector field
3012  * @desc_buf: the buffer that contains the descriptor
3013  * @buf_len: length parameter passed to the device
3014  *
3015  * Returns 0 for success, non-zero in case of failure.
3016  * The buf_len parameter will contain, on return, the length parameter
3017  * received on the response.
3018  */
3019 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3020                                   enum query_opcode opcode,
3021                                   enum desc_idn idn, u8 index,
3022                                   u8 selector,
3023                                   u8 *desc_buf, int *buf_len)
3024 {
3025         int err;
3026         int retries;
3027 
3028         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3029                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3030                                                 selector, desc_buf, buf_len);
3031                 if (!err || err == -EINVAL)
3032                         break;
3033         }
3034 
3035         return err;
3036 }
3037 
3038 /**
3039  * ufshcd_read_desc_length - read the specified descriptor length from header
3040  * @hba: Pointer to adapter instance
3041  * @desc_id: descriptor idn value
3042  * @desc_index: descriptor index
3043  * @desc_length: pointer to variable to read the length of descriptor
3044  *
3045  * Return 0 in case of success, non-zero otherwise
3046  */
3047 static int ufshcd_read_desc_length(struct ufs_hba *hba,
3048         enum desc_idn desc_id,
3049         int desc_index,
3050         int *desc_length)
3051 {
3052         int ret;
3053         u8 header[QUERY_DESC_HDR_SIZE];
3054         int header_len = QUERY_DESC_HDR_SIZE;
3055 
3056         if (desc_id >= QUERY_DESC_IDN_MAX)
3057                 return -EINVAL;
3058 
3059         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3060                                         desc_id, desc_index, 0, header,
3061                                         &header_len);
3062 
3063         if (ret) {
3064                 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3065                         __func__, desc_id);
3066                 return ret;
3067         } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3068                 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3069                         __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3070                         desc_id);
3071                 ret = -EINVAL;
3072         }
3073 
3074         *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3075         return ret;
3076 
3077 }
3078 
3079 /**
3080  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3081  * @hba: Pointer to adapter instance
3082  * @desc_id: descriptor idn value
3083  * @desc_len: mapped desc length (out)
3084  *
3085  * Return 0 in case of success, non-zero otherwise
3086  */
3087 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3088         enum desc_idn desc_id, int *desc_len)
3089 {
3090         switch (desc_id) {
3091         case QUERY_DESC_IDN_DEVICE:
3092                 *desc_len = hba->desc_size.dev_desc;
3093                 break;
3094         case QUERY_DESC_IDN_POWER:
3095                 *desc_len = hba->desc_size.pwr_desc;
3096                 break;
3097         case QUERY_DESC_IDN_GEOMETRY:
3098                 *desc_len = hba->desc_size.geom_desc;
3099                 break;
3100         case QUERY_DESC_IDN_CONFIGURATION:
3101                 *desc_len = hba->desc_size.conf_desc;
3102                 break;
3103         case QUERY_DESC_IDN_UNIT:
3104                 *desc_len = hba->desc_size.unit_desc;
3105                 break;
3106         case QUERY_DESC_IDN_INTERCONNECT:
3107                 *desc_len = hba->desc_size.interc_desc;
3108                 break;
3109         case QUERY_DESC_IDN_STRING:
3110                 *desc_len = QUERY_DESC_MAX_SIZE;
3111                 break;
3112         case QUERY_DESC_IDN_HEALTH:
3113                 *desc_len = hba->desc_size.hlth_desc;
3114                 break;
3115         case QUERY_DESC_IDN_RFU_0:
3116         case QUERY_DESC_IDN_RFU_1:
3117                 *desc_len = 0;
3118                 break;
3119         default:
3120                 *desc_len = 0;
3121                 return -EINVAL;
3122         }
3123         return 0;
3124 }
3125 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3126 
3127 /**
3128  * ufshcd_read_desc_param - read the specified descriptor parameter
3129  * @hba: Pointer to adapter instance
3130  * @desc_id: descriptor idn value
3131  * @desc_index: descriptor index
3132  * @param_offset: offset of the parameter to read
3133  * @param_read_buf: pointer to buffer where parameter would be read
3134  * @param_size: sizeof(param_read_buf)
3135  *
3136  * Return 0 in case of success, non-zero otherwise
3137  */
3138 int ufshcd_read_desc_param(struct ufs_hba *hba,
3139                            enum desc_idn desc_id,
3140                            int desc_index,
3141                            u8 param_offset,
3142                            u8 *param_read_buf,
3143                            u8 param_size)
3144 {
3145         int ret;
3146         u8 *desc_buf;
3147         int buff_len;
3148         bool is_kmalloc = true;
3149 
3150         /* Safety checks */
3151         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3152                 return -EINVAL;
3153 
3154         /* Get the max length of descriptor from structure filled up at
3155          * probe time.
3156          */
3157         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3158 
3159         /* Sanity checks */
3160         if (ret || !buff_len) {
3161                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3162                         __func__);
3163                 return ret;
3164         }
3165 
3166         /* Check whether we need temp memory */
3167         if (param_offset != 0 || param_size < buff_len) {
3168                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3169                 if (!desc_buf)
3170                         return -ENOMEM;
3171         } else {
3172                 desc_buf = param_read_buf;
3173                 is_kmalloc = false;
3174         }
3175 
3176         /* Request for full descriptor */
3177         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3178                                         desc_id, desc_index, 0,
3179                                         desc_buf, &buff_len);
3180 
3181         if (ret) {
3182                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3183                         __func__, desc_id, desc_index, param_offset, ret);
3184                 goto out;
3185         }
3186 
3187         /* Sanity check */
3188         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3189                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3190                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3191                 ret = -EINVAL;
3192                 goto out;
3193         }
3194 
3195         /* Check whether we will not copy more data than available */
3196         if (is_kmalloc && param_size > buff_len)
3197                 param_size = buff_len;
3198 
3199         if (is_kmalloc)
3200                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3201 out:
3202         if (is_kmalloc)
3203                 kfree(desc_buf);
3204         return ret;
3205 }
3206 
3207 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3208                                    enum desc_idn desc_id,
3209                                    int desc_index,
3210                                    void *buf,
3211                                    u32 size)
3212 {
3213         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3214 }
3215 
3216 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3217                                          u8 *buf,
3218                                          u32 size)
3219 {
3220         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3221 }
3222 
3223 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3224 {
3225         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3226 }
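/*
 * Illustrative sketch, not part of the driver: a caller that only needs a
 * single field can use ufshcd_read_desc_param() directly instead of reading
 * the whole descriptor. This assumes a valid hba and the
 * DEVICE_DESC_PARAM_NUM_LU offset constant from ufs.h:
 *
 *	u8 num_lu;
 *	int err;
 *
 *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_NUM_LU,
 *				     &num_lu, sizeof(num_lu));
 *	if (!err)
 *		dev_dbg(hba->dev, "bNumberLU = %u\n", num_lu);
 */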
3227 
3228 /**
3229  * struct uc_string_id - unicode string
3230  *
3231  * @len: size of this descriptor inclusive
3232  * @type: descriptor type
3233  * @uc: unicode string character
3234  */
3235 struct uc_string_id {
3236         u8 len;
3237         u8 type;
3238         wchar_t uc[0];
3239 } __packed;
3240 
3241 
3242 static inline char ufshcd_remove_non_printable(u8 ch)
3243 {
3244         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3245 }
3246 
3247 /**
3248  * ufshcd_read_string_desc - read string descriptor
3249  * @hba: pointer to adapter instance
3250  * @desc_index: descriptor index
3251  * @buf: pointer to buffer where descriptor would be read,
3252  *       the caller should free the memory.
3253  * @ascii: if true convert from unicode to ascii characters
3254  *         null terminated string.
3255  *
3256  * Return:
3257  * *      string size on success.
3258  * *      -ENOMEM: on allocation failure
3259  * *      -EINVAL: on a wrong parameter
3260  */
3261 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3262                             u8 **buf, bool ascii)
3263 {
3264         struct uc_string_id *uc_str;
3265         u8 *str;
3266         int ret;
3267 
3268         if (!buf)
3269                 return -EINVAL;
3270 
3271         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3272         if (!uc_str)
3273                 return -ENOMEM;
3274 
3275         ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3276                                desc_index, uc_str,
3277                                QUERY_DESC_MAX_SIZE);
3278         if (ret < 0) {
3279                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3280                         QUERY_REQ_RETRIES, ret);
3281                 str = NULL;
3282                 goto out;
3283         }
3284 
3285         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3286                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3287                 str = NULL;
3288                 ret = 0;
3289                 goto out;
3290         }
3291 
3292         if (ascii) {
3293                 ssize_t ascii_len;
3294                 int i;
3295                 
3296                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3297                 str = kzalloc(ascii_len, GFP_KERNEL);
3298                 if (!str) {
3299                         ret = -ENOMEM;
3300                         goto out;
3301                 }
3302 
3303                 /*
3304                  * The string is stored in UTF-16; convert it to UTF-8 for
3305                  * display, capping the output at ascii_len - 1 so the NUL
3306                  * terminator appended below stays within the buffer.
3307                  */
3308                 ret = utf16s_to_utf8s(uc_str->uc,
3309                                       uc_str->len - QUERY_DESC_HDR_SIZE,
                                      UTF16_BIG_ENDIAN, str, ascii_len - 1);
3310 
3311                 
3312                 for (i = 0; i < ret; i++)
3313                         str[i] = ufshcd_remove_non_printable(str[i]);
3314 
3315                 str[ret++] = '\0';
3316 
3317         } else {
3318                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3319                 if (!str) {
3320                         ret = -ENOMEM;
3321                         goto out;
3322                 }
3323                 ret = uc_str->len;
3324         }
3325 out:
3326         *buf = str;
3327         kfree(uc_str);
3328         return ret;
3329 }
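/*
 * Illustrative sketch, not part of the driver: reading a string descriptor
 * as a NUL-terminated ASCII string. "model_index" stands for an index
 * previously read from the device descriptor (hypothetical here); the caller
 * owns the returned buffer, and kfree(NULL) is safe when the call fails:
 *
 *	u8 *model = NULL;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_dbg(hba->dev, "product name: %s\n", model);
 *	kfree(model);
 */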
3330 
3331 /**
3332  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3333  * @hba: Pointer to adapter instance
3334  * @lun: lun id
3335  * @param_offset: offset of the parameter to read
3336  * @param_read_buf: pointer to buffer where parameter would be read
3337  * @param_size: sizeof(param_read_buf)
3338  *
3339  * Return 0 in case of success, non-zero otherwise
3340  */
3341 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3342                                               int lun,
3343                                               enum unit_desc_param param_offset,
3344                                               u8 *param_read_buf,
3345                                               u32 param_size)
3346 {
3347         /*
3348          * Unit descriptors are only available for general purpose LUs (LUN id
3349          * from 0 to 7) and RPMB Well known LU.
3350          */
3351         if (!ufs_is_valid_unit_desc_lun(lun))
3352                 return -EOPNOTSUPP;
3353 
3354         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3355                                       param_offset, param_read_buf, param_size);
3356 }
3357 
3358 /**
3359  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3360  * @hba: per adapter instance
3361  *
3362  * 1. Allocate DMA memory for Command Descriptor array
3363  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3364  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3365  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3366  *      (UTMRDL)
3367  * 4. Allocate memory for local reference block(lrb).
3368  *
3369  * Returns 0 for success, non-zero in case of failure
3370  */
3371 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3372 {
3373         size_t utmrdl_size, utrdl_size, ucdl_size;
3374 
3375         
3376         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3377         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3378                                                   ucdl_size,
3379                                                   &hba->ucdl_dma_addr,
3380                                                   GFP_KERNEL);
3381 
3382         
3383 
3384 
3385 
3386 
3387 
3388         if (!hba->ucdl_base_addr ||
3389             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3390                 dev_err(hba->dev,
3391                         "Command Descriptor Memory allocation failed\n");
3392                 goto out;
3393         }
3394 
3395         
3396 
3397 
3398 
3399         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3400         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3401                                                    utrdl_size,
3402                                                    &hba->utrdl_dma_addr,
3403                                                    GFP_KERNEL);
3404         if (!hba->utrdl_base_addr ||
3405             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3406                 dev_err(hba->dev,
3407                         "Transfer Descriptor Memory allocation failed\n");
3408                 goto out;
3409         }
3410 
3411         
3412 
3413 
3414 
3415         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3416         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3417                                                     utmrdl_size,
3418                                                     &hba->utmrdl_dma_addr,
3419                                                     GFP_KERNEL);
3420         if (!hba->utmrdl_base_addr ||
3421             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3422                 dev_err(hba->dev,
3423                 "Task Management Descriptor Memory allocation failed\n");
3424                 goto out;
3425         }
3426 
3427         
3428         hba->lrb = devm_kcalloc(hba->dev,
3429                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3430                                 GFP_KERNEL);
3431         if (!hba->lrb) {
3432                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3433                 goto out;
3434         }
3435         return 0;
3436 out:
3437         return -ENOMEM;
3438 }
3439 
3440 /**
3441  * ufshcd_host_memory_configure - configure local reference block with
3442  *                              memory offsets
3443  * @hba: per adapter instance
3444  *
3445  * Configure Host memory space
3446  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3447  * address.
3448  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3449  * and PRDT offset.
3450  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3451  * into local reference block.
3452  */
3453 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3454 {
3455         struct utp_transfer_cmd_desc *cmd_descp;
3456         struct utp_transfer_req_desc *utrdlp;
3457         dma_addr_t cmd_desc_dma_addr;
3458         dma_addr_t cmd_desc_element_addr;
3459         u16 response_offset;
3460         u16 prdt_offset;
3461         int cmd_desc_size;
3462         int i;
3463 
3464         utrdlp = hba->utrdl_base_addr;
3465         cmd_descp = hba->ucdl_base_addr;
3466 
3467         response_offset =
3468                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3469         prdt_offset =
3470                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3471 
3472         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3473         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3474 
3475         for (i = 0; i < hba->nutrs; i++) {
3476                 
3477                 cmd_desc_element_addr =
3478                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3479                 utrdlp[i].command_desc_base_addr_lo =
3480                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3481                 utrdlp[i].command_desc_base_addr_hi =
3482                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3483 
3484                 
3485                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3486                         utrdlp[i].response_upiu_offset =
3487                                 cpu_to_le16(response_offset);
3488                         utrdlp[i].prd_table_offset =
3489                                 cpu_to_le16(prdt_offset);
3490                         utrdlp[i].response_upiu_length =
3491                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3492                 } else {
3493                         utrdlp[i].response_upiu_offset =
3494                                 cpu_to_le16((response_offset >> 2));
3495                         utrdlp[i].prd_table_offset =
3496                                 cpu_to_le16((prdt_offset >> 2));
3497                         utrdlp[i].response_upiu_length =
3498                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3499                 }
3500 
3501                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3502                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3503                                 (i * sizeof(struct utp_transfer_req_desc));
3504                 hba->lrb[i].ucd_req_ptr =
3505                         (struct utp_upiu_req *)(cmd_descp + i);
3506                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3507                 hba->lrb[i].ucd_rsp_ptr =
3508                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3509                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3510                                 response_offset;
3511                 hba->lrb[i].ucd_prdt_ptr =
3512                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3513                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3514                                 prdt_offset;
3515         }
3516 }
3517 
3518 
3519 
3520 
3521 
3522 
3523 
3524 
3525 
3526 
3527 
3528 
3529 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3530 {
3531         struct uic_command uic_cmd = {0};
3532         int ret;
3533 
3534         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3535 
3536         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3537         if (ret)
3538                 dev_dbg(hba->dev,
3539                         "dme-link-startup: error code %d\n", ret);
3540         return ret;
3541 }
3542 
3543 
3544 
3545 
3546 
3547 
3548 
3549 
3550 
3551 static int ufshcd_dme_reset(struct ufs_hba *hba)
3552 {
3553         struct uic_command uic_cmd = {0};
3554         int ret;
3555 
3556         uic_cmd.command = UIC_CMD_DME_RESET;
3557 
3558         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3559         if (ret)
3560                 dev_err(hba->dev,
3561                         "dme-reset: error code %d\n", ret);
3562 
3563         return ret;
3564 }
3565 
3566 
3567 
3568 
3569 
3570 
3571 
3572 
3573 
3574 static int ufshcd_dme_enable(struct ufs_hba *hba)
3575 {
3576         struct uic_command uic_cmd = {0};
3577         int ret;
3578 
3579         uic_cmd.command = UIC_CMD_DME_ENABLE;
3580 
3581         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3582         if (ret)
3583                 dev_err(hba->dev,
3584                         "dme-enable: error code %d\n", ret);
3585 
3586         return ret;
3587 }
3588 
3589 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3590 {
3591         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3592         unsigned long min_sleep_time_us;
3593 
3594         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3595                 return;
3596 
3597         
3598 
3599 
3600 
3601         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3602                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3603         } else {
3604                 unsigned long delta =
3605                         (unsigned long) ktime_to_us(
3606                                 ktime_sub(ktime_get(),
3607                                 hba->last_dme_cmd_tstamp));
3608 
3609                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3610                         min_sleep_time_us =
3611                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3612                 else
3613                         return; 
3614         }
3615 
3616         
3617         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3618 }
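/*
 * Worked example for the quirk handled above: with
 * UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS set, at least 1000 us
 * (MIN_DELAY_BEFORE_DME_CMDS_US) must elapse between consecutive DME
 * commands. If the previous command finished 400 us ago, delta = 400 and the
 * function sleeps for 600..650 us; if 1000 us or more have already passed,
 * it returns without sleeping.
 */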
3619 
3620 
3621 
3622 
3623 
3624 
3625 
3626 
3627 
3628 
3629 
3630 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3631                         u8 attr_set, u32 mib_val, u8 peer)
3632 {
3633         struct uic_command uic_cmd = {0};
3634         static const char *const action[] = {
3635                 "dme-set",
3636                 "dme-peer-set"
3637         };
3638         const char *set = action[!!peer];
3639         int ret;
3640         int retries = UFS_UIC_COMMAND_RETRIES;
3641 
3642         uic_cmd.command = peer ?
3643                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3644         uic_cmd.argument1 = attr_sel;
3645         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3646         uic_cmd.argument3 = mib_val;
3647 
3648         do {
3649                 
3650                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3651                 if (ret)
3652                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3653                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3654         } while (ret && peer && --retries);
3655 
3656         if (ret)
3657                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3658                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3659                         UFS_UIC_COMMAND_RETRIES - retries);
3660 
3661         return ret;
3662 }
3663 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3664 
3665 
3666 
3667 
3668 
3669 
3670 
3671 
3672 
3673 
3674 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3675                         u32 *mib_val, u8 peer)
3676 {
3677         struct uic_command uic_cmd = {0};
3678         static const char *const action[] = {
3679                 "dme-get",
3680                 "dme-peer-get"
3681         };
3682         const char *get = action[!!peer];
3683         int ret;
3684         int retries = UFS_UIC_COMMAND_RETRIES;
3685         struct ufs_pa_layer_attr orig_pwr_info;
3686         struct ufs_pa_layer_attr temp_pwr_info;
3687         bool pwr_mode_change = false;
3688 
3689         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3690                 orig_pwr_info = hba->pwr_info;
3691                 temp_pwr_info = orig_pwr_info;
3692 
3693                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3694                     orig_pwr_info.pwr_rx == FAST_MODE) {
3695                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3696                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3697                         pwr_mode_change = true;
3698                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3699                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3700                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3701                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3702                         pwr_mode_change = true;
3703                 }
3704                 if (pwr_mode_change) {
3705                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3706                         if (ret)
3707                                 goto out;
3708                 }
3709         }
3710 
3711         uic_cmd.command = peer ?
3712                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3713         uic_cmd.argument1 = attr_sel;
3714 
3715         do {
3716                 
3717                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3718                 if (ret)
3719                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3720                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3721         } while (ret && peer && --retries);
3722 
3723         if (ret)
3724                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3725                         get, UIC_GET_ATTR_ID(attr_sel),
3726                         UFS_UIC_COMMAND_RETRIES - retries);
3727 
3728         if (mib_val && !ret)
3729                 *mib_val = uic_cmd.argument3;
3730 
3731         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3732             && pwr_mode_change)
3733                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3734 out:
3735         return ret;
3736 }
3737 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
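/*
 * Illustrative sketch, not part of the driver: callers normally use the
 * ufshcd_dme_set(), ufshcd_dme_get() and ufshcd_dme_peer_get() wrappers
 * declared in ufshcd.h, which supply the attr_set/peer arguments. For
 * example, reading the locally connected RX lanes and the peer's maximum HS
 * gear (as done in ufshcd_get_max_pwr_mode() below):
 *
 *	u32 rx_lanes = 0, peer_hs_gear = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &rx_lanes);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &peer_hs_gear);
 */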
3738 
3739 
3740 
3741 
3742 
3743 
3744 
3745 
3746 
3747 
3748 
3749 
3750 
3751 
3752 
3753 
3754 
3755 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3756 {
3757         struct completion uic_async_done;
3758         unsigned long flags;
3759         u8 status;
3760         int ret;
3761         bool reenable_intr = false;
3762 
3763         mutex_lock(&hba->uic_cmd_mutex);
3764         init_completion(&uic_async_done);
3765         ufshcd_add_delay_before_dme_cmd(hba);
3766 
3767         spin_lock_irqsave(hba->host->host_lock, flags);
3768         hba->uic_async_done = &uic_async_done;
3769         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3770                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3771                 
3772 
3773 
3774 
3775                 wmb();
3776                 reenable_intr = true;
3777         }
3778         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3779         spin_unlock_irqrestore(hba->host->host_lock, flags);
3780         if (ret) {
3781                 dev_err(hba->dev,
3782                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3783                         cmd->command, cmd->argument3, ret);
3784                 goto out;
3785         }
3786 
3787         if (!wait_for_completion_timeout(hba->uic_async_done,
3788                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3789                 dev_err(hba->dev,
3790                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3791                         cmd->command, cmd->argument3);
3792                 ret = -ETIMEDOUT;
3793                 goto out;
3794         }
3795 
3796         status = ufshcd_get_upmcrs(hba);
3797         if (status != PWR_LOCAL) {
3798                 dev_err(hba->dev,
3799                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3800                         cmd->command, status);
3801                 ret = (status != PWR_OK) ? status : -1;
3802         }
3803 out:
3804         if (ret) {
3805                 ufshcd_print_host_state(hba);
3806                 ufshcd_print_pwr_info(hba);
3807                 ufshcd_print_host_regs(hba);
3808         }
3809 
3810         spin_lock_irqsave(hba->host->host_lock, flags);
3811         hba->active_uic_cmd = NULL;
3812         hba->uic_async_done = NULL;
3813         if (reenable_intr)
3814                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3815         spin_unlock_irqrestore(hba->host->host_lock, flags);
3816         mutex_unlock(&hba->uic_cmd_mutex);
3817 
3818         return ret;
3819 }
3820 
3821 
3822 
3823 
3824 
3825 
3826 
3827 
3828 
3829 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3830 {
3831         struct uic_command uic_cmd = {0};
3832         int ret;
3833 
3834         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3835                 ret = ufshcd_dme_set(hba,
3836                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3837                 if (ret) {
3838                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3839                                                 __func__, ret);
3840                         goto out;
3841                 }
3842         }
3843 
3844         uic_cmd.command = UIC_CMD_DME_SET;
3845         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3846         uic_cmd.argument3 = mode;
3847         ufshcd_hold(hba, false);
3848         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3849         ufshcd_release(hba);
3850 
3851 out:
3852         return ret;
3853 }
3854 
3855 static int ufshcd_link_recovery(struct ufs_hba *hba)
3856 {
3857         int ret;
3858         unsigned long flags;
3859 
3860         spin_lock_irqsave(hba->host->host_lock, flags);
3861         hba->ufshcd_state = UFSHCD_STATE_RESET;
3862         ufshcd_set_eh_in_progress(hba);
3863         spin_unlock_irqrestore(hba->host->host_lock, flags);
3864 
3865         ret = ufshcd_host_reset_and_restore(hba);
3866 
3867         spin_lock_irqsave(hba->host->host_lock, flags);
3868         if (ret)
3869                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3870         ufshcd_clear_eh_in_progress(hba);
3871         spin_unlock_irqrestore(hba->host->host_lock, flags);
3872 
3873         if (ret)
3874                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3875                         __func__, ret);
3876 
3877         return ret;
3878 }
3879 
3880 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3881 {
3882         int ret;
3883         struct uic_command uic_cmd = {0};
3884         ktime_t start = ktime_get();
3885 
3886         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3887 
3888         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3889         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3890         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3891                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3892 
3893         if (ret) {
3894                 int err;
3895 
3896                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3897                         __func__, ret);
3898 
3899                 
3900 
3901 
3902 
3903 
3904 
3905                 err = ufshcd_link_recovery(hba);
3906                 if (err) {
3907                         dev_err(hba->dev, "%s: link recovery failed", __func__);
3908                         ret = err;
3909                 } else {
3910                         ret = -EAGAIN;
3911                 }
3912         } else
3913                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3914                                                                 POST_CHANGE);
3915 
3916         return ret;
3917 }
3918 
3919 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3920 {
3921         int ret = 0, retries;
3922 
3923         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3924                 ret = __ufshcd_uic_hibern8_enter(hba);
3925                 if (!ret)
3926                         goto out;
3927         }
3928 out:
3929         return ret;
3930 }
3931 
3932 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3933 {
3934         struct uic_command uic_cmd = {0};
3935         int ret;
3936         ktime_t start = ktime_get();
3937 
3938         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3939 
3940         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3941         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3942         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3943                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3944 
3945         if (ret) {
3946                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3947                         __func__, ret);
3948                 ret = ufshcd_link_recovery(hba);
3949         } else {
3950                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3951                                                                 POST_CHANGE);
3952                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3953                 hba->ufs_stats.hibern8_exit_cnt++;
3954         }
3955 
3956         return ret;
3957 }
3958 
3959 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3960 {
3961         unsigned long flags;
3962 
3963         if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
3964                 return;
3965 
3966         spin_lock_irqsave(hba->host->host_lock, flags);
3967         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3968         spin_unlock_irqrestore(hba->host->host_lock, flags);
3969 }
3970 
3971  
3972 
3973 
3974 
3975 
3976 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3977 {
3978         hba->pwr_info.gear_rx = UFS_PWM_G1;
3979         hba->pwr_info.gear_tx = UFS_PWM_G1;
3980         hba->pwr_info.lane_rx = 1;
3981         hba->pwr_info.lane_tx = 1;
3982         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3983         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3984         hba->pwr_info.hs_rate = 0;
3985 }
3986 
3987 
3988 
3989 
3990 
3991 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3992 {
3993         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3994 
3995         if (hba->max_pwr_info.is_valid)
3996                 return 0;
3997 
3998         pwr_info->pwr_tx = FAST_MODE;
3999         pwr_info->pwr_rx = FAST_MODE;
4000         pwr_info->hs_rate = PA_HS_MODE_B;
4001 
4002         
4003         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4004                         &pwr_info->lane_rx);
4005         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4006                         &pwr_info->lane_tx);
4007 
4008         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4009                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4010                                 __func__,
4011                                 pwr_info->lane_rx,
4012                                 pwr_info->lane_tx);
4013                 return -EINVAL;
4014         }
4015 
4016         
4017 
4018 
4019 
4020 
4021         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4022         if (!pwr_info->gear_rx) {
4023                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4024                                 &pwr_info->gear_rx);
4025                 if (!pwr_info->gear_rx) {
4026                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4027                                 __func__, pwr_info->gear_rx);
4028                         return -EINVAL;
4029                 }
4030                 pwr_info->pwr_rx = SLOW_MODE;
4031         }
4032 
4033         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4034                         &pwr_info->gear_tx);
4035         if (!pwr_info->gear_tx) {
4036                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4037                                 &pwr_info->gear_tx);
4038                 if (!pwr_info->gear_tx) {
4039                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4040                                 __func__, pwr_info->gear_tx);
4041                         return -EINVAL;
4042                 }
4043                 pwr_info->pwr_tx = SLOW_MODE;
4044         }
4045 
4046         hba->max_pwr_info.is_valid = true;
4047         return 0;
4048 }
4049 
4050 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4051                              struct ufs_pa_layer_attr *pwr_mode)
4052 {
4053         int ret;
4054 
4055         
4056         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4057             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4058             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4059             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4060             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4061             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4062             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4063                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4064                 return 0;
4065         }
4066 
4067         
4068 
4069 
4070 
4071 
4072 
4073         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4074         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4075                         pwr_mode->lane_rx);
4076         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4077                         pwr_mode->pwr_rx == FAST_MODE)
4078                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4079         else
4080                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4081 
4082         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4083         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4084                         pwr_mode->lane_tx);
4085         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4086                         pwr_mode->pwr_tx == FAST_MODE)
4087                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4088         else
4089                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4090 
4091         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4092             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4093             pwr_mode->pwr_rx == FAST_MODE ||
4094             pwr_mode->pwr_tx == FAST_MODE)
4095                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4096                                                 pwr_mode->hs_rate);
4097 
4098         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4099                         | pwr_mode->pwr_tx);
4100 
4101         if (ret) {
4102                 dev_err(hba->dev,
4103                         "%s: power mode change failed %d\n", __func__, ret);
4104         } else {
4105                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4106                                                                 pwr_mode);
4107 
4108                 memcpy(&hba->pwr_info, pwr_mode,
4109                         sizeof(struct ufs_pa_layer_attr));
4110         }
4111 
4112         return ret;
4113 }
4114 
4115 
4116 
4117 
4118 
4119 
4120 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4121                 struct ufs_pa_layer_attr *desired_pwr_mode)
4122 {
4123         struct ufs_pa_layer_attr final_params = { 0 };
4124         int ret;
4125 
4126         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4127                                         desired_pwr_mode, &final_params);
4128 
4129         if (ret)
4130                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4131 
4132         ret = ufshcd_change_power_mode(hba, &final_params);
4133         if (!ret)
4134                 ufshcd_print_pwr_info(hba);
4135 
4136         return ret;
4137 }
4138 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
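/*
 * Illustrative sketch, not part of the driver (the probe path passes
 * hba->max_pwr_info.info instead): a vendor driver requesting HS-G3 Rate B
 * on two lanes could fill a ufs_pa_layer_attr like this, assuming the
 * UFS_HS_G3, FAST_MODE and PA_HS_MODE_B constants from unipro.h:
 *
 *	struct ufs_pa_layer_attr pwr = {
 *		.gear_rx = UFS_HS_G3,	.gear_tx = UFS_HS_G3,
 *		.lane_rx = 2,		.lane_tx = 2,
 *		.pwr_rx = FAST_MODE,	.pwr_tx = FAST_MODE,
 *		.hs_rate = PA_HS_MODE_B,
 *	};
 *	int ret = ufshcd_config_pwr_mode(hba, &pwr);
 */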
4139 
4140 
4141 
4142 
4143 
4144 
4145 
4146 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4147 {
4148         int i;
4149         int err;
4150         bool flag_res = 1;
4151 
4152         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4153                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4154         if (err) {
4155                 dev_err(hba->dev,
4156                         "%s setting fDeviceInit flag failed with error %d\n",
4157                         __func__, err);
4158                 goto out;
4159         }
4160 
4161         
4162         for (i = 0; i < 1000 && !err && flag_res; i++)
4163                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4164                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4165 
4166         if (err)
4167                 dev_err(hba->dev,
4168                         "%s reading fDeviceInit flag failed with error %d\n",
4169                         __func__, err);
4170         else if (flag_res)
4171                 dev_err(hba->dev,
4172                         "%s fDeviceInit was not cleared by the device\n",
4173                         __func__);
4174 
4175 out:
4176         return err;
4177 }
4178 
4179 
4180 
4181 
4182 
4183 
4184 
4185 
4186 
4187 
4188 
4189 
4190 
4191 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4192 {
4193         int err = 0;
4194         u32 reg;
4195 
4196         
4197         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4198 
4199         
4200         if (ufshcd_is_intr_aggr_allowed(hba))
4201                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4202         else
4203                 ufshcd_disable_intr_aggr(hba);
4204 
4205         
4206         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4207                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4208         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4209                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4210         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4211                         REG_UTP_TASK_REQ_LIST_BASE_L);
4212         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4213                         REG_UTP_TASK_REQ_LIST_BASE_H);
4214 
4215         
4216 
4217 
4218 
4219         wmb();
4220 
4221         
4222 
4223 
4224         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4225         if (!(ufshcd_get_lists_status(reg))) {
4226                 ufshcd_enable_run_stop_reg(hba);
4227         } else {
4228                 dev_err(hba->dev,
4229                         "Host controller not ready to process requests");
4230                 err = -EIO;
4231                 goto out;
4232         }
4233 
4234 out:
4235         return err;
4236 }
4237 
4238 
4239 
4240 
4241 
4242 
4243 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4244 {
4245         int err;
4246 
4247         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4248         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4249                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4250                                         10, 1, can_sleep);
4251         if (err)
4252                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4253 }
4254 
4255 
4256 
4257 
4258 
4259 
4260 
4261 
4262 
4263 
4264 
4265 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4266 {
4267         int retry;
4268 
4269         if (!ufshcd_is_hba_active(hba))
4270                 
4271                 ufshcd_hba_stop(hba, true);
4272 
4273         
4274         ufshcd_set_link_off(hba);
4275 
4276         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4277 
4278         
4279         ufshcd_hba_start(hba);
4280 
4281         
4282 
4283 
4284 
4285 
4286 
4287 
4288 
4289 
4290 
4291         usleep_range(1000, 1100);
4292 
4293         
4294         retry = 10;
4295         while (ufshcd_is_hba_active(hba)) {
4296                 if (retry) {
4297                         retry--;
4298                 } else {
4299                         dev_err(hba->dev,
4300                                 "Controller enable failed\n");
4301                         return -EIO;
4302                 }
4303                 usleep_range(5000, 5100);
4304         }
4305 
4306         
4307         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4308 
4309         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4310 
4311         return 0;
4312 }
4313 
4314 static int ufshcd_hba_enable(struct ufs_hba *hba)
4315 {
4316         int ret;
4317 
4318         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4319                 ufshcd_set_link_off(hba);
4320                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4321 
4322                 
4323                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4324                 ret = ufshcd_dme_reset(hba);
4325                 if (!ret) {
4326                         ret = ufshcd_dme_enable(hba);
4327                         if (!ret)
4328                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4329                         if (ret)
4330                                 dev_err(hba->dev,
4331                                         "Host controller enable failed with non-hce\n");
4332                 }
4333         } else {
4334                 ret = ufshcd_hba_execute_hce(hba);
4335         }
4336 
4337         return ret;
4338 }
4339 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4340 {
4341         int tx_lanes, i, err = 0;
4342 
4343         if (!peer)
4344                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4345                                &tx_lanes);
4346         else
4347                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4348                                     &tx_lanes);
4349         for (i = 0; i < tx_lanes; i++) {
4350                 if (!peer)
4351                         err = ufshcd_dme_set(hba,
4352                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4353                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4354                                         0);
4355                 else
4356                         err = ufshcd_dme_peer_set(hba,
4357                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4358                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4359                                         0);
4360                 if (err) {
4361                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4362                                 __func__, peer, i, err);
4363                         break;
4364                 }
4365         }
4366 
4367         return err;
4368 }
4369 
4370 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4371 {
4372         return ufshcd_disable_tx_lcc(hba, true);
4373 }
4374 
4375 static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4376                                    u32 reg)
4377 {
4378         reg_hist->reg[reg_hist->pos] = reg;
4379         reg_hist->tstamp[reg_hist->pos] = ktime_get();
4380         reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4381 }
4382 
4383 
4384 
4385 
4386 
4387 
4388 
4389 static int ufshcd_link_startup(struct ufs_hba *hba)
4390 {
4391         int ret;
4392         int retries = DME_LINKSTARTUP_RETRIES;
4393         bool link_startup_again = false;
4394 
4395         
4396 
4397 
4398 
4399         if (!ufshcd_is_ufs_dev_active(hba))
4400                 link_startup_again = true;
4401 
4402 link_startup:
4403         do {
4404                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4405 
4406                 ret = ufshcd_dme_link_startup(hba);
4407 
4408                 
4409                 if (!ret && !ufshcd_is_device_present(hba)) {
4410                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4411                                                0);
4412                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4413                         ret = -ENXIO;
4414                         goto out;
4415                 }
4416 
4417                 
4418 
4419 
4420 
4421 
4422                 if (ret && ufshcd_hba_enable(hba)) {
4423                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4424                                                (u32)ret);
4425                         goto out;
4426                 }
4427         } while (ret && retries--);
4428 
4429         if (ret) {
4430                 
4431                 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4432                                        (u32)ret);
4433                 goto out;
4434         }
4435 
4436         if (link_startup_again) {
4437                 link_startup_again = false;
4438                 retries = DME_LINKSTARTUP_RETRIES;
4439                 goto link_startup;
4440         }
4441 
4442         
4443         ufshcd_init_pwr_info(hba);
4444         ufshcd_print_pwr_info(hba);
4445 
4446         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4447                 ret = ufshcd_disable_device_tx_lcc(hba);
4448                 if (ret)
4449                         goto out;
4450         }
4451 
4452         
4453         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4454         if (ret)
4455                 goto out;
4456 
4457         ret = ufshcd_make_hba_operational(hba);
4458 out:
4459         if (ret) {
4460                 dev_err(hba->dev, "link startup failed %d\n", ret);
4461                 ufshcd_print_host_state(hba);
4462                 ufshcd_print_pwr_info(hba);
4463                 ufshcd_print_host_regs(hba);
4464         }
4465         return ret;
4466 }
4467 
4468 
4469 
4470 
4471 
4472 
4473 
4474 
4475 
4476 
4477 
4478 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4479 {
4480         int err = 0;
4481         int retries;
4482 
4483         ufshcd_hold(hba, false);
4484         mutex_lock(&hba->dev_cmd.lock);
4485         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4486                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4487                                                NOP_OUT_TIMEOUT);
4488 
4489                 if (!err || err == -ETIMEDOUT)
4490                         break;
4491 
4492                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4493         }
4494         mutex_unlock(&hba->dev_cmd.lock);
4495         ufshcd_release(hba);
4496 
4497         if (err)
4498                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4499         return err;
4500 }
4501 
4502 
4503 
4504 
4505 
4506 
4507 
4508 
4509 
4510 
4511 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4512 {
4513         int ret = 0;
4514         u8 lun_qdepth;
4515         struct ufs_hba *hba;
4516 
4517         hba = shost_priv(sdev->host);
4518 
4519         lun_qdepth = hba->nutrs;
4520         ret = ufshcd_read_unit_desc_param(hba,
4521                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4522                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4523                                           &lun_qdepth,
4524                                           sizeof(lun_qdepth));
4525 
4526         
4527         if (ret == -EOPNOTSUPP)
4528                 lun_qdepth = 1;
4529         else if (!lun_qdepth)
4530                 
4531                 lun_qdepth = hba->nutrs;
4532         else
4533                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4534 
4535         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4536                         __func__, lun_qdepth);
4537         scsi_change_queue_depth(sdev, lun_qdepth);
4538 }
4539 
4540 
4541 
4542 
4543 
4544 
4545 
4546 
4547 
4548 
4549 
4550 
4551 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4552                             u8 lun,
4553                             u8 *b_lu_write_protect)
4554 {
4555         int ret;
4556 
4557         if (!b_lu_write_protect)
4558                 ret = -EINVAL;
4559         
4560 
4561 
4562 
4563 
4564         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4565                 ret = -ENOTSUPP;
4566         else
4567                 ret = ufshcd_read_unit_desc_param(hba,
4568                                           lun,
4569                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4570                                           b_lu_write_protect,
4571                                           sizeof(*b_lu_write_protect));
4572         return ret;
4573 }
4574 
4575 
4576 
4577 
4578 
4579 
4580 
4581 
4582 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4583                                                     struct scsi_device *sdev)
4584 {
4585         if (hba->dev_info.f_power_on_wp_en &&
4586             !hba->dev_info.is_lu_power_on_wp) {
4587                 u8 b_lu_write_protect;
4588 
4589                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4590                                       &b_lu_write_protect) &&
4591                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4592                         hba->dev_info.is_lu_power_on_wp = true;
4593         }
4594 }
4595 
4596 
4597 
4598 
4599 
4600 
4601 
4602 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4603 {
4604         struct ufs_hba *hba;
4605 
4606         hba = shost_priv(sdev->host);
4607 
4608         
4609         sdev->use_10_for_ms = 1;
4610 
4611         
4612         sdev->allow_restart = 1;
4613 
4614         
4615         sdev->no_report_opcodes = 1;
4616 
4617         
4618         sdev->no_write_same = 1;
4619 
4620         ufshcd_set_queue_depth(sdev);
4621 
4622         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4623 
4624         return 0;
4625 }
4626 
4627 
4628 
4629 
4630 
4631 
4632 
4633 
4634 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4635 {
4636         struct ufs_hba *hba = shost_priv(sdev->host);
4637 
4638         if (depth > hba->nutrs)
4639                 depth = hba->nutrs;
4640         return scsi_change_queue_depth(sdev, depth);
4641 }
4642 
4643 
4644 
4645 
4646 
4647 static int ufshcd_slave_configure(struct scsi_device *sdev)
4648 {
4649         struct request_queue *q = sdev->request_queue;
4650 
4651         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4652         return 0;
4653 }
4654 
4655 
4656 
4657 
4658 
4659 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4660 {
4661         struct ufs_hba *hba;
4662 
4663         hba = shost_priv(sdev->host);
4664         
4665         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4666                 unsigned long flags;
4667 
4668                 spin_lock_irqsave(hba->host->host_lock, flags);
4669                 hba->sdev_ufs_device = NULL;
4670                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4671         }
4672 }
4673 
4674 
4675 
4676 
4677 
4678 
4679 
4680 
4681 static inline int
4682 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4683 {
4684         int result = 0;
4685 
4686         switch (scsi_status) {
4687         case SAM_STAT_CHECK_CONDITION:
4688                 ufshcd_copy_sense_data(lrbp);
4689                 /* fallthrough */
4690         case SAM_STAT_GOOD:
4691                 result |= DID_OK << 16 |
4692                           COMMAND_COMPLETE << 8 |
4693                           scsi_status;
4694                 break;
4695         case SAM_STAT_TASK_SET_FULL:
4696         case SAM_STAT_BUSY:
4697         case SAM_STAT_TASK_ABORTED:
4698                 ufshcd_copy_sense_data(lrbp);
4699                 result |= scsi_status;
4700                 break;
4701         default:
4702                 result |= DID_ERROR << 16;
4703                 break;
4704         } 
4705 
4706         return result;
4707 }
4708 
4709 
4710 
4711 
4712 
4713 
4714 
4715 
4716 static inline int
4717 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4718 {
4719         int result = 0;
4720         int scsi_status;
4721         int ocs;
4722 
4723         
4724         ocs = ufshcd_get_tr_ocs(lrbp);
4725 
4726         switch (ocs) {
4727         case OCS_SUCCESS:
4728                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4729                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4730                 switch (result) {
4731                 case UPIU_TRANSACTION_RESPONSE:
4732                         
4733 
4734 
4735 
4736                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4737 
4738                         
4739 
4740 
4741 
4742                         scsi_status = result & MASK_SCSI_STATUS;
4743                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4744 
4745                         
4746 
4747 
4748 
4749 
4750 
4751 
4752 
4753 
4754 
4755 
4756 
4757                         if (!hba->pm_op_in_progress &&
4758                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4759                                 schedule_work(&hba->eeh_work);
4760                         break;
4761                 case UPIU_TRANSACTION_REJECT_UPIU:
4762                         
4763                         result = DID_ERROR << 16;
4764                         dev_err(hba->dev,
4765                                 "Reject UPIU not fully implemented\n");
4766                         break;
4767                 default:
4768                         dev_err(hba->dev,
4769                                 "Unexpected request response code = %x\n",
4770                                 result);
4771                         result = DID_ERROR << 16;
4772                         break;
4773                 }
4774                 break;
4775         case OCS_ABORTED:
4776                 result |= DID_ABORT << 16;
4777                 break;
4778         case OCS_INVALID_COMMAND_STATUS:
4779                 result |= DID_REQUEUE << 16;
4780                 break;
4781         case OCS_INVALID_CMD_TABLE_ATTR:
4782         case OCS_INVALID_PRDT_ATTR:
4783         case OCS_MISMATCH_DATA_BUF_SIZE:
4784         case OCS_MISMATCH_RESP_UPIU_SIZE:
4785         case OCS_PEER_COMM_FAILURE:
4786         case OCS_FATAL_ERROR:
4787         default:
4788                 result |= DID_ERROR << 16;
4789                 dev_err(hba->dev,
4790                                 "OCS error from controller = %x for tag %d\n",
4791                                 ocs, lrbp->task_tag);
4792                 ufshcd_print_host_regs(hba);
4793                 ufshcd_print_host_state(hba);
4794                 break;
4795         } 
4796 
4797         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4798                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4799         return result;
4800 }
4801 
4802 
4803 
4804 
4805 
4806 
4807 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4808 {
4809         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4810                 hba->active_uic_cmd->argument2 |=
4811                         ufshcd_get_uic_cmd_result(hba);
4812                 hba->active_uic_cmd->argument3 =
4813                         ufshcd_get_dme_attr_val(hba);
4814                 complete(&hba->active_uic_cmd->done);
4815         }
4816 
4817         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4818                 complete(hba->uic_async_done);
4819 }
4820 
4821 
4822 
4823 
4824 
4825 
4826 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4827                                         unsigned long completed_reqs)
4828 {
4829         struct ufshcd_lrb *lrbp;
4830         struct scsi_cmnd *cmd;
4831         int result;
4832         int index;
4833 
4834         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4835                 lrbp = &hba->lrb[index];
4836                 cmd = lrbp->cmd;
4837                 if (cmd) {
4838                         ufshcd_add_command_trace(hba, index, "complete");
4839                         result = ufshcd_transfer_rsp_status(hba, lrbp);
4840                         scsi_dma_unmap(cmd);
4841                         cmd->result = result;
4842                         
4843                         lrbp->cmd = NULL;
4844                         clear_bit_unlock(index, &hba->lrb_in_use);
4845                         
4846                         cmd->scsi_done(cmd);
4847                         __ufshcd_release(hba);
4848                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4849                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4850                         if (hba->dev_cmd.complete) {
4851                                 ufshcd_add_command_trace(hba, index,
4852                                                 "dev_complete");
4853                                 complete(hba->dev_cmd.complete);
4854                         }
4855                 }
4856                 if (ufshcd_is_clkscaling_supported(hba))
4857                         hba->clk_scaling.active_reqs--;
4858 
4859                 lrbp->compl_time_stamp = ktime_get();
4860         }
4861 
4862         
4863         hba->outstanding_reqs ^= completed_reqs;
4864 
4865         ufshcd_clk_scaling_update_busy(hba);
4866 
4867         
4868         wake_up(&hba->dev_cmd.tag_wq);
4869 }
4870 
4871 
4872 
4873 
4874 
4875 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4876 {
4877         unsigned long completed_reqs;
4878         u32 tr_doorbell;
4879 
4880         
4881 
4882 
4883 
4884 
4885 
4886 
4887         if (ufshcd_is_intr_aggr_allowed(hba) &&
4888             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4889                 ufshcd_reset_intr_aggr(hba);
4890 
4891         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4892         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4893 
4894         __ufshcd_transfer_req_compl(hba, completed_reqs);
4895 }
4896 
4897 
4898 
4899 
4900 
4901 
4902 
4903 
4904 
4905 
4906 
4907 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4908 {
4909         int err = 0;
4910         u32 val;
4911 
4912         if (!(hba->ee_ctrl_mask & mask))
4913                 goto out;
4914 
4915         val = hba->ee_ctrl_mask & ~mask;
4916         val &= MASK_EE_STATUS;
4917         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4918                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4919         if (!err)
4920                 hba->ee_ctrl_mask &= ~mask;
4921 out:
4922         return err;
4923 }
4924 
4925 
4926 
4927 
4928 
4929 
4930 
4931 
4932 
4933 
4934 
4935 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4936 {
4937         int err = 0;
4938         u32 val;
4939 
4940         if (hba->ee_ctrl_mask & mask)
4941                 goto out;
4942 
4943         val = hba->ee_ctrl_mask | mask;
4944         val &= MASK_EE_STATUS;
4945         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4946                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4947         if (!err)
4948                 hba->ee_ctrl_mask |= mask;
4949 out:
4950         return err;
4951 }
4952 
4953 
4954 
4955 
4956 
4957 
4958 
4959 
4960 
4961 
4962 
4963 
4964 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4965 {
4966         int err = 0;
4967 
4968         if (hba->auto_bkops_enabled)
4969                 goto out;
4970 
4971         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4972                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
4973         if (err) {
4974                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4975                                 __func__, err);
4976                 goto out;
4977         }
4978 
4979         hba->auto_bkops_enabled = true;
4980         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4981 
4982         
4983         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4984         if (err)
4985                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4986                                 __func__, err);
4987 out:
4988         return err;
4989 }
4990 
4991 
4992 
4993 
4994 
4995 
4996 
4997 
4998 
4999 
5000 
5001 
5002 
5003 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5004 {
5005         int err = 0;
5006 
5007         if (!hba->auto_bkops_enabled)
5008                 goto out;
5009 
5010         
5011 
5012 
5013 
5014         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5015         if (err) {
5016                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5017                                 __func__, err);
5018                 goto out;
5019         }
5020 
5021         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5022                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
5023         if (err) {
5024                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5025                                 __func__, err);
5026                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5027                 goto out;
5028         }
5029 
5030         hba->auto_bkops_enabled = false;
5031         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5032         hba->is_urgent_bkops_lvl_checked = false;
5033 out:
5034         return err;
5035 }
5036 
5037 
5038 
5039 
5040 
5041 
5042 
5043 
5044 
5045 
5046 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5047 {
5048         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5049                 hba->auto_bkops_enabled = false;
5050                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5051                 ufshcd_enable_auto_bkops(hba);
5052         } else {
5053                 hba->auto_bkops_enabled = true;
5054                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5055                 ufshcd_disable_auto_bkops(hba);
5056         }
5057         hba->is_urgent_bkops_lvl_checked = false;
5058 }
5059 
5060 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5061 {
5062         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5063                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5064 }
5065 
5066 
5067 /**
5068  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5069  * @hba: per-adapter instance
5070  * @status: bkops_status threshold
5071  *
5072  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5073  * flag in the device to permit background operations if the device's
5074  * bkops_status is greater than or equal to the "status" argument passed to
5075  * this function, disable it otherwise.
5076  *
5077  * Returns 0 for success, non-zero in case of failure.
5078  *
5079  * NOTE: Callers can check "hba->auto_bkops_enabled" after this function
5080  * returns to know whether auto bkops ended up enabled or disabled.
5081  */
5082 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5083                              enum bkops_status status)
5084 {
5085         int err;
5086         u32 curr_status = 0;
5087 
5088         err = ufshcd_get_bkops_status(hba, &curr_status);
5089         if (err) {
5090                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5091                                 __func__, err);
5092                 goto out;
5093         } else if (curr_status > BKOPS_STATUS_MAX) {
5094                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5095                                 __func__, curr_status);
5096                 err = -EINVAL;
5097                 goto out;
5098         }
5099 
5100         if (curr_status >= status)
5101                 err = ufshcd_enable_auto_bkops(hba);
5102         else
5103                 err = ufshcd_disable_auto_bkops(hba);
5104         hba->urgent_bkops_lvl = curr_status;
5105 out:
5106         return err;
5107 }
5108 
5109 
5110 /**
5111  * ufshcd_urgent_bkops - handle urgent bkops exception event
5112  * @hba: per-adapter instance
5113  *
5114  * Enable the fBackgroundOpsEn flag in the device to permit background
5115  * operations, using the urgent BKOPS level captured earlier as the
5116  * threshold for ufshcd_bkops_ctrl().
5117  * Returns zero on success, non-zero error value on failure.
5118  */
5119 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5120 {
5121         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5122 }
5123 
5124 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5125 {
5126         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5127                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5128 }
5129 
5130 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5131 {
5132         int err;
5133         u32 curr_status = 0;
5134 
5135         if (hba->is_urgent_bkops_lvl_checked)
5136                 goto enable_auto_bkops;
5137 
5138         err = ufshcd_get_bkops_status(hba, &curr_status);
5139         if (err) {
5140                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5141                                 __func__, err);
5142                 goto out;
5143         }
5144 
5145         /*
5146          * Some devices raise the urgent BKOPS exception event even when the
5147          * BKOPS status does not indicate a performance impact or a critical
5148          * condition. Handle such devices by determining their urgent bkops
5149          * status at runtime and remembering it for later decisions.
5150          */
5151         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5152                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5153                                 __func__, curr_status);
5154                 /* update the current status as the urgent bkops level */
5155                 hba->urgent_bkops_lvl = curr_status;
5156                 hba->is_urgent_bkops_lvl_checked = true;
5157         }
5158 
5159 enable_auto_bkops:
5160         err = ufshcd_enable_auto_bkops(hba);
5161 out:
5162         if (err < 0)
5163                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5164                                 __func__, err);
5165 }
5166 
5167 
5168 /**
5169  * ufshcd_exception_event_handler - handle exceptions raised by device
5170  * @work: pointer to work data
5171  *
5172  * Read the exception event status attribute and handle any raised events.
5173  */
5174 static void ufshcd_exception_event_handler(struct work_struct *work)
5175 {
5176         struct ufs_hba *hba;
5177         int err;
5178         u32 status = 0;
5179         hba = container_of(work, struct ufs_hba, eeh_work);
5180 
5181         pm_runtime_get_sync(hba->dev);
5182         scsi_block_requests(hba->host);
5183         err = ufshcd_get_ee_status(hba, &status);
5184         if (err) {
5185                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5186                                 __func__, err);
5187                 goto out;
5188         }
5189 
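             /* Act only on the exception events that the driver has enabled */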
5190         status &= hba->ee_ctrl_mask;
5191 
5192         if (status & MASK_EE_URGENT_BKOPS)
5193                 ufshcd_bkops_exception_event_handler(hba);
5194 
5195 out:
5196         scsi_unblock_requests(hba->host);
5197         pm_runtime_put_sync(hba->dev);
5198         return;
5199 }
5200 
5201 
5202 static void ufshcd_complete_requests(struct ufs_hba *hba)
5203 {
5204         ufshcd_transfer_req_compl(hba);
5205         ufshcd_tmc_handler(hba);
5206 }
5207 
5208 
5209 /**
5210  * ufshcd_quirk_dl_nac_errors - check whether full error handling is needed
5211  *				to recover from data-link NAC errors
5212  * @hba: per-adapter instance
5213  * Returns true if error handling is required, false otherwise.
5214  */
5215 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5216 {
5217         unsigned long flags;
5218         bool err_handling = true;
5219 
5220         spin_lock_irqsave(hba->host->host_lock, flags);
5221         
5222 
5223 
5224 
5225         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5226                 goto out;
5227 
5228         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5229             ((hba->saved_err & UIC_ERROR) &&
5230              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5231                 goto out;
5232 
5233         if ((hba->saved_err & UIC_ERROR) &&
5234             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5235                 int err;
5236                 
5237 
5238 
5239                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5240                 msleep(50);
5241                 spin_lock_irqsave(hba->host->host_lock, flags);
5242 
5243                 
5244 
5245 
5246 
5247                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5248                     ((hba->saved_err & UIC_ERROR) &&
5249                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5250                         goto out;
5251 
5252                 
5253 
5254 
5255 
5256 
5257 
5258 
5259                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5260                 err = ufshcd_verify_dev_init(hba);
5261                 spin_lock_irqsave(hba->host->host_lock, flags);
5262 
5263                 if (err)
5264                         goto out;
5265 
5266                 
5267                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5268                         hba->saved_err &= ~UIC_ERROR;
5269                 
5270                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5271                 if (!hba->saved_uic_err) {
5272                         err_handling = false;
5273                         goto out;
5274                 }
5275         }
5276 out:
5277         spin_unlock_irqrestore(hba->host->host_lock, flags);
5278         return err_handling;
5279 }
5280 
5281 
5282 /*
5283  * ufshcd_err_handler - handle UFS errors that require s/w attention
5284  */
5285 static void ufshcd_err_handler(struct work_struct *work)
5286 {
5287         struct ufs_hba *hba;
5288         unsigned long flags;
5289         u32 err_xfer = 0;
5290         u32 err_tm = 0;
5291         int err = 0;
5292         int tag;
5293         bool needs_reset = false;
5294 
5295         hba = container_of(work, struct ufs_hba, eh_work);
5296 
5297         pm_runtime_get_sync(hba->dev);
5298         ufshcd_hold(hba, false);
5299 
5300         spin_lock_irqsave(hba->host->host_lock, flags);
5301         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5302                 goto out;
5303 
5304         hba->ufshcd_state = UFSHCD_STATE_RESET;
5305         ufshcd_set_eh_in_progress(hba);
5306 
5307         
5308         ufshcd_complete_requests(hba);
5309 
5310         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5311                 bool ret;
5312 
5313                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5314                 
5315                 ret = ufshcd_quirk_dl_nac_errors(hba);
5316                 spin_lock_irqsave(hba->host->host_lock, flags);
5317                 if (!ret)
5318                         goto skip_err_handling;
5319         }
5320         if ((hba->saved_err & INT_FATAL_ERRORS) ||
5321             (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
5322             ((hba->saved_err & UIC_ERROR) &&
5323             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5324                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5325                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5326                 needs_reset = true;
5327 
5328         
5329 
5330 
5331 
5332 
5333         if (needs_reset)
5334                 goto skip_pending_xfer_clear;
5335 
5336         
5337         spin_unlock_irqrestore(hba->host->host_lock, flags);
5338         
5339         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5340                 if (ufshcd_clear_cmd(hba, tag)) {
5341                         err_xfer = true;
5342                         goto lock_skip_pending_xfer_clear;
5343                 }
5344         }
5345 
5346         
5347         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5348                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5349                         err_tm = true;
5350                         goto lock_skip_pending_xfer_clear;
5351                 }
5352         }
5353 
5354 lock_skip_pending_xfer_clear:
5355         spin_lock_irqsave(hba->host->host_lock, flags);
5356 
5357         
5358         ufshcd_complete_requests(hba);
5359 
5360         if (err_xfer || err_tm)
5361                 needs_reset = true;
5362 
5363 skip_pending_xfer_clear:
5364         
5365         if (needs_reset) {
5366                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5367 
5368                 /*
5369                  * ufshcd_reset_and_restore() does the link reinitialization
5370                  * which will need at least one empty doorbell slot to send
5371                  * the device management commands (NOP and query commands).
5372                  * If there is no empty slot at this moment then free up the
5373                  * last slot forcefully.
5374                  */
5375                 if (hba->outstanding_reqs == max_doorbells)
5376                         __ufshcd_transfer_req_compl(hba,
5377                                                     (1UL << (hba->nutrs - 1)));
5378 
5379                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5380                 err = ufshcd_reset_and_restore(hba);
5381                 spin_lock_irqsave(hba->host->host_lock, flags);
5382                 if (err) {
5383                         dev_err(hba->dev, "%s: reset and restore failed\n",
5384                                         __func__);
5385                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5386                 }
5387                 
5388 
5389 
5390 
5391                 scsi_report_bus_reset(hba->host, 0);
5392                 hba->saved_err = 0;
5393                 hba->saved_uic_err = 0;
5394         }
5395 
5396 skip_err_handling:
5397         if (!needs_reset) {
5398                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5399                 if (hba->saved_err || hba->saved_uic_err)
5400                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5401                             __func__, hba->saved_err, hba->saved_uic_err);
5402         }
5403 
5404         ufshcd_clear_eh_in_progress(hba);
5405 
5406 out:
5407         spin_unlock_irqrestore(hba->host->host_lock, flags);
5408         ufshcd_scsi_unblock_requests(hba);
5409         ufshcd_release(hba);
5410         pm_runtime_put_sync(hba->dev);
5411 }
5412 
5413 
5414 /*
5415  * ufshcd_update_uic_error - check the UIC error registers and set error flags
5416  */
5417 static void ufshcd_update_uic_error(struct ufs_hba *hba)
5418 {
5419         u32 reg;
5420 
5421         
5422         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5423         
5424         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5425                         (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5426                 
5427 
5428 
5429 
5430                 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5431                 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5432         }
5433 
5434         
5435         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5436         if (reg)
5437                 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5438 
5439         if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5440                 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5441         else if (hba->dev_quirks &
5442                    UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5443                 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5444                         hba->uic_error |=
5445                                 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5446                 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5447                         hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5448         }
5449 
5450         
5451         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5452         if (reg) {
5453                 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5454                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5455         }
5456 
5457         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5458         if (reg) {
5459                 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
5460                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5461         }
5462 
5463         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5464         if (reg) {
5465                 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
5466                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5467         }
5468 
5469         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5470                         __func__, hba->uic_error);
5471 }
5472 
5473 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5474                                          u32 intr_mask)
5475 {
5476         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5477             !ufshcd_is_auto_hibern8_enabled(hba))
5478                 return false;
5479 
5480         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5481                 return false;
5482 
5483         if (hba->active_uic_cmd &&
5484             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5485             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5486                 return false;
5487 
5488         return true;
5489 }
5490 
5491 
5492 /*
5493  * ufshcd_check_errors - check for errors that require s/w attention
5494  */
5495 static void ufshcd_check_errors(struct ufs_hba *hba)
5496 {
5497         bool queue_eh_work = false;
5498 
5499         if (hba->errors & INT_FATAL_ERRORS) {
5500                 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
5501                 queue_eh_work = true;
5502         }
5503 
5504         if (hba->errors & UIC_ERROR) {
5505                 hba->uic_error = 0;
5506                 ufshcd_update_uic_error(hba);
5507                 if (hba->uic_error)
5508                         queue_eh_work = true;
5509         }
5510 
5511         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5512                 dev_err(hba->dev,
5513                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5514                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5515                         "Enter" : "Exit",
5516                         hba->errors, ufshcd_get_upmcrs(hba));
5517                 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5518                                        hba->errors);
5519                 queue_eh_work = true;
5520         }
5521 
5522         if (queue_eh_work) {
5523                 
5524 
5525 
5526 
5527                 hba->saved_err |= hba->errors;
5528                 hba->saved_uic_err |= hba->uic_error;
5529 
5530                 
5531                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5532                         
5533                         ufshcd_scsi_block_requests(hba);
5534 
5535                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5536 
5537                         
5538                         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5539                                 bool pr_prdt = !!(hba->saved_err &
5540                                                 SYSTEM_BUS_FATAL_ERROR);
5541 
5542                                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5543                                         __func__, hba->saved_err,
5544                                         hba->saved_uic_err);
5545 
5546                                 ufshcd_print_host_regs(hba);
5547                                 ufshcd_print_pwr_info(hba);
5548                                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5549                                 ufshcd_print_trs(hba, hba->outstanding_reqs,
5550                                                         pr_prdt);
5551                         }
5552                         schedule_work(&hba->eh_work);
5553                 }
5554         }
5555         /*
5556          * if (!queue_eh_work) -
5557          * Other errors are either non-fatal, where the host recovers
5558          * by itself without s/w intervention, or errors that will be
5559          * handled by the SCSI core layer.
5560          */
5561 }
5562 
5563 
5564 /*
5565  * ufshcd_tmc_handler - handle task management function completion
5566  */
5567 static void ufshcd_tmc_handler(struct ufs_hba *hba)
5568 {
5569         u32 tm_doorbell;
5570 
5571         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
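             /* Tasks whose doorbell bits the controller has cleared are complete */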
5572         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5573         wake_up(&hba->tm_wq);
5574 }
5575 
5576 
5577 /*
5578  * ufshcd_sl_intr - interrupt service routine body: dispatch each interrupt
5579  * source (errors, UIC, TM and transfer completions) to its handler.
5580  */
5581 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5582 {
5583         hba->errors = UFSHCD_ERROR_MASK & intr_status;
5584 
5585         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5586                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5587 
5588         if (hba->errors)
5589                 ufshcd_check_errors(hba);
5590 
5591         if (intr_status & UFSHCD_UIC_MASK)
5592                 ufshcd_uic_cmd_compl(hba, intr_status);
5593 
5594         if (intr_status & UTP_TASK_REQ_COMPL)
5595                 ufshcd_tmc_handler(hba);
5596 
5597         if (intr_status & UTP_TRANSFER_REQ_COMPL)
5598                 ufshcd_transfer_req_compl(hba);
5599 }
5600 
5601 
5602 /**
5603  * ufshcd_intr - Main interrupt service routine
5604  * @irq: irq number
5605  * @__hba: pointer to adapter instance
5606  *
5607  * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
5608  */
5609 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5610 {
5611         u32 intr_status, enabled_intr_status;
5612         irqreturn_t retval = IRQ_NONE;
5613         struct ufs_hba *hba = __hba;
5614         int retries = hba->nutrs;
5615 
5616         spin_lock(hba->host->host_lock);
5617         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5618 
5619         /*
5620          * There could be up to hba->nutrs requests in flight; in the worst
5621          * case, if they finish one by one after the interrupt status has
5622          * been read, keep re-reading the interrupt status in a loop (bounded
5623          * by the number of retries) so that none of them is left unhandled.
5624          */
5625         do {
5626                 enabled_intr_status =
5627                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5628                 if (intr_status)
5629                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5630                 if (enabled_intr_status) {
5631                         ufshcd_sl_intr(hba, enabled_intr_status);
5632                         retval = IRQ_HANDLED;
5633                 }
5634 
5635                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5636         } while (intr_status && --retries);
5637 
5638         spin_unlock(hba->host->host_lock);
5639         return retval;
5640 }
5641 
5642 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5643 {
5644         int err = 0;
5645         u32 mask = 1 << tag;
5646         unsigned long flags;
5647 
5648         if (!test_bit(tag, &hba->outstanding_tasks))
5649                 goto out;
5650 
5651         spin_lock_irqsave(hba->host->host_lock, flags);
5652         ufshcd_utmrl_clear(hba, tag);
5653         spin_unlock_irqrestore(hba->host->host_lock, flags);
5654 
5655         
5656         err = ufshcd_wait_for_register(hba,
5657                         REG_UTP_TASK_REQ_DOOR_BELL,
5658                         mask, 0, 1000, 1000, true);
5659 out:
5660         return err;
5661 }
5662 
5663 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5664                 struct utp_task_req_desc *treq, u8 tm_function)
5665 {
5666         struct Scsi_Host *host = hba->host;
5667         unsigned long flags;
5668         int free_slot, task_tag, err;
5669 
5670         /*
5671          * Get a free slot, sleep if slots are unavailable.
5672          * Even though we use wait_event() which sleeps indefinitely,
5673          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5674          */
5675         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5676         ufshcd_hold(hba, false);
5677 
5678         spin_lock_irqsave(host->host_lock, flags);
5679         task_tag = hba->nutrs + free_slot;
5680 
5681         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5682 
5683         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
5684         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5685 
5686         
5687         __set_bit(free_slot, &hba->outstanding_tasks);
5688 
5689         /* Make sure descriptors are ready before ringing the task doorbell */
5690         wmb();
5691 
5692         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5693         /* Make sure that the doorbell write is committed immediately */
5694         wmb();
5695 
5696         spin_unlock_irqrestore(host->host_lock, flags);
5697 
5698         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5699 
5700         
5701         err = wait_event_timeout(hba->tm_wq,
5702                         test_bit(free_slot, &hba->tm_condition),
5703                         msecs_to_jiffies(TM_CMD_TIMEOUT));
5704         if (!err) {
5705                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5706                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5707                                 __func__, tm_function);
5708                 if (ufshcd_clear_tm_cmd(hba, free_slot))
5709                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5710                                         __func__, free_slot);
5711                 err = -ETIMEDOUT;
5712         } else {
5713                 err = 0;
5714                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5715 
5716                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5717         }
5718 
5719         spin_lock_irqsave(hba->host->host_lock, flags);
5720         __clear_bit(free_slot, &hba->outstanding_tasks);
5721         spin_unlock_irqrestore(hba->host->host_lock, flags);
5722 
5723         clear_bit(free_slot, &hba->tm_condition);
5724         ufshcd_put_tm_slot(hba, free_slot);
5725         wake_up(&hba->tm_tag_wq);
5726 
5727         ufshcd_release(hba);
5728         return err;
5729 }
5730 
5731 
5732 /**
5733  * ufshcd_issue_tm_cmd - issues task management commands to the controller
5734  * @hba: per adapter instance
5735  * @lun_id: LUN ID to which the TM command is sent
5736  * @task_id: task ID to which the TM command is applicable
5737  * @tm_function: task management function opcode
5738  * @tm_response: task management service response return value
5739  * Returns non-zero value on error, zero on success.
5740  */
5741 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5742                 u8 tm_function, u8 *tm_response)
5743 {
5744         struct utp_task_req_desc treq = { { 0 }, };
5745         int ocs_value, err;
5746 
5747         
5748         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5749         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5750 
5751         
5752         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5753                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5754         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5755 
5756         
5757 
5758 
5759 
5760         treq.input_param1 = cpu_to_be32(lun_id);
5761         treq.input_param2 = cpu_to_be32(task_id);
5762 
5763         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5764         if (err == -ETIMEDOUT)
5765                 return err;
5766 
5767         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5768         if (ocs_value != OCS_SUCCESS)
5769                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5770                                 __func__, ocs_value);
5771         else if (tm_response)
5772                 *tm_response = be32_to_cpu(treq.output_param1) &
5773                                 MASK_TM_SERVICE_RESP;
5774         return err;
5775 }
5776 
5777 
5778 /**
5779  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5780  * @hba: per-adapter instance
5781  * @req_upiu: upiu request
5782  * @rsp_upiu: upiu reply
5783  * @desc_buff: pointer to descriptor buffer, NULL if not applicable
5784  * @buff_len: descriptor size, 0 if not applicable
5785  * @cmd_type: specifies the type (NOP, Query...)
5786  * @desc_op: descriptor operation
5787  *
5788  * These requests use a UTP Transfer Request Descriptor (UTRD), so they ride
5789  * the device management infrastructure: its tag and its wait queues.
5790  *
5791  * Since there is only one available tag for device management commands,
5792  * the caller is expected to hold the hba->dev_cmd.lock mutex.
5793  */
5794 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5795                                         struct utp_upiu_req *req_upiu,
5796                                         struct utp_upiu_req *rsp_upiu,
5797                                         u8 *desc_buff, int *buff_len,
5798                                         int cmd_type,
5799                                         enum query_opcode desc_op)
5800 {
5801         struct ufshcd_lrb *lrbp;
5802         int err = 0;
5803         int tag;
5804         struct completion wait;
5805         unsigned long flags;
5806         u32 upiu_flags;
5807 
5808         down_read(&hba->clk_scaling_lock);
5809 
5810         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
5811 
5812         init_completion(&wait);
5813         lrbp = &hba->lrb[tag];
5814         WARN_ON(lrbp->cmd);
5815 
5816         lrbp->cmd = NULL;
5817         lrbp->sense_bufflen = 0;
5818         lrbp->sense_buffer = NULL;
5819         lrbp->task_tag = tag;
5820         lrbp->lun = 0;
5821         lrbp->intr_cmd = true;
5822         hba->dev_cmd.type = cmd_type;
5823 
5824         switch (hba->ufs_version) {
5825         case UFSHCI_VERSION_10:
5826         case UFSHCI_VERSION_11:
5827                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5828                 break;
5829         default:
5830                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5831                 break;
5832         }
5833 
5834         
5835         req_upiu->header.dword_0 |= cpu_to_be32(tag);
5836 
5837         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5838 
5839         
5840         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5841         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5842                 
5843 
5844 
5845 
5846                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5847                 *buff_len = 0;
5848         }
5849 
5850         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5851 
5852         hba->dev_cmd.complete = &wait;
5853 
5854         
5855         wmb();
5856         spin_lock_irqsave(hba->host->host_lock, flags);
5857         ufshcd_send_command(hba, tag);
5858         spin_unlock_irqrestore(hba->host->host_lock, flags);
5859 
5860         /*
5861          * The return value is ignored here: ufshcd_check_query_response() is
5862          * bound to fail since dev_cmd.query was left empty, so the response
5863          * is read back directly below, ignoring all errors.
5864          */
5865         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5866 
5867         
5868         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
5869         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
5870                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
5871                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
5872                                MASK_QUERY_DATA_SEG_LEN;
5873 
5874                 if (*buff_len >= resp_len) {
5875                         memcpy(desc_buff, descp, resp_len);
5876                         *buff_len = resp_len;
5877                 } else {
5878                         dev_warn(hba->dev, "rsp size is bigger than buffer");
5879                         *buff_len = 0;
5880                         err = -EINVAL;
5881                 }
5882         }
5883 
5884         ufshcd_put_dev_cmd_tag(hba, tag);
5885         wake_up(&hba->dev_cmd.tag_wq);
5886         up_read(&hba->clk_scaling_lock);
5887         return err;
5888 }
5889 
5890 
5891 /**
5892  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5893  * @hba: per-adapter instance
5894  * @req_upiu: upiu request
5895  * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
5896  * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
5897  * @desc_buff: pointer to descriptor buffer, NULL if not applicable
5898  * @buff_len: descriptor size, 0 if not applicable
5899  * @desc_op: descriptor operation
5900  *
5901  * Supports UTP Transfer requests (NOP and Query) and UTP Task Management
5902  * requests. It is up to the caller to fill the upiu content properly, as it
5903  * will be copied without any further input validation.
5904  */
5905 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5906                              struct utp_upiu_req *req_upiu,
5907                              struct utp_upiu_req *rsp_upiu,
5908                              int msgcode,
5909                              u8 *desc_buff, int *buff_len,
5910                              enum query_opcode desc_op)
5911 {
5912         int err;
5913         int cmd_type = DEV_CMD_TYPE_QUERY;
5914         struct utp_task_req_desc treq = { { 0 }, };
5915         int ocs_value;
5916         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
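             /* The TM function is carried in bits 23:16 of UPIU header dword_1 */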
5917 
5918         switch (msgcode) {
5919         case UPIU_TRANSACTION_NOP_OUT:
5920                 cmd_type = DEV_CMD_TYPE_NOP;
5921                 /* fall through */
5922         case UPIU_TRANSACTION_QUERY_REQ:
5923                 ufshcd_hold(hba, false);
5924                 mutex_lock(&hba->dev_cmd.lock);
5925                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5926                                                    desc_buff, buff_len,
5927                                                    cmd_type, desc_op);
5928                 mutex_unlock(&hba->dev_cmd.lock);
5929                 ufshcd_release(hba);
5930 
5931                 break;
5932         case UPIU_TRANSACTION_TASK_REQ:
5933                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5934                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5935 
5936                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5937 
5938                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5939                 if (err == -ETIMEDOUT)
5940                         break;
5941 
5942                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5943                 if (ocs_value != OCS_SUCCESS) {
5944                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
5945                                 ocs_value);
5946                         break;
5947                 }
5948 
5949                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
5950 
5951                 break;
5952         default:
5953                 err = -EINVAL;
5954 
5955                 break;
5956         }
5957 
5958         return err;
5959 }
5960 
5961 
5962 /**
5963  * ufshcd_eh_device_reset_handler - device reset handler registered to
5964  *				     the scsi layer
5965  * @cmd: SCSI command pointer
5966  * Returns SUCCESS/FAILED
5967  */
5968 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5969 {
5970         struct Scsi_Host *host;
5971         struct ufs_hba *hba;
5972         unsigned int tag;
5973         u32 pos;
5974         int err;
5975         u8 resp = 0xF;
5976         struct ufshcd_lrb *lrbp;
5977         unsigned long flags;
5978 
5979         host = cmd->device->host;
5980         hba = shost_priv(host);
5981         tag = cmd->request->tag;
5982 
5983         lrbp = &hba->lrb[tag];
5984         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5985         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5986                 if (!err)
5987                         err = resp;
5988                 goto out;
5989         }
5990 
5991         
5992         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5993                 if (hba->lrb[pos].lun == lrbp->lun) {
5994                         err = ufshcd_clear_cmd(hba, pos);
5995                         if (err)
5996                                 break;
5997                 }
5998         }
5999         spin_lock_irqsave(host->host_lock, flags);
6000         ufshcd_transfer_req_compl(hba);
6001         spin_unlock_irqrestore(host->host_lock, flags);
6002 
6003 out:
6004         hba->req_abort_count = 0;
6005         ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6006         if (!err) {
6007                 err = SUCCESS;
6008         } else {
6009                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6010                 err = FAILED;
6011         }
6012         return err;
6013 }
6014 
6015 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6016 {
6017         struct ufshcd_lrb *lrbp;
6018         int tag;
6019 
6020         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6021                 lrbp = &hba->lrb[tag];
6022                 lrbp->req_abort_skip = true;
6023         }
6024 }
6025 
6026 
6027 /**
6028  * ufshcd_abort - abort a specific command
6029  * @cmd: SCSI command pointer
6030  *
6031  * Abort the pending command in the device by sending UFS_ABORT_TASK task
6032  * management command, and in the host controller by clearing the doorbell
6033  * bit. There can be a race between the controller sending the command to
6034  * the device while the abort is issued; to avoid that, UFS_QUERY_TASK is
6035  * issued first to check whether the command is really outstanding in the
6036  * device. Returns SUCCESS/FAILED.
6037  */
6038 static int ufshcd_abort(struct scsi_cmnd *cmd)
6039 {
6040         struct Scsi_Host *host;
6041         struct ufs_hba *hba;
6042         unsigned long flags;
6043         unsigned int tag;
6044         int err = 0;
6045         int poll_cnt;
6046         u8 resp = 0xF;
6047         struct ufshcd_lrb *lrbp;
6048         u32 reg;
6049 
6050         host = cmd->device->host;
6051         hba = shost_priv(host);
6052         tag = cmd->request->tag;
6053         lrbp = &hba->lrb[tag];
6054         if (!ufshcd_valid_tag(hba, tag)) {
6055                 dev_err(hba->dev,
6056                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6057                         __func__, tag, cmd, cmd->request);
6058                 BUG();
6059         }
6060 
6061         
6062 
6063 
6064 
6065 
6066 
6067 
6068         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6069                 return ufshcd_eh_host_reset_handler(cmd);
6070 
6071         ufshcd_hold(hba, false);
6072         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6073         
6074         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6075                 dev_err(hba->dev,
6076                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6077                         __func__, tag, hba->outstanding_reqs, reg);
6078                 goto out;
6079         }
6080 
6081         if (!(reg & (1 << tag))) {
6082                 dev_err(hba->dev,
6083                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6084                 __func__, tag);
6085         }
6086 
6087         
6088         dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6089 
6090         
6091 
6092 
6093 
6094 
6095 
6096 
6097         scsi_print_command(hba->lrb[tag].cmd);
6098         if (!hba->req_abort_count) {
6099                 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6100                 ufshcd_print_host_regs(hba);
6101                 ufshcd_print_host_state(hba);
6102                 ufshcd_print_pwr_info(hba);
6103                 ufshcd_print_trs(hba, 1 << tag, true);
6104         } else {
6105                 ufshcd_print_trs(hba, 1 << tag, false);
6106         }
6107         hba->req_abort_count++;
6108 
6109         
6110         if (lrbp->req_abort_skip) {
6111                 err = -EIO;
6112                 goto out;
6113         }
6114 
6115         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6116                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6117                                 UFS_QUERY_TASK, &resp);
6118                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6119                         
6120                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6121                                 __func__, tag);
6122                         break;
6123                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6124                         
6125 
6126 
6127 
6128                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6129                                 __func__, tag);
6130                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6131                         if (reg & (1 << tag)) {
6132                                 
6133                                 usleep_range(100, 200);
6134                                 continue;
6135                         }
6136                         
6137                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6138                                 __func__, tag);
6139                         goto out;
6140                 } else {
6141                         dev_err(hba->dev,
6142                                 "%s: no response from device. tag = %d, err %d\n",
6143                                 __func__, tag, err);
6144                         if (!err)
6145                                 err = resp; 
6146                         goto out;
6147                 }
6148         }
6149 
6150         if (!poll_cnt) {
6151                 err = -EBUSY;
6152                 goto out;
6153         }
6154 
6155         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6156                         UFS_ABORT_TASK, &resp);
6157         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6158                 if (!err) {
6159                         err = resp; 
6160                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6161                                 __func__, tag, err);
6162                 }
6163                 goto out;
6164         }
6165 
6166         err = ufshcd_clear_cmd(hba, tag);
6167         if (err) {
6168                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6169                         __func__, tag, err);
6170                 goto out;
6171         }
6172 
6173         scsi_dma_unmap(cmd);
6174 
6175         spin_lock_irqsave(host->host_lock, flags);
6176         ufshcd_outstanding_req_clear(hba, tag);
6177         hba->lrb[tag].cmd = NULL;
6178         spin_unlock_irqrestore(host->host_lock, flags);
6179 
6180         clear_bit_unlock(tag, &hba->lrb_in_use);
6181         wake_up(&hba->dev_cmd.tag_wq);
6182 
6183 out:
6184         if (!err) {
6185                 err = SUCCESS;
6186         } else {
6187                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6188                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6189                 err = FAILED;
6190         }
6191 
6192         
6193 
6194 
6195 
6196         ufshcd_release(hba);
6197         return err;
6198 }
6199 
6200 
6201 /**
6202  * ufshcd_host_reset_and_restore - reset and restore host controller
6203  * @hba: per-adapter instance
6204  *
6205  * Note that host controller reset may issue DME_RESET to the local and
6206  * remote (device) Uni-Pro stack and the attributes are reset to their
6207  * default state.
6208  * Returns zero on success, non-zero on failure.
6209  */
6210 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6211 {
6212         int err;
6213         unsigned long flags;
6214 
6215         
6216 
6217 
6218 
6219         spin_lock_irqsave(hba->host->host_lock, flags);
6220         ufshcd_hba_stop(hba, false);
6221         hba->silence_err_logs = true;
6222         ufshcd_complete_requests(hba);
6223         hba->silence_err_logs = false;
6224         spin_unlock_irqrestore(hba->host->host_lock, flags);
6225 
6226         
6227         ufshcd_scale_clks(hba, true);
6228 
6229         err = ufshcd_hba_enable(hba);
6230         if (err)
6231                 goto out;
6232 
6233         
6234         err = ufshcd_probe_hba(hba);
6235 
6236         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6237                 err = -EIO;
6238 out:
6239         if (err)
6240                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6241         ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6242         return err;
6243 }
6244 
6245 /**
6246  * ufshcd_reset_and_restore - reset and re-initialize host/device
6247  * @hba: per-adapter instance
6248  *
6249  * Reset and recover the device and host and re-establish the link. This
6250  * is helpful to recover the communication in fatal error conditions.
6251  *
6252  * Returns zero on success, non-zero on failure.
6253  */
6254 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6255 {
6256         int err = 0;
6257         int retries = MAX_HOST_RESET_RETRIES;
6258 
6259         do {
6260                 
6261                 ufshcd_vops_device_reset(hba);
6262 
6263                 err = ufshcd_host_reset_and_restore(hba);
6264         } while (err && --retries);
6265 
6266         return err;
6267 }
6268 
6269 
6270 /**
6271  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6272  * @cmd: SCSI command pointer
6273  * Returns SUCCESS/FAILED
6274  */
6275 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6276 {
6277         int err;
6278         unsigned long flags;
6279         struct ufs_hba *hba;
6280 
6281         hba = shost_priv(cmd->device->host);
6282 
6283         ufshcd_hold(hba, false);
6284         
6285 
6286 
6287 
6288 
6289 
6290         do {
6291                 spin_lock_irqsave(hba->host->host_lock, flags);
6292                 if (!(work_pending(&hba->eh_work) ||
6293                             hba->ufshcd_state == UFSHCD_STATE_RESET ||
6294                             hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6295                         break;
6296                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6297                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6298                 flush_work(&hba->eh_work);
6299         } while (1);
6300 
6301         hba->ufshcd_state = UFSHCD_STATE_RESET;
6302         ufshcd_set_eh_in_progress(hba);
6303         spin_unlock_irqrestore(hba->host->host_lock, flags);
6304 
6305         err = ufshcd_reset_and_restore(hba);
6306 
6307         spin_lock_irqsave(hba->host->host_lock, flags);
6308         if (!err) {
6309                 err = SUCCESS;
6310                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6311         } else {
6312                 err = FAILED;
6313                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6314         }
6315         ufshcd_clear_eh_in_progress(hba);
6316         spin_unlock_irqrestore(hba->host->host_lock, flags);
6317 
6318         ufshcd_release(hba);
6319         return err;
6320 }
6321 
6322 
6323 /**
6324  * ufshcd_get_max_icc_level - calculate the ICC level
6325  * @sup_curr_uA: max. current supported by the regulator
6326  * @start_scan: row in the descriptor table to start scanning from
6327  * @buff: power descriptor buffer
6328  * Returns the calculated max ICC level for the given regulator.
6329  */
6330 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6331 {
6332         int i;
6333         int curr_uA;
6334         u16 data;
6335         u16 unit;
6336 
6337         for (i = start_scan; i >= 0; i--) {
6338                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6339                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6340                                                 ATTR_ICC_LVL_UNIT_OFFSET;
6341                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
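                     /* Normalize the entry to microamps before comparing with the regulator budget */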
6342                 switch (unit) {
6343                 case UFSHCD_NANO_AMP:
6344                         curr_uA = curr_uA / 1000;
6345                         break;
6346                 case UFSHCD_MILI_AMP:
6347                         curr_uA = curr_uA * 1000;
6348                         break;
6349                 case UFSHCD_AMP:
6350                         curr_uA = curr_uA * 1000 * 1000;
6351                         break;
6352                 case UFSHCD_MICRO_AMP:
6353                 default:
6354                         break;
6355                 }
6356                 if (sup_curr_uA >= curr_uA)
6357                         break;
6358         }
6359         if (i < 0) {
6360                 i = 0;
6361                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6362         }
6363 
6364         return (u32)i;
6365 }
6366 
6367 
6368 /**
6369  * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
6370  * ICC level; returns 0 if the regulator capabilities are not initialized
6371  * @hba: per-adapter instance
6372  * @desc_buf: power descriptor buffer to extract ICC levels from
6373  * @len: length of desc_buf
6374  * Returns the calculated ICC level.
6375  */
6376 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6377                                                         u8 *desc_buf, int len)
6378 {
6379         u32 icc_level = 0;
6380 
6381         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6382                                                 !hba->vreg_info.vccq2) {
6383                 dev_err(hba->dev,
6384                         "%s: Regulator capability was not set, actvIccLevel=%d",
6385                                                         __func__, icc_level);
6386                 goto out;
6387         }
6388 
6389         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6390                 icc_level = ufshcd_get_max_icc_level(
6391                                 hba->vreg_info.vcc->max_uA,
6392                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6393                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6394 
6395         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6396                 icc_level = ufshcd_get_max_icc_level(
6397                                 hba->vreg_info.vccq->max_uA,
6398                                 icc_level,
6399                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6400 
6401         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6402                 icc_level = ufshcd_get_max_icc_level(
6403                                 hba->vreg_info.vccq2->max_uA,
6404                                 icc_level,
6405                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6406 out:
6407         return icc_level;
6408 }
6409 
6410 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6411 {
6412         int ret;
6413         int buff_len = hba->desc_size.pwr_desc;
6414         u8 *desc_buf;
6415 
6416         desc_buf = kmalloc(buff_len, GFP_KERNEL);
6417         if (!desc_buf)
6418                 return;
6419 
6420         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6421         if (ret) {
6422                 dev_err(hba->dev,
6423                         "%s: Failed reading power descriptor.len = %d ret = %d",
6424                         __func__, buff_len, ret);
6425                 goto out;
6426         }
6427 
6428         hba->init_prefetch_data.icc_level =
6429                         ufshcd_find_max_sup_active_icc_level(hba,
6430                         desc_buf, buff_len);
6431         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6432                         __func__, hba->init_prefetch_data.icc_level);
6433 
6434         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6435                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6436                 &hba->init_prefetch_data.icc_level);
6437 
6438         if (ret)
6439                 dev_err(hba->dev,
6440                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6441                         __func__, hba->init_prefetch_data.icc_level, ret);
6442 
6443 out:
6444         kfree(desc_buf);
6445 }
6446 
6447 
6448 
6449 
6450 /**
6451  * ufshcd_scsi_add_wlus - Adds required W-LUs
6452  * @hba: per-adapter instance
6453  *
6454  * UFS devices are required to support a set of well known logical units
6455  * (W-LUs): "REPORT LUNS", "UFS Device", "RPMB" and "BOOT".
6456  *
6457  * Power management of the UFS device is done through the "POWER CONDITION"
6458  * field of the START STOP UNIT command, which only takes effect when it is
6459  * sent to the "UFS Device" W-LU, so a scsi_device instance is needed for
6460  * that LU in order for the driver to issue SSU for power management.
6461  *
6462  * A scsi_device instance is also needed for the "RPMB" (Replay Protected
6463  * Memory Block) LU so that user space can control it, and user space may
6464  * also want access to the BOOT LU, so both are added as well; a failure to
6465  * add the BOOT W-LU is only logged and does not fail the whole operation.
6466  *
6467  * This function adds scsi device instances for each of the well known LUs
6468  * (except the "REPORT LUNS" LU).
6469  *
6470  * Returns zero on success, non-zero error value if adding the "UFS Device"
6471  * or "RPMB" W-LU failed.
6472  */
6473 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6474 {
6475         int ret = 0;
6476         struct scsi_device *sdev_rpmb;
6477         struct scsi_device *sdev_boot;
6478 
6479         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6480                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6481         if (IS_ERR(hba->sdev_ufs_device)) {
6482                 ret = PTR_ERR(hba->sdev_ufs_device);
6483                 hba->sdev_ufs_device = NULL;
6484                 goto out;
6485         }
6486         scsi_device_put(hba->sdev_ufs_device);
6487 
6488         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6489                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6490         if (IS_ERR(sdev_rpmb)) {
6491                 ret = PTR_ERR(sdev_rpmb);
6492                 goto remove_sdev_ufs_device;
6493         }
6494         scsi_device_put(sdev_rpmb);
6495 
6496         sdev_boot = __scsi_add_device(hba->host, 0, 0,
6497                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6498         if (IS_ERR(sdev_boot))
6499                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6500         else
6501                 scsi_device_put(sdev_boot);
6502         goto out;
6503 
6504 remove_sdev_ufs_device:
6505         scsi_remove_device(hba->sdev_ufs_device);
6506 out:
6507         return ret;
6508 }
6509 
6510 static int ufs_get_device_desc(struct ufs_hba *hba,
6511                                struct ufs_dev_desc *dev_desc)
6512 {
6513         int err;
6514         size_t buff_len;
6515         u8 model_index;
6516         u8 *desc_buf;
6517 
6518         if (!dev_desc)
6519                 return -EINVAL;
6520 
6521         buff_len = max_t(size_t, hba->desc_size.dev_desc,
6522                          QUERY_DESC_MAX_SIZE + 1);
6523         desc_buf = kmalloc(buff_len, GFP_KERNEL);
6524         if (!desc_buf) {
6525                 err = -ENOMEM;
6526                 goto out;
6527         }
6528 
6529         err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6530         if (err) {
6531                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6532                         __func__, err);
6533                 goto out;
6534         }
6535 
6536         
6537 
6538 
6539 
6540         dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6541                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6542 
6543         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6544         err = ufshcd_read_string_desc(hba, model_index,
6545                                       &dev_desc->model, SD_ASCII_STD);
6546         if (err < 0) {
6547                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6548                         __func__, err);
6549                 goto out;
6550         }
6551 
6552         
6553 
6554 
6555 
6556         err = 0;
6557 
6558 out:
6559         kfree(desc_buf);
6560         return err;
6561 }
6562 
6563 static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
6564 {
6565         kfree(dev_desc->model);
6566         dev_desc->model = NULL;
6567 }
6568 
6569 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6570                                    struct ufs_dev_desc *dev_desc)
6571 {
6572         struct ufs_dev_fix *f;
6573 
6574         for (f = ufs_fixups; f->quirk; f++) {
6575                 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6576                      f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6577                      ((dev_desc->model &&
6578                        STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
6579                       !strcmp(f->card.model, UFS_ANY_MODEL)))
6580                         hba->dev_quirks |= f->quirk;
6581         }
6582 }
6583 
6584 
6585 /**
6586  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6587  * @hba: per-adapter instance
6588  *
6589  * PA_TActivate can be tuned manually if the UniPro version is less than 1.61.
6590  * It needs to be greater than or equal to the peer M-PHY's
6591  * RX_MIN_ACTIVATETIME_CAPABILITY attribute; using that value can help reduce
6592  * the hibern8 exit latency.
6593  * Returns zero on success, non-zero error value on failure.
6594  */
6595 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6596 {
6597         int ret = 0;
6598         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6599 
6600         ret = ufshcd_dme_peer_get(hba,
6601                                   UIC_ARG_MIB_SEL(
6602                                         RX_MIN_ACTIVATETIME_CAPABILITY,
6603                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6604                                   &peer_rx_min_activatetime);
6605         if (ret)
6606                 goto out;
6607 
6608         
6609         tuned_pa_tactivate =
6610                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6611                  / PA_TACTIVATE_TIME_UNIT_US);
6612         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6613                              tuned_pa_tactivate);
6614 
6615 out:
6616         return ret;
6617 }
6618 
6619 
6620 /**
6621  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6622  * @hba: per-adapter instance
6623  *
6624  * PA_Hibern8Time can be tuned manually if the UniPro version is less than
6625  * 1.61. It needs to be the maximum of the local TX_HIBERN8TIME_CAPABILITY
6626  * and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY; using that value can help
6627  * reduce the hibern8 exit latency.
6628  * Returns zero on success, non-zero error value on failure.
6629  */
6630 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6631 {
6632         int ret = 0;
6633         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6634         u32 max_hibern8_time, tuned_pa_hibern8time;
6635 
6636         ret = ufshcd_dme_get(hba,
6637                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6638                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6639                                   &local_tx_hibern8_time_cap);
6640         if (ret)
6641                 goto out;
6642 
6643         ret = ufshcd_dme_peer_get(hba,
6644                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6645                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6646                                   &peer_rx_hibern8_time_cap);
6647         if (ret)
6648                 goto out;
6649 
6650         max_hibern8_time = max(local_tx_hibern8_time_cap,
6651                                peer_rx_hibern8_time_cap);
6652         
6653         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6654                                 / PA_HIBERN8_TIME_UNIT_US);
6655         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6656                              tuned_pa_hibern8time);
6657 out:
6658         return ret;
6659 }
6660 
6661 
6662 /**
6663  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6664  * lower than the device's PA_TACTIVATE time.
6665  * @hba: per-adapter instance
6666  *
6667  * Some UFS devices require host PA_TACTIVATE to be lower than the device's
6668  * PA_TACTIVATE; for such devices the peer (device) PA_TACTIVATE is bumped up,
6669  * taking the differing PA_GRANULARITY of the two sides into account.
6670  * Returns zero on success, non-zero error value on failure.
6671  */
6672 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6673 {
6674         int ret = 0;
6675         u32 granularity, peer_granularity;
6676         u32 pa_tactivate, peer_pa_tactivate;
6677         u32 pa_tactivate_us, peer_pa_tactivate_us;
6678         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6679 
6680         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6681                                   &granularity);
6682         if (ret)
6683                 goto out;
6684 
6685         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6686                                   &peer_granularity);
6687         if (ret)
6688                 goto out;
6689 
6690         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6691             (granularity > PA_GRANULARITY_MAX_VAL)) {
6692                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6693                         __func__, granularity);
6694                 return -EINVAL;
6695         }
6696 
6697         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6698             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6699                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6700                         __func__, peer_granularity);
6701                 return -EINVAL;
6702         }
6703 
6704         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6705         if (ret)
6706                 goto out;
6707 
6708         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6709                                   &peer_pa_tactivate);
6710         if (ret)
6711                 goto out;
6712 
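             /* Convert both PA_TACTIVATE values to microseconds using each side's granularity */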
6713         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6714         peer_pa_tactivate_us = peer_pa_tactivate *
6715                              gran_to_us_table[peer_granularity - 1];
6716 
6717         if (pa_tactivate_us > peer_pa_tactivate_us) {
6718                 u32 new_peer_pa_tactivate;
6719 
6720                 new_peer_pa_tactivate = pa_tactivate_us /
6721                                       gran_to_us_table[peer_granularity - 1];
6722                 new_peer_pa_tactivate++;
6723                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6724                                           new_peer_pa_tactivate);
6725         }
6726 
6727 out:
6728         return ret;
6729 }
6730 
6731 static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
6732                                       struct ufs_dev_desc *card)
6733 {
6734         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6735                 ufshcd_tune_pa_tactivate(hba);
6736                 ufshcd_tune_pa_hibern8time(hba);
6737         }
6738 
6739         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6740                 
6741                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6742 
6743         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6744                 ufshcd_quirk_tune_host_pa_tactivate(hba);
6745 
6746         ufshcd_vops_apply_dev_quirks(hba, card);
6747 }
6748 
6749 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6750 {
6751         hba->ufs_stats.hibern8_exit_cnt = 0;
6752         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6753         hba->req_abort_count = 0;
6754 }
6755 
6756 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6757 {
6758         int err;
6759 
6760         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6761                 &hba->desc_size.dev_desc);
6762         if (err)
6763                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6764 
6765         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6766                 &hba->desc_size.pwr_desc);
6767         if (err)
6768                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6769 
6770         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6771                 &hba->desc_size.interc_desc);
6772         if (err)
6773                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6774 
6775         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6776                 &hba->desc_size.conf_desc);
6777         if (err)
6778                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6779 
6780         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6781                 &hba->desc_size.unit_desc);
6782         if (err)
6783                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6784 
6785         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6786                 &hba->desc_size.geom_desc);
6787         if (err)
6788                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6789 
6790         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6791                 &hba->desc_size.hlth_desc);
6792         if (err)
6793                 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6794 }
6795 
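     /* Device reference clock frequencies that the bRefClkFreq attribute can encode */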
6796 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6797         {19200000, REF_CLK_FREQ_19_2_MHZ},
6798         {26000000, REF_CLK_FREQ_26_MHZ},
6799         {38400000, REF_CLK_FREQ_38_4_MHZ},
6800         {52000000, REF_CLK_FREQ_52_MHZ},
6801         {0, REF_CLK_FREQ_INVAL},
6802 };
6803 
6804 static enum ufs_ref_clk_freq
6805 ufs_get_bref_clk_from_hz(unsigned long freq)
6806 {
6807         int i;
6808 
6809         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6810                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6811                         return ufs_ref_clk_freqs[i].val;
6812 
6813         return REF_CLK_FREQ_INVAL;
6814 }
6815 
6816 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6817 {
6818         unsigned long freq;
6819 
6820         freq = clk_get_rate(refclk);
6821 
6822         hba->dev_ref_clk_freq =
6823                 ufs_get_bref_clk_from_hz(freq);
6824 
6825         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6826                 dev_err(hba->dev,
6827                 "invalid ref_clk setting = %ld\n", freq);
6828 }
6829 
6830 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6831 {
6832         int err;
6833         u32 ref_clk;
6834         u32 freq = hba->dev_ref_clk_freq;
6835 
6836         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6837                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6838 
6839         if (err) {
6840                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6841                         err);
6842                 goto out;
6843         }
6844 
6845         if (ref_clk == freq)
6846                 goto out; /* nothing to update */
6847 
6848         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6849                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6850 
6851         if (err) {
6852                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6853                         ufs_ref_clk_freqs[freq].freq_hz);
6854                 goto out;
6855         }
6856 
6857         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6858                         ufs_ref_clk_freqs[freq].freq_hz);
6859 
6860 out:
6861         return err;
6862 }
6863 
6864 /**
6865  * ufshcd_probe_hba - probe hba to detect device and initialize it
6866  * @hba: per-adapter instance
6867  *
6868  * Execute link-startup and verify device initialization
6869  */
6870 static int ufshcd_probe_hba(struct ufs_hba *hba)
6871 {
6872         struct ufs_dev_desc card = {0};
6873         int ret;
6874         ktime_t start = ktime_get();
6875 
6876         ret = ufshcd_link_startup(hba);
6877         if (ret)
6878                 goto out;
6879 
6880         /* set the default level for urgent bkops */
6881         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6882         hba->is_urgent_bkops_lvl_checked = false;
6883 
6884         /* Debug counters initialization */
6885         ufshcd_clear_dbg_ufs_stats(hba);
6886 
6887         /* UniPro link is active now */
6888         ufshcd_set_link_active(hba);
6889 
6890         ret = ufshcd_verify_dev_init(hba);
6891         if (ret)
6892                 goto out;
6893 
6894         ret = ufshcd_complete_dev_init(hba);
6895         if (ret)
6896                 goto out;
6897 
6898         /* Read the supported descriptor lengths from the device */
6899         ufshcd_init_desc_sizes(hba);
6900 
6901         ret = ufs_get_device_desc(hba, &card);
6902         if (ret) {
6903                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6904                         __func__, ret);
6905                 goto out;
6906         }
6907 
6908         ufs_fixup_device_setup(hba, &card);
6909         ufshcd_tune_unipro_params(hba, &card);
6910         ufs_put_device_desc(&card);
6911 
6912         /* UFS device is also active now */
6913         ufshcd_set_ufs_dev_active(hba);
6914         ufshcd_force_reset_auto_bkops(hba);
6915         hba->wlun_dev_clr_ua = true;
6916 
6917         if (ufshcd_get_max_pwr_mode(hba)) {
6918                 dev_err(hba->dev,
6919                         "%s: Failed getting max supported power mode\n",
6920                         __func__);
6921         } else {
6922                 /*
6923                  * Set the right value of bRefClkFreq before attempting to
6924                  * switch to HS gears.
6925                  */
6926                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
6927                         ufshcd_set_dev_ref_clk(hba);
6928                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6929                 if (ret) {
6930                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6931                                         __func__, ret);
6932                         goto out;
6933                 }
6934         }
6935 
6936         /* set the state as operational after switching to desired gear */
6937         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6938 
6939         /* Enable Auto-Hibernate if configured */
6940         ufshcd_auto_hibern8_enable(hba);
6941 
6942         /*
6943          * If we are in error handling context or in power management callbacks
6944          * context, no need to scan the host
6945          */
6946         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6947                 bool flag;
6948 
6949                 /* clear any previous UFS device information */
6950                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6951                 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6952                                 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6953                         hba->dev_info.f_power_on_wp_en = flag;
6954 
6955                 if (!hba->is_init_prefetch)
6956                         ufshcd_init_icc_levels(hba);
6957 
6958                 /* Add required well known logical units to scsi mid layer */
6959                 ret = ufshcd_scsi_add_wlus(hba);
6960                 if (ret)
6961                         goto out;
6962 
6963                 /* Initialize devfreq after UFS device is detected */
6964                 if (ufshcd_is_clkscaling_supported(hba)) {
6965                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
6966                                 &hba->pwr_info,
6967                                 sizeof(struct ufs_pa_layer_attr));
6968                         hba->clk_scaling.saved_pwr_info.is_valid = true;
6969                         if (!hba->devfreq) {
6970                                 ret = ufshcd_devfreq_init(hba);
6971                                 if (ret)
6972                                         goto out;
6973                         }
6974                         hba->clk_scaling.is_allowed = true;
6975                 }
6976 
6977                 ufs_bsg_probe(hba);
6978 
6979                 scsi_scan_host(hba->host);
6980                 pm_runtime_put_sync(hba->dev);
6981         }
6982 
6983         if (!hba->is_init_prefetch)
6984                 hba->is_init_prefetch = true;
6985 
6986 out:
6987         /*
6988          * If we failed to initialize the device or the device is not
6989          * present, turn off the power/clocks etc.
6990          */
6991         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6992                 pm_runtime_put_sync(hba->dev);
6993                 ufshcd_exit_clk_scaling(hba);
6994                 ufshcd_hba_exit(hba);
6995         }
6996 
6997         trace_ufshcd_init(dev_name(hba->dev), ret,
6998                 ktime_to_us(ktime_sub(ktime_get(), start)),
6999                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7000         return ret;
7001 }
7002 
7003 /**
7004  * ufshcd_async_scan - asynchronous execution for probing hba
7005  * @data: data pointer to pass to this function
7006  * @cookie: cookie data
7007  */
7008 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7009 {
7010         struct ufs_hba *hba = (struct ufs_hba *)data;
7011 
7012         ufshcd_probe_hba(hba);
7013 }
7014 
7015 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7016 {
7017         unsigned long flags;
7018         struct Scsi_Host *host;
7019         struct ufs_hba *hba;
7020         int index;
7021         bool found = false;
7022 
7023         if (!scmd || !scmd->device || !scmd->device->host)
7024                 return BLK_EH_DONE;
7025 
7026         host = scmd->device->host;
7027         hba = shost_priv(host);
7028         if (!hba)
7029                 return BLK_EH_DONE;
7030 
7031         spin_lock_irqsave(host->host_lock, flags);
7032 
7033         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7034                 if (hba->lrb[index].cmd == scmd) {
7035                         found = true;
7036                         break;
7037                 }
7038         }
7039 
7040         spin_unlock_irqrestore(host->host_lock, flags);
7041 
7042         /*
7043          * Bypass SCSI error handling and reset the block layer timer if this
7044          * SCSI command was not actually dispatched to the UFS driver, otherwise
7045          * let the SCSI layer handle the error as usual.
7046          */
7047         return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
7048 }
7049 
7050 static const struct attribute_group *ufshcd_driver_groups[] = {
7051         &ufs_sysfs_unit_descriptor_group,
7052         &ufs_sysfs_lun_attributes_group,
7053         NULL,
7054 };
7055 
7056 static struct scsi_host_template ufshcd_driver_template = {
7057         .module                 = THIS_MODULE,
7058         .name                   = UFSHCD,
7059         .proc_name              = UFSHCD,
7060         .queuecommand           = ufshcd_queuecommand,
7061         .slave_alloc            = ufshcd_slave_alloc,
7062         .slave_configure        = ufshcd_slave_configure,
7063         .slave_destroy          = ufshcd_slave_destroy,
7064         .change_queue_depth     = ufshcd_change_queue_depth,
7065         .eh_abort_handler       = ufshcd_abort,
7066         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7067         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7068         .eh_timed_out           = ufshcd_eh_timed_out,
7069         .this_id                = -1,
7070         .sg_tablesize           = SG_ALL,
7071         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7072         .can_queue              = UFSHCD_CAN_QUEUE,
7073         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
7074         .max_host_blocked       = 1,
7075         .track_queue_depth      = 1,
7076         .sdev_groups            = ufshcd_driver_groups,
7077         .dma_boundary           = PAGE_SIZE - 1,
7078 };
7079 
7080 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7081                                    int ua)
7082 {
7083         int ret;
7084 
7085         if (!vreg)
7086                 return 0;
7087 
7088         /*
7089          * "set_load" operation shall be required on those regulators
7090          * which specifically configured current limitation. Otherwise
7091          * zero max_uA may cause unexpected behaviour when regulator is
7092          * enabled or set as high power mode.
7093          */
7094         if (!vreg->max_uA)
7095                 return 0;
7096 
7097         ret = regulator_set_load(vreg->reg, ua);
7098         if (ret < 0) {
7099                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7100                                 __func__, vreg->name, ua, ret);
7101         }
7102 
7103         return ret;
7104 }
7105 
7106 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7107                                          struct ufs_vreg *vreg)
7108 {
7109         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7110 }
7111 
7112 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7113                                          struct ufs_vreg *vreg)
7114 {
7115         if (!vreg)
7116                 return 0;
7117 
7118         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7119 }
7120 
7121 static int ufshcd_config_vreg(struct device *dev,
7122                 struct ufs_vreg *vreg, bool on)
7123 {
7124         int ret = 0;
7125         struct regulator *reg;
7126         const char *name;
7127         int min_uV, uA_load;
7128 
7129         BUG_ON(!vreg);
7130 
7131         reg = vreg->reg;
7132         name = vreg->name;
7133 
7134         if (regulator_count_voltages(reg) > 0) {
7135                 if (vreg->min_uV && vreg->max_uV) {
7136                         min_uV = on ? vreg->min_uV : 0;
7137                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7138                         if (ret) {
7139                                 dev_err(dev,
7140                                         "%s: %s set voltage failed, err=%d\n",
7141                                         __func__, name, ret);
7142                                 goto out;
7143                         }
7144                 }
7145 
7146                 uA_load = on ? vreg->max_uA : 0;
7147                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7148                 if (ret)
7149                         goto out;
7150         }
7151 out:
7152         return ret;
7153 }
7154 
7155 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7156 {
7157         int ret = 0;
7158 
7159         if (!vreg || vreg->enabled)
7160                 goto out;
7161 
7162         ret = ufshcd_config_vreg(dev, vreg, true);
7163         if (!ret)
7164                 ret = regulator_enable(vreg->reg);
7165 
7166         if (!ret)
7167                 vreg->enabled = true;
7168         else
7169                 dev_err(dev, "%s: %s enable failed, err=%d\n",
7170                                 __func__, vreg->name, ret);
7171 out:
7172         return ret;
7173 }
7174 
7175 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7176 {
7177         int ret = 0;
7178 
7179         if (!vreg || !vreg->enabled)
7180                 goto out;
7181 
7182         ret = regulator_disable(vreg->reg);
7183 
7184         if (!ret) {
7185                 /* ignore errors on applying disable config */
7186                 ufshcd_config_vreg(dev, vreg, false);
7187                 vreg->enabled = false;
7188         } else {
7189                 dev_err(dev, "%s: %s disable failed, err=%d\n",
7190                                 __func__, vreg->name, ret);
7191         }
7192 out:
7193         return ret;
7194 }
7195 
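/* Turn the UFS device power rails (VCC, VCCQ, VCCQ2) on or off; on any failure all rails are switched back off. */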
7196 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7197 {
7198         int ret = 0;
7199         struct device *dev = hba->dev;
7200         struct ufs_vreg_info *info = &hba->vreg_info;
7201 
7202         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7203         if (ret)
7204                 goto out;
7205 
7206         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7207         if (ret)
7208                 goto out;
7209 
7210         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7211         if (ret)
7212                 goto out;
7213 
7214 out:
7215         if (ret) {
7216                 ufshcd_toggle_vreg(dev, info->vccq2, false);
7217                 ufshcd_toggle_vreg(dev, info->vccq, false);
7218                 ufshcd_toggle_vreg(dev, info->vcc, false);
7219         }
7220         return ret;
7221 }
7222 
7223 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7224 {
7225         struct ufs_vreg_info *info = &hba->vreg_info;
7226 
7227         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7228 }
7229 
7230 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7231 {
7232         int ret = 0;
7233 
7234         if (!vreg)
7235                 goto out;
7236 
7237         vreg->reg = devm_regulator_get(dev, vreg->name);
7238         if (IS_ERR(vreg->reg)) {
7239                 ret = PTR_ERR(vreg->reg);
7240                 dev_err(dev, "%s: %s get failed, err=%d\n",
7241                                 __func__, vreg->name, ret);
7242         }
7243 out:
7244         return ret;
7245 }
7246 
7247 static int ufshcd_init_vreg(struct ufs_hba *hba)
7248 {
7249         int ret = 0;
7250         struct device *dev = hba->dev;
7251         struct ufs_vreg_info *info = &hba->vreg_info;
7252 
7253         ret = ufshcd_get_vreg(dev, info->vcc);
7254         if (ret)
7255                 goto out;
7256 
7257         ret = ufshcd_get_vreg(dev, info->vccq);
7258         if (ret)
7259                 goto out;
7260 
7261         ret = ufshcd_get_vreg(dev, info->vccq2);
7262 out:
7263         return ret;
7264 }
7265 
7266 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7267 {
7268         struct ufs_vreg_info *info = &hba->vreg_info;
7269 
7270         if (info)
7271                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7272 
7273         return 0;
7274 }
7275 
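/*
 * Enable or disable every clock on hba->clk_list_head. When skip_ref_clk is
 * true, "ref_clk" is left untouched (used while the link stays active).
 */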
7276 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7277                                         bool skip_ref_clk)
7278 {
7279         int ret = 0;
7280         struct ufs_clk_info *clki;
7281         struct list_head *head = &hba->clk_list_head;
7282         unsigned long flags;
7283         ktime_t start = ktime_get();
7284         bool clk_state_changed = false;
7285 
7286         if (list_empty(head))
7287                 goto out;
7288 
7289         /*
7290          * vendor specific setup_clocks ops may depend on clocks managed by
7291          * this standard driver, hence call the vendor specific setup_clocks
7292          * before disabling the clocks managed here.
7293          */
7294         if (!on) {
7295                 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7296                 if (ret)
7297                         return ret;
7298         }
7299 
7300         list_for_each_entry(clki, head, list) {
7301                 if (!IS_ERR_OR_NULL(clki->clk)) {
7302                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7303                                 continue;
7304 
7305                         clk_state_changed = on ^ clki->enabled;
7306                         if (on && !clki->enabled) {
7307                                 ret = clk_prepare_enable(clki->clk);
7308                                 if (ret) {
7309                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7310                                                 __func__, clki->name, ret);
7311                                         goto out;
7312                                 }
7313                         } else if (!on && clki->enabled) {
7314                                 clk_disable_unprepare(clki->clk);
7315                         }
7316                         clki->enabled = on;
7317                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7318                                         clki->name, on ? "en" : "dis");
7319                 }
7320         }
7321 
7322         /*
7323          * vendor specific setup_clocks ops may depend on clocks managed by
7324          * this standard driver, hence call the vendor specific setup_clocks
7325          * after enabling the clocks managed here.
7326          */
7327         if (on) {
7328                 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7329                 if (ret)
7330                         return ret;
7331         }
7332 
7333 out:
7334         if (ret) {
7335                 list_for_each_entry(clki, head, list) {
7336                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7337                                 clk_disable_unprepare(clki->clk);
7338                 }
7339         } else if (!ret && on) {
7340                 spin_lock_irqsave(hba->host->host_lock, flags);
7341                 hba->clk_gating.state = CLKS_ON;
7342                 trace_ufshcd_clk_gating(dev_name(hba->dev),
7343                                         hba->clk_gating.state);
7344                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7345         }
7346 
7347         if (clk_state_changed)
7348                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7349                         (on ? "on" : "off"),
7350                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7351         return ret;
7352 }
7353 
7354 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7355 {
7356         return  __ufshcd_setup_clocks(hba, on, false);
7357 }
7358 
7359 static int ufshcd_init_clocks(struct ufs_hba *hba)
7360 {
7361         int ret = 0;
7362         struct ufs_clk_info *clki;
7363         struct device *dev = hba->dev;
7364         struct list_head *head = &hba->clk_list_head;
7365 
7366         if (list_empty(head))
7367                 goto out;
7368 
7369         list_for_each_entry(clki, head, list) {
7370                 if (!clki->name)
7371                         continue;
7372 
7373                 clki->clk = devm_clk_get(dev, clki->name);
7374                 if (IS_ERR(clki->clk)) {
7375                         ret = PTR_ERR(clki->clk);
7376                         dev_err(dev, "%s: %s clk get failed, %d\n",
7377                                         __func__, clki->name, ret);
7378                         goto out;
7379                 }
7380 
7381                 /*
7382                  * Parse the device ref clk freq from the "ref_clk" clock.
7383                  * dev_ref_clk_freq defaults to REF_CLK_FREQ_INVAL as set in
7384                  * ufshcd_alloc_host().
7385                  */
7386                 if (!strcmp(clki->name, "ref_clk"))
7387                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7388 
7389                 if (clki->max_freq) {
7390                         ret = clk_set_rate(clki->clk, clki->max_freq);
7391                         if (ret) {
7392                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7393                                         __func__, clki->name,
7394                                         clki->max_freq, ret);
7395                                 goto out;
7396                         }
7397                         clki->curr_freq = clki->max_freq;
7398                 }
7399                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7400                                 clki->name, clk_get_rate(clki->clk));
7401         }
7402 out:
7403         return ret;
7404 }
7405 
7406 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7407 {
7408         int err = 0;
7409 
7410         if (!hba->vops)
7411                 goto out;
7412 
7413         err = ufshcd_vops_init(hba);
7414         if (err)
7415                 goto out;
7416 
7417         err = ufshcd_vops_setup_regulators(hba, true);
7418         if (err)
7419                 goto out_exit;
7420 
7421         goto out;
7422 
7423 out_exit:
7424         ufshcd_vops_exit(hba);
7425 out:
7426         if (err)
7427                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7428                         __func__, ufshcd_get_var_name(hba), err);
7429         return err;
7430 }
7431 
7432 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7433 {
7434         if (!hba->vops)
7435                 return;
7436 
7437         ufshcd_vops_setup_regulators(hba, false);
7438 
7439         ufshcd_vops_exit(hba);
7440 }
7441 
7442 static int ufshcd_hba_init(struct ufs_hba *hba)
7443 {
7444         int err;
7445 
7446         /*
7447          * Handle host controller power separately from the UFS device power
7448          * rails as it will help controlling the UFS host controller power
7449          * collapse easily which is different than UFS device power collapse.
7450          * Also, enable the host controller power before we go ahead with rest
7451          * of the initialization here.
7452          */
7453         err = ufshcd_init_hba_vreg(hba);
7454         if (err)
7455                 goto out;
7456 
7457         err = ufshcd_setup_hba_vreg(hba, true);
7458         if (err)
7459                 goto out;
7460 
7461         err = ufshcd_init_clocks(hba);
7462         if (err)
7463                 goto out_disable_hba_vreg;
7464 
7465         err = ufshcd_setup_clocks(hba, true);
7466         if (err)
7467                 goto out_disable_hba_vreg;
7468 
7469         err = ufshcd_init_vreg(hba);
7470         if (err)
7471                 goto out_disable_clks;
7472 
7473         err = ufshcd_setup_vreg(hba, true);
7474         if (err)
7475                 goto out_disable_clks;
7476 
7477         err = ufshcd_variant_hba_init(hba);
7478         if (err)
7479                 goto out_disable_vreg;
7480 
7481         hba->is_powered = true;
7482         goto out;
7483 
7484 out_disable_vreg:
7485         ufshcd_setup_vreg(hba, false);
7486 out_disable_clks:
7487         ufshcd_setup_clocks(hba, false);
7488 out_disable_hba_vreg:
7489         ufshcd_setup_hba_vreg(hba, false);
7490 out:
7491         return err;
7492 }
7493 
7494 static void ufshcd_hba_exit(struct ufs_hba *hba)
7495 {
7496         if (hba->is_powered) {
7497                 ufshcd_variant_hba_exit(hba);
7498                 ufshcd_setup_vreg(hba, false);
7499                 ufshcd_suspend_clkscaling(hba);
7500                 if (ufshcd_is_clkscaling_supported(hba))
7501                         if (hba->devfreq)
7502                                 ufshcd_suspend_clkscaling(hba);
7503                 ufshcd_setup_clocks(hba, false);
7504                 ufshcd_setup_hba_vreg(hba, false);
7505                 hba->is_powered = false;
7506         }
7507 }
7508 
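/* Issue a REQUEST SENSE to the given SCSI device and discard the returned data; used to clear a pending unit attention condition. */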
7509 static int
7510 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7511 {
7512         unsigned char cmd[6] = {REQUEST_SENSE,
7513                                 0,
7514                                 0,
7515                                 0,
7516                                 UFS_SENSE_SIZE,
7517                                 0};
7518         char *buffer;
7519         int ret;
7520 
7521         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
7522         if (!buffer) {
7523                 ret = -ENOMEM;
7524                 goto out;
7525         }
7526 
7527         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7528                         UFS_SENSE_SIZE, NULL, NULL,
7529                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7530         if (ret)
7531                 pr_err("%s: failed with err %d\n", __func__, ret);
7532 
7533         kfree(buffer);
7534 out:
7535         return ret;
7536 }
7537 
7538 /**
7539  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7540  *                           power mode
7541  * @hba: per adapter instance
7542  * @pwr_mode: device power mode to set
7543  *
7544  * Returns 0 if the requested power mode is set successfully
7545  * Returns non-zero if failed to set the requested power mode
7546  */
7547 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7548                                      enum ufs_dev_pwr_mode pwr_mode)
7549 {
7550         unsigned char cmd[6] = { START_STOP };
7551         struct scsi_sense_hdr sshdr;
7552         struct scsi_device *sdp;
7553         unsigned long flags;
7554         int ret;
7555 
7556         spin_lock_irqsave(hba->host->host_lock, flags);
7557         sdp = hba->sdev_ufs_device;
7558         if (sdp) {
7559                 ret = scsi_device_get(sdp);
7560                 if (!ret && !scsi_device_online(sdp)) {
7561                         ret = -ENODEV;
7562                         scsi_device_put(sdp);
7563                 }
7564         } else {
7565                 ret = -ENODEV;
7566         }
7567         spin_unlock_irqrestore(hba->host->host_lock, flags);
7568 
7569         if (ret)
7570                 return ret;
7571 
7572         /*
7573          * If scsi commands fail, the scsi mid-layer schedules scsi error-
7574          * handling, which would wait for host to be resumed. Since we know
7575          * we are functional while we are here, skip host resume in error
7576          * handling context.
7577          */
7578         hba->host->eh_noresume = 1;
7579         if (hba->wlun_dev_clr_ua) {
7580                 ret = ufshcd_send_request_sense(hba, sdp);
7581                 if (ret)
7582                         goto out;
7583                 /* Unit attention condition is cleared now */
7584                 hba->wlun_dev_clr_ua = false;
7585         }
7586 
7587         cmd[4] = pwr_mode << 4;
7588 
7589         /*
7590          * Current function would be generally called from the power management
7591          * callbacks hence set the RQF_PM flag so that it doesn't resume the
7592          * already suspended children.
7593          */
7594         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7595                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7596         if (ret) {
7597                 sdev_printk(KERN_WARNING, sdp,
7598                             "START_STOP failed for power mode: %d, result %x\n",
7599                             pwr_mode, ret);
7600                 if (driver_byte(ret) == DRIVER_SENSE)
7601                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
7602         }
7603 
7604         if (!ret)
7605                 hba->curr_dev_pwr_mode = pwr_mode;
7606 out:
7607         scsi_device_put(sdp);
7608         hba->host->eh_noresume = 0;
7609         return ret;
7610 }
7611 
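/*
 * Move the UniPro link to the requested low power state (Hibern8 or off).
 * When check_for_bkops is set, the link is only turned off if auto-bkops
 * is not enabled on the device.
 */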
7612 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7613                                         enum uic_link_state req_link_state,
7614                                         int check_for_bkops)
7615 {
7616         int ret = 0;
7617 
7618         if (req_link_state == hba->uic_link_state)
7619                 return 0;
7620 
7621         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7622                 ret = ufshcd_uic_hibern8_enter(hba);
7623                 if (!ret)
7624                         ufshcd_set_link_hibern8(hba);
7625                 else
7626                         goto out;
7627         }
7628         /*
7629          * If autobkops is enabled, the link can't be turned off because
7630          * turning off the link would also turn off the device.
7631          */
7632         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7633                    (!check_for_bkops || (check_for_bkops &&
7634                     !hba->auto_bkops_enabled))) {
7635                 /*
7636                  * Make sure the link is in a low power state (Hibern8)
7637                  * before the host controller is stopped and the link is
7638                  * marked off below.
7639                  */
7640 
7641 
7642                 ret = ufshcd_uic_hibern8_enter(hba);
7643                 if (ret)
7644                         goto out;
7645                 /*
7646                  * Change controller state to "reset state" which
7647                  * should also put the link in off/reset state
7648                  */
7649                 ufshcd_hba_stop(hba, true);
7650                 /*
7651                  * With the controller stopped, the link can now be
7652                  * recorded as being in the off state.
7653                  */
7654                 ufshcd_set_link_off(hba);
7655         }
7656 
7657 out:
7658         return ret;
7659 }
7660 
7661 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7662 {
7663         /*
7664          * Some UFS devices may keep drawing more than sleep current from
7665          * the UFS rails (especially from VCCQ) for a short while after the
7666          * link goes down. Devices with the DELAY_BEFORE_LPM quirk therefore
7667          * get an extra 2ms delay before their rails are put into LPM mode.
7668          */
7669         if (!ufshcd_is_link_active(hba) &&
7670             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7671                 usleep_range(2000, 2100);
7672 
7673         /*
7674          * If the UFS device and the link are both powered off and power-on
7675          * write protect is not enabled, all device power supplies (VCC,
7676          * VCCQ, VCCQ2) can be turned off.
7677          *
7678          * Otherwise, if the device is no longer active, VCC is turned off
7679          * to save power and, when the link is also inactive, VCCQ and VCCQ2
7680          * are switched to their low power (LPM) load.
7681          *
7682          * Errors from ufshcd_toggle_vreg() are ignored here as the device
7683          * is anyway going into a low power state.
7684          */
7685         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7686             !hba->dev_info.is_lu_power_on_wp) {
7687                 ufshcd_setup_vreg(hba, false);
7688         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7689                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7690                 if (!ufshcd_is_link_active(hba)) {
7691                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7692                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7693                 }
7694         }
7695 }
7696 
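/* Restore the device power rails for resume, undoing ufshcd_vreg_set_lpm(). */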
7697 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7698 {
7699         int ret = 0;
7700 
7701         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7702             !hba->dev_info.is_lu_power_on_wp) {
7703                 ret = ufshcd_setup_vreg(hba, true);
7704         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7705                 if (!ret && !ufshcd_is_link_active(hba)) {
7706                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7707                         if (ret)
7708                                 goto vcc_disable;
7709                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7710                         if (ret)
7711                                 goto vccq_lpm;
7712                 }
7713                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7714         }
7715         goto out;
7716 
7717 vccq_lpm:
7718         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7719 vcc_disable:
7720         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7721 out:
7722         return ret;
7723 }
7724 
7725 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7726 {
7727         if (ufshcd_is_link_off(hba))
7728                 ufshcd_setup_hba_vreg(hba, false);
7729 }
7730 
7731 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7732 {
7733         if (ufshcd_is_link_off(hba))
7734                 ufshcd_setup_hba_vreg(hba, true);
7735 }
7736 
7737 /**
7738  * ufshcd_suspend - helper function for suspend operations
7739  * @hba: per adapter instance
7740  * @pm_op: desired low power operation type
7741  *
7742  * This function will try to put the UFS device and link into the low power
7743  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7744  * (System PM level) that is being used for this suspend.
7745  *
7746  * If this function is called as part of the shutdown sequence, the device
7747  * and link are configured for complete power down.
7748  *
7749  * Returns 0 for success and non-zero for failure
7750  */
7751 
7752 
7753 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7754 {
7755         int ret = 0;
7756         enum ufs_pm_level pm_lvl;
7757         enum ufs_dev_pwr_mode req_dev_pwr_mode;
7758         enum uic_link_state req_link_state;
7759 
7760         hba->pm_op_in_progress = 1;
7761         if (!ufshcd_is_shutdown_pm(pm_op)) {
7762                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7763                          hba->rpm_lvl : hba->spm_lvl;
7764                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7765                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7766         } else {
7767                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7768                 req_link_state = UIC_LINK_OFF_STATE;
7769         }
7770 
7771         /*
7772          * If we can't transition into any of the low power modes
7773          * just gate the clocks.
7774          */
7775         ufshcd_hold(hba, false);
7776         hba->clk_gating.is_suspended = true;
7777 
7778         if (hba->clk_scaling.is_allowed) {
7779                 cancel_work_sync(&hba->clk_scaling.suspend_work);
7780                 cancel_work_sync(&hba->clk_scaling.resume_work);
7781                 ufshcd_suspend_clkscaling(hba);
7782         }
7783 
7784         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7785                         req_link_state == UIC_LINK_ACTIVE_STATE) {
7786                 goto disable_clks;
7787         }
7788 
7789         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7790             (req_link_state == hba->uic_link_state))
7791                 goto enable_gating;
7792 
7793         /* UFS device & link must be active before we enter this function */
7794         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7795                 ret = -EINVAL;
7796                 goto enable_gating;
7797         }
7798 
7799         if (ufshcd_is_runtime_pm(pm_op)) {
7800                 if (ufshcd_can_autobkops_during_suspend(hba)) {
7801                         /*
7802                          * The device is idle with no requests in the queue,
7803                          * allow background operations if bkops status shows
7804                          * that performance might be impacted.
7805                          */
7806                         ret = ufshcd_urgent_bkops(hba);
7807                         if (ret)
7808                                 goto enable_gating;
7809                 } else {
7810                         /* make sure that auto bkops is disabled */
7811                         ufshcd_disable_auto_bkops(hba);
7812                 }
7813         }
7814 
7815         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7816              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7817                !ufshcd_is_runtime_pm(pm_op))) {
7818                 /* disable auto bkops before setting the device power mode */
7819                 ufshcd_disable_auto_bkops(hba);
7820                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7821                 if (ret)
7822                         goto enable_gating;
7823         }
7824 
7825         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7826         if (ret)
7827                 goto set_dev_active;
7828 
7829         ufshcd_vreg_set_lpm(hba);
7830 
7831 disable_clks:
7832         /*
7833          * Call vendor specific suspend callback. As these callbacks may access
7834          * vendor specific host controller register space, we should ensure
7835          * these are not accessed after we power down the host controller.
7836          */
7837         ret = ufshcd_vops_suspend(hba, pm_op);
7838         if (ret)
7839                 goto set_link_active;
7840 
7841         if (!ufshcd_is_link_active(hba))
7842                 ufshcd_setup_clocks(hba, false);
7843         else
7844                 /* If the link is active, the device ref_clk can't be switched off */
7845                 __ufshcd_setup_clocks(hba, false, true);
7846 
7847         hba->clk_gating.state = CLKS_OFF;
7848         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7849         /*
7850          * Disable the host irq as there won't be any host controller
7851          * transaction expected till resume.
7852          */
7853         ufshcd_disable_irq(hba);
7854         /* Put the host controller power rail in low power mode if possible */
7855         ufshcd_hba_vreg_set_lpm(hba);
7856         goto out;
7857 
7858 set_link_active:
7859         if (hba->clk_scaling.is_allowed)
7860                 ufshcd_resume_clkscaling(hba);
7861         ufshcd_vreg_set_hpm(hba);
7862         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7863                 ufshcd_set_link_active(hba);
7864         else if (ufshcd_is_link_off(hba))
7865                 ufshcd_host_reset_and_restore(hba);
7866 set_dev_active:
7867         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7868                 ufshcd_disable_auto_bkops(hba);
7869 enable_gating:
7870         if (hba->clk_scaling.is_allowed)
7871                 ufshcd_resume_clkscaling(hba);
7872         hba->clk_gating.is_suspended = false;
7873         ufshcd_release(hba);
7874 out:
7875         hba->pm_op_in_progress = 0;
7876         if (ret)
7877                 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
7878         return ret;
7879 }
7880 
7881 /**
7882  * ufshcd_resume - helper function for resume operations
7883  * @hba: per adapter instance
7884  * @pm_op: runtime PM or system PM
7885  *
7886  * This function basically brings the UFS device, UniPro link and controller
7887  * to active state.
7888  *
7889  * Returns 0 for success and non-zero for failure
7890  */
7891 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7892 {
7893         int ret;
7894         enum uic_link_state old_link_state;
7895 
7896         hba->pm_op_in_progress = 1;
7897         old_link_state = hba->uic_link_state;
7898 
7899         ufshcd_hba_vreg_set_hpm(hba);
7900         /* Make sure clocks are enabled before accessing the controller */
7901         ret = ufshcd_setup_clocks(hba, true);
7902         if (ret)
7903                 goto out;
7904 
7905         /* enable the host irq as the host controller would be active soon */
7906         ret = ufshcd_enable_irq(hba);
7907         if (ret)
7908                 goto disable_irq_and_vops_clks;
7909 
7910         ret = ufshcd_vreg_set_hpm(hba);
7911         if (ret)
7912                 goto disable_irq_and_vops_clks;
7913 
7914         /*
7915          * Call vendor specific resume callback. As these callbacks may access
7916          * vendor specific host controller register space, we should ensure
7917          * these are not accessed before the clocks are enabled.
7918          */
7919         ret = ufshcd_vops_resume(hba, pm_op);
7920         if (ret)
7921                 goto disable_vreg;
7922 
7923         if (ufshcd_is_link_hibern8(hba)) {
7924                 ret = ufshcd_uic_hibern8_exit(hba);
7925                 if (!ret)
7926                         ufshcd_set_link_active(hba);
7927                 else
7928                         goto vendor_suspend;
7929         } else if (ufshcd_is_link_off(hba)) {
7930                 ret = ufshcd_host_reset_and_restore(hba);
7931                 /*
7932                  * ufshcd_host_reset_and_restore() should have already
7933                  * set the link state as active
7934                  */
7935                 if (ret || !ufshcd_is_link_active(hba))
7936                         goto vendor_suspend;
7937         }
7938 
7939         if (!ufshcd_is_ufs_dev_active(hba)) {
7940                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7941                 if (ret)
7942                         goto set_old_link_state;
7943         }
7944 
7945         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7946                 ufshcd_enable_auto_bkops(hba);
7947         else
7948                 /*
7949                  * If BKOPs operations are urgently needed at this moment then
7950                  * keep auto-bkops enabled or else disable it.
7951                  */
7952                 ufshcd_urgent_bkops(hba);
7953 
7954         hba->clk_gating.is_suspended = false;
7955 
7956         if (hba->clk_scaling.is_allowed)
7957                 ufshcd_resume_clkscaling(hba);
7958 
7959         /* Enable Auto-Hibernate if configured */
7960         ufshcd_auto_hibern8_enable(hba);
7961 
7962         /* Schedule clock gating in case of no further access to the UFS device yet */
7963         ufshcd_release(hba);
7964 
7965         goto out;
7966 
7967 set_old_link_state:
7968         ufshcd_link_state_transition(hba, old_link_state, 0);
7969 vendor_suspend:
7970         ufshcd_vops_suspend(hba, pm_op);
7971 disable_vreg:
7972         ufshcd_vreg_set_lpm(hba);
7973 disable_irq_and_vops_clks:
7974         ufshcd_disable_irq(hba);
7975         if (hba->clk_scaling.is_allowed)
7976                 ufshcd_suspend_clkscaling(hba);
7977         ufshcd_setup_clocks(hba, false);
7978 out:
7979         hba->pm_op_in_progress = 0;
7980         if (ret)
7981                 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
7982         return ret;
7983 }
7984 
7985 /**
7986  * ufshcd_system_suspend - system suspend routine
7987  * @hba: per adapter instance
7988  *
7989  * Check the description of ufshcd_suspend() for more details.
7990  *
7991  * Returns 0 for success and non-zero for failure
7992  */
7993 int ufshcd_system_suspend(struct ufs_hba *hba)
7994 {
7995         int ret = 0;
7996         ktime_t start = ktime_get();
7997 
7998         if (!hba || !hba->is_powered)
7999                 return 0;
8000 
8001         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8002              hba->curr_dev_pwr_mode) &&
8003             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8004              hba->uic_link_state))
8005                 goto out;
8006 
8007         if (pm_runtime_suspended(hba->dev)) {
8008                 /*
8009                  * The UFS device and/or link low power states used during
8010                  * runtime suspend may differ from the ones expected during
8011                  * system suspend. Hence runtime resume the device and link
8012                  * first and then let the system suspend low power states
8013                  * take effect.
8014                  */
8015 
8016                 ret = ufshcd_runtime_resume(hba);
8017                 if (ret)
8018                         goto out;
8019         }
8020 
8021         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8022 out:
8023         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8024                 ktime_to_us(ktime_sub(ktime_get(), start)),
8025                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8026         if (!ret)
8027                 hba->is_sys_suspended = true;
8028         return ret;
8029 }
8030 EXPORT_SYMBOL(ufshcd_system_suspend);
8031 
8032 /**
8033  * ufshcd_system_resume - system resume routine
8034  * @hba: per adapter instance
8035  *
8036  * Returns 0 for success and non-zero for failure
8037  */
8038 
8039 int ufshcd_system_resume(struct ufs_hba *hba)
8040 {
8041         int ret = 0;
8042         ktime_t start = ktime_get();
8043 
8044         if (!hba)
8045                 return -EINVAL;
8046 
8047         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8048                 /*
8049                  * Let the runtime resume take care of resuming
8050                  * if runtime suspended.
8051                  */
8052                 goto out;
8053         else
8054                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8055 out:
8056         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8057                 ktime_to_us(ktime_sub(ktime_get(), start)),
8058                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8059         if (!ret)
8060                 hba->is_sys_suspended = false;
8061         return ret;
8062 }
8063 EXPORT_SYMBOL(ufshcd_system_resume);
8064 
8065 /**
8066  * ufshcd_runtime_suspend - runtime suspend routine
8067  * @hba: per adapter instance
8068  *
8069  * Check the description of ufshcd_suspend() for more details.
8070  *
8071  * Returns 0 for success and non-zero for failure
8072  */
8073 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8074 {
8075         int ret = 0;
8076         ktime_t start = ktime_get();
8077 
8078         if (!hba)
8079                 return -EINVAL;
8080 
8081         if (!hba->is_powered)
8082                 goto out;
8083         else
8084                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8085 out:
8086         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8087                 ktime_to_us(ktime_sub(ktime_get(), start)),
8088                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8089         return ret;
8090 }
8091 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8092 
8093 /**
8094  * ufshcd_runtime_resume - runtime resume routine
8095  * @hba: per adapter instance
8096  *
8097  * This function brings the UFS device, UniPro link and controller back to
8098  * active state. The following operations are done here:
8099  *
8100  * 1. Turn on all the controller related clocks
8101  * 2. Bring the UniPro link out of Hibernate state
8102  * 3. If the UFS device is in sleep state, turn on the VCC rail and bring
8103  *    the device to active state
8104  * 4. Keep auto-bkops enabled or disable it based on the device's urgent
8105  *    bkops needs
8106  *
8107  * So following would be the possible power state after this function returns
8108  * successfully:
8109  *      S1: UFS device, UniPro link and controller are active
8110  *
8111  * Returns 0 for success and non-zero for failure
8112  */
8113 
8114 int ufshcd_runtime_resume(struct ufs_hba *hba)
8115 {
8116         int ret = 0;
8117         ktime_t start = ktime_get();
8118 
8119         if (!hba)
8120                 return -EINVAL;
8121 
8122         if (!hba->is_powered)
8123                 goto out;
8124         else
8125                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8126 out:
8127         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8128                 ktime_to_us(ktime_sub(ktime_get(), start)),
8129                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8130         return ret;
8131 }
8132 EXPORT_SYMBOL(ufshcd_runtime_resume);
8133 
8134 int ufshcd_runtime_idle(struct ufs_hba *hba)
8135 {
8136         return 0;
8137 }
8138 EXPORT_SYMBOL(ufshcd_runtime_idle);
8139 
8140 /**
8141  * ufshcd_shutdown - shutdown routine
8142  * @hba: per adapter instance
8143  *
8144  * This function would power off both the UFS device and the UFS link.
8145  *
8146  * Returns 0 always to allow force shutdown even in case of errors.
8147  */
8148 int ufshcd_shutdown(struct ufs_hba *hba)
8149 {
8150         int ret = 0;
8151 
8152         if (!hba->is_powered)
8153                 goto out;
8154 
8155         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8156                 goto out;
8157 
8158         if (pm_runtime_suspended(hba->dev)) {
8159                 ret = ufshcd_runtime_resume(hba);
8160                 if (ret)
8161                         goto out;
8162         }
8163 
8164         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8165 out:
8166         if (ret)
8167                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8168         /* allow force shutdown even in case of errors */
8169         return 0;
8170 }
8171 EXPORT_SYMBOL(ufshcd_shutdown);
8172 
8173 /**
8174  * ufshcd_remove - de-allocate SCSI host and host memory space
8175  *              data structure memory
8176  * @hba: per adapter instance
8177  */
8178 void ufshcd_remove(struct ufs_hba *hba)
8179 {
8180         ufs_bsg_remove(hba);
8181         ufs_sysfs_remove_nodes(hba->dev);
8182         scsi_remove_host(hba->host);
8183         /* disable interrupts */
8184         ufshcd_disable_intr(hba, hba->intr_mask);
8185         ufshcd_hba_stop(hba, true);
8186 
8187         ufshcd_exit_clk_scaling(hba);
8188         ufshcd_exit_clk_gating(hba);
8189         if (ufshcd_is_clkscaling_supported(hba))
8190                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8191         ufshcd_hba_exit(hba);
8192 }
8193 EXPORT_SYMBOL_GPL(ufshcd_remove);
8194 
8195 /**
8196  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8197  * @hba: pointer to Host Bus Adapter (HBA)
8198  */
8199 void ufshcd_dealloc_host(struct ufs_hba *hba)
8200 {
8201         scsi_host_put(hba->host);
8202 }
8203 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8204 
8205 /**
8206  * ufshcd_set_dma_mask - Set dma mask based on the controller
8207  *                       addressing capability
8208  * @hba: per adapter instance
8209  *
8210  * Returns 0 for success, non-zero for failure
8211  */
8212 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8213 {
8214         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8215                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8216                         return 0;
8217         }
8218         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8219 }
8220 
8221 /**
8222  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8223  * @dev: pointer to device handle
8224  * @hba_handle: driver private handle
8225  * Returns 0 on success, non-zero value on failure
8226  */
8227 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8228 {
8229         struct Scsi_Host *host;
8230         struct ufs_hba *hba;
8231         int err = 0;
8232 
8233         if (!dev) {
8234                 dev_err(dev,
8235                 "Invalid memory reference for dev is NULL\n");
8236                 err = -ENODEV;
8237                 goto out_error;
8238         }
8239 
8240         host = scsi_host_alloc(&ufshcd_driver_template,
8241                                 sizeof(struct ufs_hba));
8242         if (!host) {
8243                 dev_err(dev, "scsi_host_alloc failed\n");
8244                 err = -ENOMEM;
8245                 goto out_error;
8246         }
8247         hba = shost_priv(host);
8248         hba->host = host;
8249         hba->dev = dev;
8250         *hba_handle = hba;
8251         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8252 
8253         INIT_LIST_HEAD(&hba->clk_list_head);
8254 
8255 out_error:
8256         return err;
8257 }
8258 EXPORT_SYMBOL(ufshcd_alloc_host);
8259 
8260 /**
8261  * ufshcd_init - Driver initialization routine
8262  * @hba: per-adapter instance
8263  * @mmio_base: base register address
8264  * @irq: Interrupt line of device
8265  * Returns 0 on success, non-zero value on failure
8266  */
8267 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8268 {
8269         int err;
8270         struct Scsi_Host *host = hba->host;
8271         struct device *dev = hba->dev;
8272 
8273         if (!mmio_base) {
8274                 dev_err(hba->dev,
8275                 "Invalid memory reference for mmio_base is NULL\n");
8276                 err = -ENODEV;
8277                 goto out_error;
8278         }
8279 
8280         hba->mmio_base = mmio_base;
8281         hba->irq = irq;
8282 
8283         err = ufshcd_hba_init(hba);
8284         if (err)
8285                 goto out_error;
8286 
8287         /* Read capabilities registers */
8288         ufshcd_hba_capabilities(hba);
8289 
8290         /* Get UFS version supported by the controller */
8291         hba->ufs_version = ufshcd_get_ufs_version(hba);
8292 
8293         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8294             (hba->ufs_version != UFSHCI_VERSION_11) &&
8295             (hba->ufs_version != UFSHCI_VERSION_20) &&
8296             (hba->ufs_version != UFSHCI_VERSION_21))
8297                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8298                         hba->ufs_version);
8299 
8300         /* Get Interrupt bit mask per version */
8301         hba->intr_mask = ufshcd_get_intr_mask(hba);
8302 
8303         err = ufshcd_set_dma_mask(hba);
8304         if (err) {
8305                 dev_err(hba->dev, "set dma mask failed\n");
8306                 goto out_disable;
8307         }
8308 
8309         /* Allocate memory for host memory space */
8310         err = ufshcd_memory_alloc(hba);
8311         if (err) {
8312                 dev_err(hba->dev, "Memory allocation failed\n");
8313                 goto out_disable;
8314         }
8315 
8316         /* Configure the local reference blocks (LRBs) */
8317         ufshcd_host_memory_configure(hba);
8318 
8319         host->can_queue = hba->nutrs;
8320         host->cmd_per_lun = hba->nutrs;
8321         host->max_id = UFSHCD_MAX_ID;
8322         host->max_lun = UFS_MAX_LUNS;
8323         host->max_channel = UFSHCD_MAX_CHANNEL;
8324         host->unique_id = host->host_no;
8325         host->max_cmd_len = UFS_CDB_SIZE;
8326 
8327         hba->max_pwr_info.is_valid = false;
8328 
8329         /* Initialize wait queues for task management */
8330         init_waitqueue_head(&hba->tm_wq);
8331         init_waitqueue_head(&hba->tm_tag_wq);
8332 
8333         /* Initialize work queues */
8334         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8335         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8336 
8337         /* Initialize UIC command mutex */
8338         mutex_init(&hba->uic_cmd_mutex);
8339 
8340         /* Initialize mutex for device management commands */
8341         mutex_init(&hba->dev_cmd.lock);
8342 
8343         init_rwsem(&hba->clk_scaling_lock);
8344 
8345         /* Initialize device management tag acquire wait queue */
8346         init_waitqueue_head(&hba->dev_cmd.tag_wq);
8347 
8348         ufshcd_init_clk_gating(hba);
8349 
8350         ufshcd_init_clk_scaling(hba);
8351 
8352         /*
8353          * In order to avoid any spurious interrupt immediately after
8354          * registering the UFS controller interrupt handler, clear any pending
8355          * UFS interrupt status and disable all the UFS related interrupts.
8356          */
8357         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8358                       REG_INTERRUPT_STATUS);
8359         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8360         /*
8361          * Make sure that UFS interrupts are disabled and any pending interrupt
8362          * status is cleared before registering the UFS interrupt handler.
8363          */
8364         mb();
8365 
8366         /* IRQ registration */
8367         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8368         if (err) {
8369                 dev_err(hba->dev, "request irq failed\n");
8370                 goto exit_gating;
8371         } else {
8372                 hba->is_irq_enabled = true;
8373         }
8374 
8375         err = scsi_add_host(host, hba->dev);
8376         if (err) {
8377                 dev_err(hba->dev, "scsi_add_host failed\n");
8378                 goto exit_gating;
8379         }
8380 
8381         /* Reset the attached device */
8382         ufshcd_vops_device_reset(hba);
8383 
8384         /* Host controller enable */
8385         err = ufshcd_hba_enable(hba);
8386         if (err) {
8387                 dev_err(hba->dev, "Host controller enable failed\n");
8388                 ufshcd_print_host_regs(hba);
8389                 ufshcd_print_host_state(hba);
8390                 goto out_remove_scsi_host;
8391         }
8392 
8393         /*
8394          * Set the default power management level for runtime and system PM.
8395          * Default power saving mode is to keep the UFS link in Hibern8 state
8396          * and the UFS device in sleep state.
8397          */
8398         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8399                                                 UFS_SLEEP_PWR_MODE,
8400                                                 UIC_LINK_HIBERN8_STATE);
8401         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8402                                                 UFS_SLEEP_PWR_MODE,
8403                                                 UIC_LINK_HIBERN8_STATE);
8404 
8405         /* Set the default auto-hibern8 idle timer to 150 ms if not already set */
8406         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
8407                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8408                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8409         }
8410 
8411         /* Hold auto suspend until the async scan completes */
8412         pm_runtime_get_sync(dev);
8413         atomic_set(&hba->scsi_block_reqs_cnt, 0);
8414         /*
8415          * We assume the device was not put into sleep or power-down state
8416          * exclusively by the boot loader before the kernel started; this
8417          * avoids having to do link startup twice during
8418          * ufshcd_probe_hba().
8419          */
8420         ufshcd_set_ufs_dev_active(hba);
8421 
8422         async_schedule(ufshcd_async_scan, hba);
8423         ufs_sysfs_add_nodes(hba->dev);
8424 
8425         return 0;
8426 
8427 out_remove_scsi_host:
8428         scsi_remove_host(hba->host);
8429 exit_gating:
8430         ufshcd_exit_clk_scaling(hba);
8431         ufshcd_exit_clk_gating(hba);
8432 out_disable:
8433         hba->is_irq_enabled = false;
8434         ufshcd_hba_exit(hba);
8435 out_error:
8436         return err;
8437 }
8438 EXPORT_SYMBOL_GPL(ufshcd_init);
8439 
8440 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8441 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8442 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8443 MODULE_LICENSE("GPL");
8444 MODULE_VERSION(UFSHCD_DRIVER_VERSION);