Lines Matching refs:hba

175 static void ufshcd_tmc_handler(struct ufs_hba *hba);
177 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
178 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
179 static void ufshcd_hba_exit(struct ufs_hba *hba);
180 static int ufshcd_probe_hba(struct ufs_hba *hba);
181 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
183 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
184 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
185 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
186 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
187 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
189 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
192 static inline int ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
196 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
197 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, in ufshcd_enable_irq()
198 hba); in ufshcd_enable_irq()
200 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", in ufshcd_enable_irq()
202 hba->is_irq_enabled = true; in ufshcd_enable_irq()
208 static inline void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
210 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
211 free_irq(hba->irq, hba); in ufshcd_disable_irq()
212 hba->is_irq_enabled = false; in ufshcd_disable_irq()
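
The two fragments above (lines 192-212) show the idempotent IRQ hookup pattern: request_irq() runs only while is_irq_enabled is false, and free_irq() only while it is true, so repeated enable/disable calls are harmless. A minimal user-space model of the same guard; register_irq()/unregister_irq() are hypothetical stubs standing in for the kernel calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_hba {
        int  irq;
        bool is_irq_enabled;            /* mirrors hba->is_irq_enabled */
    };

    /* Hypothetical stand-ins for request_irq()/free_irq(). */
    static int register_irq(int irq)    { printf("hooked irq %d\n", irq); return 0; }
    static void unregister_irq(int irq) { printf("released irq %d\n", irq); }

    static int enable_irq_once(struct fake_hba *hba)
    {
        int ret = 0;

        if (!hba->is_irq_enabled) {     /* guard makes the call idempotent */
            ret = register_irq(hba->irq);
            if (ret)
                fprintf(stderr, "request failed, ret=%d\n", ret);
            else
                hba->is_irq_enabled = true;
        }
        return ret;
    }

    static void disable_irq_once(struct fake_hba *hba)
    {
        if (hba->is_irq_enabled) {      /* never free an unregistered handler */
            unregister_irq(hba->irq);
            hba->is_irq_enabled = false;
        }
    }

    int main(void)
    {
        struct fake_hba hba = { .irq = 42, .is_irq_enabled = false };

        enable_irq_once(&hba);
        enable_irq_once(&hba);          /* no-op: already enabled */
        disable_irq_once(&hba);
        return 0;
    }
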
227 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
236 while ((ufshcd_readl(hba, reg) & mask) != val) { in ufshcd_wait_for_register()
241 if ((ufshcd_readl(hba, reg) & mask) != val) in ufshcd_wait_for_register()
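
ufshcd_wait_for_register() (lines 227-241) polls an MMIO register until the masked value matches, then performs one final read after the deadline to decide the outcome. A user-space sketch of the same poll-then-final-check shape, assuming CLOCK_MONOTONIC for the deadline and a plain variable in place of the MMIO register:

    #include <errno.h>
    #include <stdint.h>
    #include <time.h>

    static int64_t now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll until (*reg & mask) == val or timeout_ms expires. */
    static int wait_for_reg(volatile uint32_t *reg, uint32_t mask,
                            uint32_t val, int timeout_ms)
    {
        int64_t deadline = now_ms() + timeout_ms;

        while ((*reg & mask) != val) {
            if (now_ms() > deadline)
                break;
            /* the driver sleeps between polls; busy-waiting here for brevity */
        }

        /* one last read decides the verdict, closing the race where the
         * value changed just as the deadline expired */
        return ((*reg & mask) == val) ? 0 : -ETIMEDOUT;
    }

    int main(void)
    {
        uint32_t fake_reg = 0x1;        /* condition already satisfied */

        return wait_for_reg(&fake_reg, 0x1, 0x1, 10) ? 1 : 0;
    }
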
256 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
258 if (hba->ufs_version == UFSHCI_VERSION_10) in ufshcd_get_intr_mask()
270 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
272 return ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
282 static inline int ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
284 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & in ufshcd_is_device_present()
322 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) in ufshcd_get_tm_free_slot() argument
331 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); in ufshcd_get_tm_free_slot()
332 if (tag >= hba->nutmrs) in ufshcd_get_tm_free_slot()
334 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); in ufshcd_get_tm_free_slot()
342 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) in ufshcd_put_tm_slot() argument
344 clear_bit_unlock(slot, &hba->tm_slots_in_use); in ufshcd_put_tm_slot()
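
ufshcd_get_tm_free_slot()/ufshcd_put_tm_slot() (lines 322-344) form a lockless tag allocator: find a zero bit, claim it with test_and_set_bit_lock(), and retry if another context won the race; release is a clear_bit_unlock(). A compact model with C11 atomics (the names are illustrative, not kernel bitops):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NSLOTS 8                    /* stands in for hba->nutmrs */

    static atomic_ulong slots_in_use;   /* one bit per slot, like tm_slots_in_use */

    static bool get_free_slot(int *free_slot)
    {
        unsigned long snap, bit;

        do {
            snap = atomic_load(&slots_in_use);
            for (bit = 0; bit < NSLOTS; bit++)      /* find_first_zero_bit() */
                if (!(snap & (1UL << bit)))
                    break;
            if (bit >= NSLOTS)
                return false;                       /* every slot is busy */
            /* claim the bit; retry if another thread set it first */
        } while (atomic_fetch_or(&slots_in_use, 1UL << bit) & (1UL << bit));

        *free_slot = (int)bit;
        return true;
    }

    static void put_slot(int slot)
    {
        atomic_fetch_and(&slots_in_use, ~(1UL << slot));    /* clear_bit_unlock() */
    }

    int main(void)
    {
        int slot;

        if (get_free_slot(&slot))
            put_slot(slot);
        return 0;
    }
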
352 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utrl_clear() argument
354 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
386 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
388 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
399 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
401 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
461 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
463 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
475 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
477 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
489 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
491 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
493 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
501 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
503 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
512 static inline int ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
514 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; in ufshcd_is_hba_active()
521 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
524 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
526 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
527 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
528 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
532 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
533 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
536 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
538 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
539 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
540 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
542 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
545 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
547 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
550 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_ungate_work()
551 devfreq_resume_device(hba->devfreq); in ufshcd_ungate_work()
552 scsi_unblock_requests(hba->host); in ufshcd_ungate_work()
561 int ufshcd_hold(struct ufs_hba *hba, bool async) in ufshcd_hold() argument
566 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_hold()
568 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
569 hba->clk_gating.active_reqs++; in ufshcd_hold()
572 switch (hba->clk_gating.state) { in ufshcd_hold()
576 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
577 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
586 scsi_block_requests(hba->host); in ufshcd_hold()
587 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
588 schedule_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
596 hba->clk_gating.active_reqs--; in ufshcd_hold()
600 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
601 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
603 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
606 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
607 __func__, hba->clk_gating.state); in ufshcd_hold()
610 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
617 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
621 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
622 if (hba->clk_gating.is_suspended) { in ufshcd_gate_work()
623 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
627 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
628 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
629 || hba->lrb_in_use || hba->outstanding_tasks in ufshcd_gate_work()
630 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
633 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
636 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
637 if (ufshcd_uic_hibern8_enter(hba)) { in ufshcd_gate_work()
638 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
641 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
644 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_gate_work()
645 devfreq_suspend_device(hba->devfreq); in ufshcd_gate_work()
646 hba->clk_scaling.window_start_t = 0; in ufshcd_gate_work()
649 if (!ufshcd_is_link_active(hba)) in ufshcd_gate_work()
650 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
653 __ufshcd_setup_clocks(hba, false, true); in ufshcd_gate_work()
664 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
665 if (hba->clk_gating.state == REQ_CLKS_OFF) in ufshcd_gate_work()
666 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
669 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
675 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
677 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
680 hba->clk_gating.active_reqs--; in __ufshcd_release()
682 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended in __ufshcd_release()
683 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in __ufshcd_release()
684 || hba->lrb_in_use || hba->outstanding_tasks in __ufshcd_release()
685 || hba->active_uic_cmd || hba->uic_async_done) in __ufshcd_release()
688 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
689 schedule_delayed_work(&hba->clk_gating.gate_work, in __ufshcd_release()
690 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
693 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
697 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
698 __ufshcd_release(hba); in ufshcd_release()
699 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
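
ufshcd_hold() and ufshcd_release() (lines 561-699) reference-count in-flight work: hold bumps active_reqs and either cancels the pending gate work or kicks the ungate worker, while release drops the count and, once idle, schedules the delayed gate work after delay_ms. A reduced mutex-protected model of that counting logic, with the delayed work represented by a flag and most of the driver's idle checks omitted:

    #include <pthread.h>
    #include <stdbool.h>

    enum clk_state { CLKS_OFF, CLKS_ON, REQ_CLKS_ON, REQ_CLKS_OFF };

    struct gating {
        pthread_mutex_t lock;           /* plays the role of the host lock */
        enum clk_state  state;
        int             active_reqs;
        bool            gate_pending;   /* models the delayed gate work */
    };

    static void hold(struct gating *g)
    {
        pthread_mutex_lock(&g->lock);
        g->active_reqs++;
        switch (g->state) {
        case CLKS_ON:
            break;                      /* clocks already running */
        case REQ_CLKS_OFF:
            if (g->gate_pending) {      /* cancel_delayed_work() succeeded */
                g->gate_pending = false;
                g->state = CLKS_ON;
                break;
            }
            /* fall through: gate work already running, must re-enable */
        case CLKS_OFF:
            g->state = REQ_CLKS_ON;     /* the ungate worker turns clocks on */
            break;
        default:
            break;
        }
        pthread_mutex_unlock(&g->lock);
    }

    static void release(struct gating *g)
    {
        pthread_mutex_lock(&g->lock);
        if (--g->active_reqs == 0 && g->state == CLKS_ON) {
            g->state = REQ_CLKS_OFF;
            g->gate_pending = true;     /* gate work fires after delay_ms */
        }
        pthread_mutex_unlock(&g->lock);
    }

    int main(void)
    {
        struct gating g = { PTHREAD_MUTEX_INITIALIZER, CLKS_ON, 0, false };

        hold(&g);
        release(&g);
        return 0;
    }
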
705 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
707 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
713 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_store() local
719 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
720 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_store()
721 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
725 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
727 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
730 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
731 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
732 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
734 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating()
735 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating()
736 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating()
737 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating()
738 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; in ufshcd_init_clk_gating()
739 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating()
740 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating()
743 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
745 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_exit_clk_gating()
747 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_exit_clk_gating()
748 cancel_work_sync(&hba->clk_gating.ungate_work); in ufshcd_exit_clk_gating()
749 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_exit_clk_gating()
753 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
755 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_clk_scaling_start_busy()
758 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
759 hba->clk_scaling.busy_start_t = ktime_get(); in ufshcd_clk_scaling_start_busy()
760 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
764 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
766 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
768 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_clk_scaling_update_busy()
771 if (!hba->outstanding_reqs && scaling->is_busy_started) { in ufshcd_clk_scaling_update_busy()
784 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) in ufshcd_send_command() argument
786 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
787 __set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
788 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_send_command()
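
ufshcd_send_command() (lines 784-788) is the issue path in miniature: record the tag in outstanding_reqs, then ring the doorbell by writing 1 << tag; the hardware clears its doorbell bit when the request completes, which is what the completion path later compares against. A sketch of just the bookkeeping:

    #include <stdint.h>

    struct ctrl {
        unsigned long      outstanding_reqs;    /* driver's view of busy tags */
        volatile uint32_t *doorbell;            /* REG_UTP_TRANSFER_REQ_DOOR_BELL */
    };

    static void send_command(struct ctrl *c, unsigned int tag)
    {
        c->outstanding_reqs |= 1UL << tag;      /* __set_bit(task_tag, ...) */
        *c->doorbell = 1U << tag;               /* a 1 rings only that slot */
    }
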
814 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
816 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
831 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
833 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
835 dev_warn(hba->dev, in ufshcd_copy_query_response()
849 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
851 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
854 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
855 hba->nutmrs = in ufshcd_hba_capabilities()
856 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
865 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
867 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) in ufshcd_ready_for_uic_cmd()
880 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
882 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
893 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
895 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
897 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
900 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
901 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
902 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
905 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
918 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
929 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
930 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
931 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
946 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in __ufshcd_send_uic_cmd() argument
948 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
949 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
956 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
969 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
974 ufshcd_hold(hba, false); in ufshcd_send_uic_cmd()
975 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
976 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
978 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
979 ret = __ufshcd_send_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
980 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
982 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
984 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
986 ufshcd_release(hba); in ufshcd_send_uic_cmd()
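
ufshcd_send_uic_cmd() (lines 946-986) layers three protections: ufshcd_hold() keeps the clocks on, uic_cmd_mutex serializes commands because the controller exposes a single UIC command slot, and the host lock covers the register dispatch before the caller sleeps on a completion. A condensed pthread model of the serialize-dispatch-wait sequence (names illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct cmd {
        /* initialize with pthread_mutex_init()/pthread_cond_init() */
        pthread_mutex_t lock;
        pthread_cond_t  done;
        bool            completed;
        int             result;
    };

    static pthread_mutex_t uic_cmd_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Called from the completion path when UIC_COMMAND_COMPL fires. */
    static void complete_cmd(struct cmd *cmd, int result)
    {
        pthread_mutex_lock(&cmd->lock);
        cmd->result = result;
        cmd->completed = true;
        pthread_cond_signal(&cmd->done);
        pthread_mutex_unlock(&cmd->lock);
    }

    static int send_cmd(struct cmd *cmd, void (*dispatch)(struct cmd *))
    {
        pthread_mutex_lock(&uic_cmd_mutex);     /* one UIC command at a time */

        cmd->completed = false;
        dispatch(cmd);                          /* models the MMIO writes */

        pthread_mutex_lock(&cmd->lock);         /* wait_for_completion() */
        while (!cmd->completed)
            pthread_cond_wait(&cmd->done, &cmd->lock);
        pthread_mutex_unlock(&cmd->lock);

        pthread_mutex_unlock(&uic_cmd_mutex);
        return cmd->result;
    }
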
1035 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
1037 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
1039 if (hba->ufs_version == UFSHCI_VERSION_10) { in ufshcd_enable_intr()
1047 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
1055 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
1057 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
1059 if (hba->ufs_version == UFSHCI_VERSION_10) { in ufshcd_disable_intr()
1069 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
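
ufshcd_enable_intr()/ufshcd_disable_intr() (lines 1035-1069) are read-modify-write updates of REG_INTERRUPT_ENABLE, with a version check because UFSHCI 1.0 hosts need different bit handling. The basic RMW shape:

    #include <stdint.h>

    static void enable_intr(volatile uint32_t *intr_enable, uint32_t intrs)
    {
        uint32_t set = *intr_enable;    /* read the current enable mask */

        set |= intrs;                   /* UFSHCI 1.0 hosts need extra bit
                                         * handling here (the version checks
                                         * at lines 1039/1059) */
        *intr_enable = set;             /* write the mask back */
    }

    static void disable_intr(volatile uint32_t *intr_enable, uint32_t intrs)
    {
        uint32_t set = *intr_enable;

        set &= ~intrs;
        *intr_enable = set;
    }
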
1149 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
1153 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
1195 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_compose_upiu() argument
1212 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_upiu()
1214 hba, lrbp, upiu_flags); in ufshcd_compose_upiu()
1215 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_upiu()
1223 dev_err(hba->dev, "%s: UFS native commands are not supported\n", in ufshcd_compose_upiu()
1228 dev_err(hba->dev, "%s: unknown command type: 0x%x\n", in ufshcd_compose_upiu()
1272 struct ufs_hba *hba; in ufshcd_queuecommand() local
1277 hba = shost_priv(host); in ufshcd_queuecommand()
1281 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
1282 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
1293 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", in ufshcd_queuecommand()
1294 __func__, hba->ufshcd_state); in ufshcd_queuecommand()
1299 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
1302 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { in ufshcd_queuecommand()
1313 err = ufshcd_hold(hba, true); in ufshcd_queuecommand()
1316 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_queuecommand()
1319 WARN_ON(hba->clk_gating.state != CLKS_ON); in ufshcd_queuecommand()
1321 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
1333 ufshcd_compose_upiu(hba, lrbp); in ufshcd_queuecommand()
1337 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_queuecommand()
1342 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
1343 ufshcd_send_command(hba, tag); in ufshcd_queuecommand()
1345 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
1350 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
1360 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
1362 return ufshcd_compose_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
1366 ufshcd_clear_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_cmd() argument
1373 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmd()
1374 ufshcd_utrl_clear(hba, tag); in ufshcd_clear_cmd()
1375 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmd()
1381 err = ufshcd_wait_for_register(hba, in ufshcd_clear_cmd()
1389 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_check_query_response() argument
1391 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
1405 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
1414 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
1416 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
1421 err = ufshcd_check_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
1423 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
1428 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
1433 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
1441 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
1448 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
1451 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
1452 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
1456 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
1458 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
1462 if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) in ufshcd_wait_for_dev_cmd()
1481 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) in ufshcd_get_dev_cmd_tag() argument
1491 tmp = ~hba->lrb_in_use; in ufshcd_get_dev_cmd_tag()
1492 tag = find_last_bit(&tmp, hba->nutrs); in ufshcd_get_dev_cmd_tag()
1493 if (tag >= hba->nutrs) in ufshcd_get_dev_cmd_tag()
1495 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); in ufshcd_get_dev_cmd_tag()
1503 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) in ufshcd_put_dev_cmd_tag() argument
1505 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_put_dev_cmd_tag()
1517 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
1531 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); in ufshcd_exec_dev_cmd()
1534 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
1536 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
1540 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
1542 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_exec_dev_cmd()
1543 ufshcd_send_command(hba, tag); in ufshcd_exec_dev_cmd()
1544 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_exec_dev_cmd()
1546 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_exec_dev_cmd()
1549 ufshcd_put_dev_cmd_tag(hba, tag); in ufshcd_exec_dev_cmd()
1550 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_exec_dev_cmd()
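
ufshcd_exec_dev_cmd() (lines 1517-1550) blocks on wait_event() until ufshcd_get_dev_cmd_tag() finds a free tag (via find_last_bit, so the highest free tag wins), runs the command, then releases the tag and wake_up()s other waiters. That handshake maps naturally onto a condition variable; an illustrative model, not the kernel API:

    #include <pthread.h>

    #define NTAGS 32                    /* stands in for hba->nutrs */

    static pthread_mutex_t tag_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  tag_wq   = PTHREAD_COND_INITIALIZER;
    static unsigned long   tags_in_use;

    static int get_tag_blocking(void)
    {
        int tag;

        pthread_mutex_lock(&tag_lock);
        for (;;) {
            for (tag = NTAGS - 1; tag >= 0; tag--)  /* find_last_bit() */
                if (!(tags_in_use & (1UL << tag)))
                    goto claimed;
            /* nothing free: sleep until put_tag() signals us */
            pthread_cond_wait(&tag_wq, &tag_lock);
        }
    claimed:
        tags_in_use |= 1UL << tag;
        pthread_mutex_unlock(&tag_lock);
        return tag;
    }

    static void put_tag(int tag)
    {
        pthread_mutex_lock(&tag_lock);
        tags_in_use &= ~(1UL << tag);
        pthread_mutex_unlock(&tag_lock);
        pthread_cond_signal(&tag_wq);   /* the wake_up(&hba->dev_cmd.tag_wq) */
    }
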
1564 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
1568 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
1569 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
1587 static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
1594 BUG_ON(!hba); in ufshcd_query_flag()
1596 ufshcd_hold(hba, false); in ufshcd_query_flag()
1597 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
1598 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
1611 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
1618 dev_err(hba->dev, in ufshcd_query_flag()
1625 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_flag()
1628 dev_err(hba->dev, in ufshcd_query_flag()
1639 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
1640 ufshcd_release(hba); in ufshcd_query_flag()
1655 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
1662 BUG_ON(!hba); in ufshcd_query_attr()
1664 ufshcd_hold(hba, false); in ufshcd_query_attr()
1666 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
1672 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
1673 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
1685 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
1691 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
1694 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", in ufshcd_query_attr()
1702 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
1704 ufshcd_release(hba); in ufshcd_query_attr()
1722 static int ufshcd_query_descriptor(struct ufs_hba *hba, in ufshcd_query_descriptor() argument
1730 BUG_ON(!hba); in ufshcd_query_descriptor()
1732 ufshcd_hold(hba, false); in ufshcd_query_descriptor()
1734 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in ufshcd_query_descriptor()
1741 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in ufshcd_query_descriptor()
1747 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_descriptor()
1748 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_descriptor()
1750 hba->dev_cmd.query.descriptor = desc_buf; in ufshcd_query_descriptor()
1761 dev_err(hba->dev, in ufshcd_query_descriptor()
1768 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_descriptor()
1771 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", in ufshcd_query_descriptor()
1776 hba->dev_cmd.query.descriptor = NULL; in ufshcd_query_descriptor()
1780 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_descriptor()
1782 ufshcd_release(hba); in ufshcd_query_descriptor()
1797 static int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
1828 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
1836 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d", in ufshcd_read_desc_param()
1852 static inline int ufshcd_read_desc(struct ufs_hba *hba, in ufshcd_read_desc() argument
1858 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); in ufshcd_read_desc()
1861 static inline int ufshcd_read_power_desc(struct ufs_hba *hba, in ufshcd_read_power_desc() argument
1865 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); in ufshcd_read_power_desc()
1878 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
1891 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
1908 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
1913 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); in ufshcd_memory_alloc()
1914 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1916 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
1925 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
1926 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1927 dev_err(hba->dev, in ufshcd_memory_alloc()
1936 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
1937 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1939 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
1941 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
1942 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1943 dev_err(hba->dev, in ufshcd_memory_alloc()
1952 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
1953 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1955 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
1957 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
1958 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1959 dev_err(hba->dev, in ufshcd_memory_alloc()
1965 hba->lrb = devm_kzalloc(hba->dev, in ufshcd_memory_alloc()
1966 hba->nutrs * sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
1968 if (!hba->lrb) { in ufshcd_memory_alloc()
1969 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
1990 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
2001 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
2002 cmd_descp = hba->ucdl_base_addr; in ufshcd_host_memory_configure()
2010 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
2012 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
2029 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); in ufshcd_host_memory_configure()
2030 hba->lrb[i].ucd_req_ptr = in ufshcd_host_memory_configure()
2032 hba->lrb[i].ucd_rsp_ptr = in ufshcd_host_memory_configure()
2034 hba->lrb[i].ucd_prdt_ptr = in ufshcd_host_memory_configure()
2050 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
2057 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
2059 dev_err(hba->dev, in ufshcd_dme_link_startup()
2064 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
2069 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
2076 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
2082 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
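
ufshcd_add_delay_before_dme_cmd() (lines 2064-2082) services UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS: if the previous DME command ran less than a minimum interval ago, sleep for the remainder first, and wait the full interval when no timestamp exists yet. A user-space model of the timestamp arithmetic; MIN_DELAY_US is a placeholder, and unlike the driver this sketch also updates the timestamp itself:

    #include <time.h>
    #include <unistd.h>

    #define MIN_DELAY_US 1000           /* placeholder for the quirk's minimum gap */

    static struct timespec last_cmd;    /* all-zero until the first command */

    static long us_since(const struct timespec *then)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - then->tv_sec) * 1000000L +
               (now.tv_nsec - then->tv_nsec) / 1000L;
    }

    static void delay_before_cmd(void)
    {
        long gap;

        if (!last_cmd.tv_sec && !last_cmd.tv_nsec) {
            usleep(MIN_DELAY_US);       /* no timestamp yet: wait the full gap */
        } else {
            gap = us_since(&last_cmd);
            if (gap < MIN_DELAY_US)
                usleep((useconds_t)(MIN_DELAY_US - gap));
        }
        clock_gettime(CLOCK_MONOTONIC, &last_cmd);
    }
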
2105 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
2122 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
2124 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
2140 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
2155 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
2157 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
2185 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
2192 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
2194 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
2196 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2197 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
2198 ret = __ufshcd_send_uic_cmd(hba, cmd); in ufshcd_uic_pwr_ctrl()
2199 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2201 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2206 ret = ufshcd_wait_for_uic_cmd(hba, cmd); in ufshcd_uic_pwr_ctrl()
2208 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2214 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
2216 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2223 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
2225 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2231 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2232 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
2233 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2234 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
2247 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
2255 ufshcd_hold(hba, false); in ufshcd_uic_change_pwr_mode()
2256 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
2257 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
2262 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
2268 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
2271 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
2277 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
2279 ufshcd_set_link_off(hba); in ufshcd_uic_hibern8_exit()
2280 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_uic_hibern8_exit()
2291 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
2293 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
2294 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
2295 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
2296 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
2297 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
2298 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
2299 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
2306 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
2308 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
2310 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
2318 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
2320 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
2324 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
2336 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
2338 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
2341 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
2348 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
2351 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
2354 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
2361 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
2365 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
2371 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
2372 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
2373 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
2374 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
2375 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
2376 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
2377 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
2378 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
2388 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
2389 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
2393 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); in ufshcd_change_power_mode()
2395 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); in ufshcd_change_power_mode()
2397 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
2398 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
2402 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); in ufshcd_change_power_mode()
2404 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); in ufshcd_change_power_mode()
2410 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
2413 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
2417 dev_err(hba->dev, in ufshcd_change_power_mode()
2420 if (hba->vops && hba->vops->pwr_change_notify) in ufshcd_change_power_mode()
2421 hba->vops->pwr_change_notify(hba, in ufshcd_change_power_mode()
2424 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
2436 static int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
2442 if (hba->vops && hba->vops->pwr_change_notify) in ufshcd_config_pwr_mode()
2443 hba->vops->pwr_change_notify(hba, in ufshcd_config_pwr_mode()
2448 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
2459 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
2466 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
2470 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_complete_dev_init()
2473 dev_err(hba->dev, in ufshcd_complete_dev_init()
2482 err = ufshcd_query_flag(hba, in ufshcd_complete_dev_init()
2487 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, in ufshcd_complete_dev_init()
2492 dev_err(hba->dev, in ufshcd_complete_dev_init()
2496 dev_err(hba->dev, in ufshcd_complete_dev_init()
2516 static int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
2522 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
2525 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
2528 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
2530 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
2532 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
2534 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
2541 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
2543 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
2545 dev_err(hba->dev, in ufshcd_make_hba_operational()
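
ufshcd_make_hba_operational() (lines 2516-2545) enables interrupts, configures interrupt aggregation, programs the transfer and task-management request list bases by splitting each 64-bit DMA address across a low/high register pair, and only sets the run-stop bits after checking REG_CONTROLLER_STATUS. The address split in isolation:

    #include <stdint.h>

    /* Split a 64-bit DMA address across a low/high pair of 32-bit registers. */
    static void program_list_base(volatile uint32_t *reg_lo,
                                  volatile uint32_t *reg_hi,
                                  uint64_t dma_addr)
    {
        *reg_lo = (uint32_t)(dma_addr & 0xffffffffu);   /* lower_32_bits() */
        *reg_hi = (uint32_t)(dma_addr >> 32);           /* upper_32_bits() */
    }
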
2565 static int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
2575 if (!ufshcd_is_hba_active(hba)) { in ufshcd_hba_enable()
2578 ufshcd_hba_stop(hba); in ufshcd_hba_enable()
2589 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
2591 if (hba->vops && hba->vops->hce_enable_notify) in ufshcd_hba_enable()
2592 hba->vops->hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
2595 ufshcd_hba_start(hba); in ufshcd_hba_enable()
2611 while (ufshcd_is_hba_active(hba)) { in ufshcd_hba_enable()
2615 dev_err(hba->dev, in ufshcd_hba_enable()
2623 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
2625 if (hba->vops && hba->vops->hce_enable_notify) in ufshcd_hba_enable()
2626 hba->vops->hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
2637 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
2643 if (hba->vops && hba->vops->link_startup_notify) in ufshcd_link_startup()
2644 hba->vops->link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
2646 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
2649 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
2650 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
2660 if (ret && ufshcd_hba_enable(hba)) in ufshcd_link_startup()
2669 if (hba->vops && hba->vops->link_startup_notify) { in ufshcd_link_startup()
2670 ret = hba->vops->link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
2675 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
2678 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
2692 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
2697 ufshcd_hold(hba, false); in ufshcd_verify_dev_init()
2698 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
2700 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
2706 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
2708 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
2709 ufshcd_release(hba); in ufshcd_verify_dev_init()
2712 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
2729 struct ufs_hba *hba; in ufshcd_set_queue_depth() local
2731 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
2733 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
2734 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_set_queue_depth()
2745 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
2747 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
2749 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
2765 static int ufshcd_get_lu_wp(struct ufs_hba *hba, in ufshcd_get_lu_wp() argument
2781 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_get_lu_wp()
2796 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, in ufshcd_get_lu_power_on_wp_status() argument
2799 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
2800 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
2803 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
2806 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
2818 struct ufs_hba *hba; in ufshcd_slave_alloc() local
2820 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
2834 ufshcd_get_lu_power_on_wp_status(hba, sdev); in ufshcd_slave_alloc()
2848 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_change_queue_depth() local
2850 if (depth > hba->nutrs) in ufshcd_change_queue_depth()
2851 depth = hba->nutrs; in ufshcd_change_queue_depth()
2875 struct ufs_hba *hba; in ufshcd_slave_destroy() local
2877 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
2882 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
2883 hba->sdev_ufs_device = NULL; in ufshcd_slave_destroy()
2884 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
2896 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) in ufshcd_task_req_compl() argument
2904 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_task_req_compl()
2907 __clear_bit(index, &hba->outstanding_tasks); in ufshcd_task_req_compl()
2909 task_req_descp = hba->utmrdl_base_addr; in ufshcd_task_req_compl()
2920 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_task_req_compl()
2923 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_task_req_compl()
2970 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_transfer_rsp_status() argument
2999 schedule_work(&hba->eeh_work); in ufshcd_transfer_rsp_status()
3004 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3009 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3029 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3042 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
3044 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
3045 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
3046 ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
3047 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
3048 ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
3049 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
3052 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) in ufshcd_uic_cmd_compl()
3053 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
3060 static void ufshcd_transfer_req_compl(struct ufs_hba *hba) in ufshcd_transfer_req_compl() argument
3076 ufshcd_reset_intr_aggr(hba); in ufshcd_transfer_req_compl()
3078 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_transfer_req_compl()
3079 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; in ufshcd_transfer_req_compl()
3081 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in ufshcd_transfer_req_compl()
3082 lrbp = &hba->lrb[index]; in ufshcd_transfer_req_compl()
3085 result = ufshcd_transfer_rsp_status(hba, lrbp); in ufshcd_transfer_req_compl()
3090 clear_bit_unlock(index, &hba->lrb_in_use); in ufshcd_transfer_req_compl()
3093 __ufshcd_release(hba); in ufshcd_transfer_req_compl()
3095 if (hba->dev_cmd.complete) in ufshcd_transfer_req_compl()
3096 complete(hba->dev_cmd.complete); in ufshcd_transfer_req_compl()
3101 hba->outstanding_reqs ^= completed_reqs; in ufshcd_transfer_req_compl()
3103 ufshcd_clk_scaling_update_busy(hba); in ufshcd_transfer_req_compl()
3106 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_transfer_req_compl()
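
ufshcd_transfer_req_compl() (lines 3060-3106) recovers the finished commands without any per-command completion flag: the doorbell register still holds a 1 for every busy slot, so doorbell ^ outstanding_reqs yields exactly the tags that completed since the last pass, and a second XOR retires them from outstanding_reqs. The bit trick in isolation, with a worked example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding_reqs = 0x2e;  /* tags 1,2,3,5 were issued  */
        unsigned long tr_doorbell      = 0x22;  /* hw still busy on tags 1,5 */
        unsigned long completed_reqs;
        unsigned int  tag;

        completed_reqs = tr_doorbell ^ outstanding_reqs;    /* -> tags 2,3 */

        for (tag = 0; tag < 8 * sizeof(completed_reqs); tag++)
            if (completed_reqs & (1UL << tag))              /* for_each_set_bit() */
                printf("tag %u done\n", tag);

        outstanding_reqs ^= completed_reqs;     /* retire them from our view */
        printf("still outstanding: 0x%lx\n", outstanding_reqs);
        return 0;
    }
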
3119 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
3124 if (!(hba->ee_ctrl_mask & mask)) in ufshcd_disable_ee()
3127 val = hba->ee_ctrl_mask & ~mask; in ufshcd_disable_ee()
3129 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_disable_ee()
3132 hba->ee_ctrl_mask &= ~mask; in ufshcd_disable_ee()
3147 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
3152 if (hba->ee_ctrl_mask & mask) in ufshcd_enable_ee()
3155 val = hba->ee_ctrl_mask | mask; in ufshcd_enable_ee()
3157 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_enable_ee()
3160 hba->ee_ctrl_mask |= mask; in ufshcd_enable_ee()
3176 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
3180 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
3183 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
3186 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
3191 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
3194 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
3196 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
3214 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
3218 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
3225 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
3227 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
3232 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
3235 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
3237 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
3241 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
3254 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
3256 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
3257 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
3258 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
3261 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
3263 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
3283 static int ufshcd_bkops_ctrl(struct ufs_hba *hba, in ufshcd_bkops_ctrl() argument
3289 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
3291 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
3295 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
3302 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
3304 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
3319 static int ufshcd_urgent_bkops(struct ufs_hba *hba) in ufshcd_urgent_bkops() argument
3321 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); in ufshcd_urgent_bkops()
3324 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
3326 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
3339 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
3342 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
3344 pm_runtime_get_sync(hba->dev); in ufshcd_exception_event_handler()
3345 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
3347 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
3352 status &= hba->ee_ctrl_mask; in ufshcd_exception_event_handler()
3354 err = ufshcd_urgent_bkops(hba); in ufshcd_exception_event_handler()
3356 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_exception_event_handler()
3360 pm_runtime_put_sync(hba->dev); in ufshcd_exception_event_handler()
3370 struct ufs_hba *hba; in ufshcd_err_handler() local
3377 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
3379 pm_runtime_get_sync(hba->dev); in ufshcd_err_handler()
3380 ufshcd_hold(hba, false); in ufshcd_err_handler()
3382 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
3383 if (hba->ufshcd_state == UFSHCD_STATE_RESET) { in ufshcd_err_handler()
3384 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3388 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
3389 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
3392 ufshcd_transfer_req_compl(hba); in ufshcd_err_handler()
3393 ufshcd_tmc_handler(hba); in ufshcd_err_handler()
3394 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3397 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) in ufshcd_err_handler()
3398 if (ufshcd_clear_cmd(hba, tag)) in ufshcd_err_handler()
3402 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) in ufshcd_err_handler()
3403 if (ufshcd_clear_tm_cmd(hba, tag)) in ufshcd_err_handler()
3407 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
3408 ufshcd_transfer_req_compl(hba); in ufshcd_err_handler()
3409 ufshcd_tmc_handler(hba); in ufshcd_err_handler()
3410 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3413 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_err_handler()
3414 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
3415 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { in ufshcd_err_handler()
3416 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
3418 dev_err(hba->dev, "%s: reset and restore failed\n", in ufshcd_err_handler()
3420 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_err_handler()
3426 scsi_report_bus_reset(hba->host, 0); in ufshcd_err_handler()
3427 hba->saved_err = 0; in ufshcd_err_handler()
3428 hba->saved_uic_err = 0; in ufshcd_err_handler()
3430 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
3433 scsi_unblock_requests(hba->host); in ufshcd_err_handler()
3434 ufshcd_release(hba); in ufshcd_err_handler()
3435 pm_runtime_put_sync(hba->dev); in ufshcd_err_handler()
3442 static void ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
3447 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
3449 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
3452 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
3454 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
3456 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
3458 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
3460 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
3462 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
3464 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
3465 __func__, hba->uic_error); in ufshcd_update_uic_error()
3472 static void ufshcd_check_errors(struct ufs_hba *hba) in ufshcd_check_errors() argument
3476 if (hba->errors & INT_FATAL_ERRORS) in ufshcd_check_errors()
3479 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
3480 hba->uic_error = 0; in ufshcd_check_errors()
3481 ufshcd_update_uic_error(hba); in ufshcd_check_errors()
3482 if (hba->uic_error) in ufshcd_check_errors()
3488 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { in ufshcd_check_errors()
3490 scsi_block_requests(hba->host); in ufshcd_check_errors()
3493 hba->saved_err |= hba->errors; in ufshcd_check_errors()
3494 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
3496 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_check_errors()
3497 schedule_work(&hba->eh_work); in ufshcd_check_errors()
3512 static void ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
3516 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
3517 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; in ufshcd_tmc_handler()
3518 wake_up(&hba->tm_wq); in ufshcd_tmc_handler()
3526 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
3528 hba->errors = UFSHCD_ERROR_MASK & intr_status; in ufshcd_sl_intr()
3529 if (hba->errors) in ufshcd_sl_intr()
3530 ufshcd_check_errors(hba); in ufshcd_sl_intr()
3533 ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
3536 ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
3539 ufshcd_transfer_req_compl(hba); in ufshcd_sl_intr()
3554 struct ufs_hba *hba = __hba; in ufshcd_intr() local
3556 spin_lock(hba->host->host_lock); in ufshcd_intr()
3557 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
3560 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
3561 ufshcd_sl_intr(hba, intr_status); in ufshcd_intr()
3564 spin_unlock(hba->host->host_lock); in ufshcd_intr()
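
ufshcd_intr() (lines 3554-3564) together with ufshcd_sl_intr() (lines 3526-3539) is a classic write-to-clear ISR: read REG_INTERRUPT_STATUS under the host lock, write the same value back to acknowledge it, then fan out to the error, UIC, task-management, and transfer handlers. A skeleton with illustrative bit positions:

    #include <stdint.h>

    /* Per-source handlers; empty stubs for the sketch. */
    static void handle_errors(uint32_t st)    { (void)st; }
    static void handle_uic_compl(uint32_t st) { (void)st; }
    static void handle_tm_compl(void)         { }
    static void handle_xfer_compl(void)       { }

    /* Bit positions below are illustrative, not the UFSHCI-defined values. */
    #define ERR_MASK        0xfff00000u
    #define UIC_CMD_COMPL   (1U << 10)
    #define TM_REQ_COMPL    (1U << 9)
    #define XFER_REQ_COMPL  (1U << 0)

    static int isr(volatile uint32_t *intr_status)
    {
        uint32_t status = *intr_status;     /* which sources fired? */

        if (!status)
            return 0;                       /* not ours (IRQF_SHARED line) */

        *intr_status = status;              /* write-to-clear what we saw */

        if (status & ERR_MASK)
            handle_errors(status);
        if (status & UIC_CMD_COMPL)
            handle_uic_compl(status);
        if (status & TM_REQ_COMPL)
            handle_tm_compl();
        if (status & XFER_REQ_COMPL)
            handle_xfer_compl();

        return 1;                           /* IRQ_HANDLED */
    }
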
3568 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
3574 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
3577 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
3578 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_clear_tm_cmd()
3579 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
3582 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
3599 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
3610 host = hba->host; in ufshcd_issue_tm_cmd()
3617 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); in ufshcd_issue_tm_cmd()
3618 ufshcd_hold(hba, false); in ufshcd_issue_tm_cmd()
3621 task_req_descp = hba->utmrdl_base_addr; in ufshcd_issue_tm_cmd()
3632 task_tag = hba->nutrs + free_slot; in ufshcd_issue_tm_cmd()
3646 __set_bit(free_slot, &hba->outstanding_tasks); in ufshcd_issue_tm_cmd()
3647 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_issue_tm_cmd()
3652 err = wait_event_timeout(hba->tm_wq, in ufshcd_issue_tm_cmd()
3653 test_bit(free_slot, &hba->tm_condition), in ufshcd_issue_tm_cmd()
3656 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in ufshcd_issue_tm_cmd()
3658 if (ufshcd_clear_tm_cmd(hba, free_slot)) in ufshcd_issue_tm_cmd()
3659 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in ufshcd_issue_tm_cmd()
3663 err = ufshcd_task_req_compl(hba, free_slot, tm_response); in ufshcd_issue_tm_cmd()
3666 clear_bit(free_slot, &hba->tm_condition); in ufshcd_issue_tm_cmd()
3667 ufshcd_put_tm_slot(hba, free_slot); in ufshcd_issue_tm_cmd()
3668 wake_up(&hba->tm_tag_wq); in ufshcd_issue_tm_cmd()
3670 ufshcd_release(hba); in ufshcd_issue_tm_cmd()
3684 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
3693 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
3696 lrbp = &hba->lrb[tag]; in ufshcd_eh_device_reset_handler()
3697 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
3705 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
3706 if (hba->lrb[pos].lun == lrbp->lun) { in ufshcd_eh_device_reset_handler()
3707 err = ufshcd_clear_cmd(hba, pos); in ufshcd_eh_device_reset_handler()
3713 ufshcd_transfer_req_compl(hba); in ufshcd_eh_device_reset_handler()
3719 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
3740 struct ufs_hba *hba; in ufshcd_abort() local
3750 hba = shost_priv(host); in ufshcd_abort()
3753 ufshcd_hold(hba, false); in ufshcd_abort()
3755 if (!(test_bit(tag, &hba->outstanding_reqs))) in ufshcd_abort()
3758 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
3760 dev_err(hba->dev, in ufshcd_abort()
3765 lrbp = &hba->lrb[tag]; in ufshcd_abort()
3767 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_abort()
3777 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
3797 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_abort()
3805 err = ufshcd_clear_cmd(hba, tag); in ufshcd_abort()
3812 __clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
3813 hba->lrb[tag].cmd = NULL; in ufshcd_abort()
3816 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_abort()
3817 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_abort()
3823 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_abort()
3831 ufshcd_release(hba); in ufshcd_abort()
3845 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
3851 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_host_reset_and_restore()
3852 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
3853 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_host_reset_and_restore()
3855 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
3860 err = ufshcd_probe_hba(hba); in ufshcd_host_reset_and_restore()
3862 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) in ufshcd_host_reset_and_restore()
3866 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
3880 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
3887 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
3894 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
3895 ufshcd_transfer_req_compl(hba); in ufshcd_reset_and_restore()
3896 ufshcd_tmc_handler(hba); in ufshcd_reset_and_restore()
3897 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
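ufshcd_reset_and_restore() wraps the host reset at 3887 in a retry loop before flushing transfer and task-management completions under the host lock. A sketch of that wrapper; the retry budget shown is illustrative, as the driver's actual constant does not appear in this listing:

int err;
int retries = 5;        /* illustrative; the driver defines its own bound */

do {
        err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);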
3912 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
3914 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
3916 ufshcd_hold(hba, false); in ufshcd_eh_host_reset_handler()
3924 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
3925 if (!(work_pending(&hba->eh_work) || in ufshcd_eh_host_reset_handler()
3926 hba->ufshcd_state == UFSHCD_STATE_RESET)) in ufshcd_eh_host_reset_handler()
3928 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
3929 dev_dbg(hba->dev, "%s: reset in progress\n", __func__); in ufshcd_eh_host_reset_handler()
3930 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
3933 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_eh_host_reset_handler()
3934 ufshcd_set_eh_in_progress(hba); in ufshcd_eh_host_reset_handler()
3935 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
3937 err = ufshcd_reset_and_restore(hba); in ufshcd_eh_host_reset_handler()
3939 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
3942 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_eh_host_reset_handler()
3945 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_eh_host_reset_handler()
3947 ufshcd_clear_eh_in_progress(hba); in ufshcd_eh_host_reset_handler()
3948 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
3950 ufshcd_release(hba); in ufshcd_eh_host_reset_handler()
4008 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
4013 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
4014 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
4015 dev_err(hba->dev, in ufshcd_find_max_sup_active_icc_level()
4021 if (hba->vreg_info.vcc) in ufshcd_find_max_sup_active_icc_level()
4023 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
4027 if (hba->vreg_info.vccq) in ufshcd_find_max_sup_active_icc_level()
4029 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
4033 if (hba->vreg_info.vccq2) in ufshcd_find_max_sup_active_icc_level()
4035 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
4042 static void ufshcd_init_icc_levels(struct ufs_hba *hba) in ufshcd_init_icc_levels() argument
4048 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); in ufshcd_init_icc_levels()
4050 dev_err(hba->dev, in ufshcd_init_icc_levels()
4056 hba->init_prefetch_data.icc_level = in ufshcd_init_icc_levels()
4057 ufshcd_find_max_sup_active_icc_level(hba, in ufshcd_init_icc_levels()
4059 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", in ufshcd_init_icc_levels()
4060 __func__, hba->init_prefetch_data.icc_level); in ufshcd_init_icc_levels()
4062 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_init_icc_levels()
4064 &hba->init_prefetch_data.icc_level); in ufshcd_init_icc_levels()
4067 dev_err(hba->dev, in ufshcd_init_icc_levels()
4069 __func__, hba->init_prefetch_data.icc_level, ret); in ufshcd_init_icc_levels()
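The attribute write at 4062-4064 elides its IDN, index, and selector arguments. A sketch of the full query, assuming the bActiveICCLevel attribute (QUERY_ATTR_IDN_ACTIVE_ICC_LVL) with index and selector both 0:

ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
                        &hba->init_prefetch_data.icc_level);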
4099 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
4105 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4107 if (IS_ERR(hba->sdev_ufs_device)) { in ufshcd_scsi_add_wlus()
4108 ret = PTR_ERR(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
4109 hba->sdev_ufs_device = NULL; in ufshcd_scsi_add_wlus()
4112 scsi_device_put(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
4114 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4122 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4134 scsi_remove_device(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
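Each __scsi_add_device() call in ufshcd_scsi_add_wlus() elides its LUN argument. A sketch of the elided mapping for the device well-known LUN, assuming the driver's helper that folds a UFS W-LUN id into the SCSI W-LUN range:

hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
        ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);

The boot and RPMB W-LUNs at 4114 and 4122 follow the same pattern with UFS_UPIU_BOOT_WLUN and UFS_UPIU_RPMB_WLUN.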
4145 static int ufshcd_probe_hba(struct ufs_hba *hba) in ufshcd_probe_hba() argument
4149 ret = ufshcd_link_startup(hba); in ufshcd_probe_hba()
4153 ufshcd_init_pwr_info(hba); in ufshcd_probe_hba()
4156 ufshcd_set_link_active(hba); in ufshcd_probe_hba()
4158 ret = ufshcd_verify_dev_init(hba); in ufshcd_probe_hba()
4162 ret = ufshcd_complete_dev_init(hba); in ufshcd_probe_hba()
4167 ufshcd_set_ufs_dev_active(hba); in ufshcd_probe_hba()
4168 ufshcd_force_reset_auto_bkops(hba); in ufshcd_probe_hba()
4169 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
4170 hba->wlun_dev_clr_ua = true; in ufshcd_probe_hba()
4172 if (ufshcd_get_max_pwr_mode(hba)) { in ufshcd_probe_hba()
4173 dev_err(hba->dev, in ufshcd_probe_hba()
4177 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
4179 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
4187 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { in ufshcd_probe_hba()
4191 memset(&hba->dev_info, 0, sizeof(hba->dev_info)); in ufshcd_probe_hba()
4192 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_probe_hba()
4194 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_probe_hba()
4196 if (!hba->is_init_prefetch) in ufshcd_probe_hba()
4197 ufshcd_init_icc_levels(hba); in ufshcd_probe_hba()
4200 if (ufshcd_scsi_add_wlus(hba)) in ufshcd_probe_hba()
4203 scsi_scan_host(hba->host); in ufshcd_probe_hba()
4204 pm_runtime_put_sync(hba->dev); in ufshcd_probe_hba()
4207 if (!hba->is_init_prefetch) in ufshcd_probe_hba()
4208 hba->is_init_prefetch = true; in ufshcd_probe_hba()
4211 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_probe_hba()
4212 devfreq_resume_device(hba->devfreq); in ufshcd_probe_hba()
4219 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { in ufshcd_probe_hba()
4220 pm_runtime_put_sync(hba->dev); in ufshcd_probe_hba()
4221 ufshcd_hba_exit(hba); in ufshcd_probe_hba()
4234 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
4236 ufshcd_probe_hba(hba); in ufshcd_async_scan()
4277 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
4280 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
4283 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
4286 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
4358 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
4361 struct device *dev = hba->dev; in ufshcd_setup_vreg()
4362 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
4388 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
4390 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
4393 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
4415 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
4418 struct device *dev = hba->dev; in ufshcd_init_vreg()
4419 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
4437 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
4439 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
4442 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
4447 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, in __ufshcd_setup_clocks() argument
4452 struct list_head *head = &hba->clk_list_head; in __ufshcd_setup_clocks()
4466 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in __ufshcd_setup_clocks()
4474 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in __ufshcd_setup_clocks()
4479 if (hba->vops && hba->vops->setup_clocks) in __ufshcd_setup_clocks()
4480 ret = hba->vops->setup_clocks(hba, on); in __ufshcd_setup_clocks()
4488 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_setup_clocks()
4489 hba->clk_gating.state = CLKS_ON; in __ufshcd_setup_clocks()
4490 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_setup_clocks()
4495 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
4497 return __ufshcd_setup_clocks(hba, on, false); in ufshcd_setup_clocks()
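The third argument of __ufshcd_setup_clocks() lets a caller gate every clock except the device reference clock; ufshcd_suspend() uses that form (see the __ufshcd_setup_clocks(hba, false, true) call at 4975 later in this listing). A sketch of the skip test inside the clock loop, assuming this driver's "ref_clk" naming convention for the clock bindings:

list_for_each_entry(clki, head, list) {
        if (IS_ERR_OR_NULL(clki->clk))
                continue;
        /* keep the device reference clock ticking */
        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                continue;
        if (on && !clki->enabled) {
                ret = clk_prepare_enable(clki->clk);
                if (ret)
                        goto out;
        } else if (!on && clki->enabled) {
                clk_disable_unprepare(clki->clk);
        }
        clki->enabled = on;
}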
4500 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
4504 struct device *dev = hba->dev; in ufshcd_init_clocks()
4505 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
4525 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
4539 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
4543 if (!hba->vops) in ufshcd_variant_hba_init()
4546 if (hba->vops->init) { in ufshcd_variant_hba_init()
4547 err = hba->vops->init(hba); in ufshcd_variant_hba_init()
4552 if (hba->vops->setup_regulators) { in ufshcd_variant_hba_init()
4553 err = hba->vops->setup_regulators(hba, true); in ufshcd_variant_hba_init()
4561 if (hba->vops->exit) in ufshcd_variant_hba_init()
4562 hba->vops->exit(hba); in ufshcd_variant_hba_init()
4565 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
4566 __func__, hba->vops ? hba->vops->name : "", err); in ufshcd_variant_hba_init()
4570 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
4572 if (!hba->vops) in ufshcd_variant_hba_exit()
4575 if (hba->vops->setup_clocks) in ufshcd_variant_hba_exit()
4576 hba->vops->setup_clocks(hba, false); in ufshcd_variant_hba_exit()
4578 if (hba->vops->setup_regulators) in ufshcd_variant_hba_exit()
4579 hba->vops->setup_regulators(hba, false); in ufshcd_variant_hba_exit()
4581 if (hba->vops->exit) in ufshcd_variant_hba_exit()
4582 hba->vops->exit(hba); in ufshcd_variant_hba_exit()
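ufshcd_variant_hba_init() and ufshcd_variant_hba_exit() dispatch through the optional hba->vops table. A sketch of the table a vendor glue driver might register, limited to the hooks actually dispatched in this listing (the handler names are hypothetical):

static struct ufs_hba_variant_ops my_ufs_vops = {
        .name             = "my-ufs",   /* printed in the init error message */
        .init             = my_ufs_init,
        .exit             = my_ufs_exit,
        .setup_clocks     = my_ufs_setup_clocks,
        .setup_regulators = my_ufs_setup_regulators,
        .suspend          = my_ufs_suspend,
        .resume           = my_ufs_resume,
};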
4585 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
4596 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
4600 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
4604 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
4608 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
4612 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
4616 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
4620 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
4624 hba->is_powered = true; in ufshcd_hba_init()
4628 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
4630 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
4632 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
4637 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
4639 if (hba->is_powered) { in ufshcd_hba_exit()
4640 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
4641 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
4642 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
4643 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
4644 hba->is_powered = false; in ufshcd_hba_exit()
4649 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) in ufshcd_send_request_sense() argument
4686 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
4695 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
4696 sdp = hba->sdev_ufs_device; in ufshcd_set_dev_pwr_mode()
4706 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
4717 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
4718 if (hba->wlun_dev_clr_ua) { in ufshcd_set_dev_pwr_mode()
4719 ret = ufshcd_send_request_sense(hba, sdp); in ufshcd_set_dev_pwr_mode()
4723 hba->wlun_dev_clr_ua = false; in ufshcd_set_dev_pwr_mode()
4744 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
4747 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
4751 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
4757 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
4761 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
4763 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
4773 !hba->auto_bkops_enabled))) { in ufshcd_link_state_transition()
4778 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
4783 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
4790 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
4804 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
4805 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
4806 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
4807 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
4808 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
4809 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_lpm()
4810 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
4811 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
4816 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
4820 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
4821 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
4822 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
4823 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
4824 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
4825 if (!ret && !ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
4826 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
4829 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
4837 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
4839 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
4844 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
4846 if (ufshcd_is_link_off(hba)) in ufshcd_hba_vreg_set_lpm()
4847 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
4850 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
4852 if (ufshcd_is_link_off(hba)) in ufshcd_hba_vreg_set_hpm()
4853 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
4872 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_suspend() argument
4879 hba->pm_op_in_progress = 1; in ufshcd_suspend()
4882 hba->rpm_lvl : hba->spm_lvl; in ufshcd_suspend()
4894 ufshcd_hold(hba, false); in ufshcd_suspend()
4895 hba->clk_gating.is_suspended = true; in ufshcd_suspend()
4902 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in ufshcd_suspend()
4903 (req_link_state == hba->uic_link_state)) in ufshcd_suspend()
4907 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in ufshcd_suspend()
4913 if (ufshcd_can_autobkops_during_suspend(hba)) { in ufshcd_suspend()
4919 ret = ufshcd_urgent_bkops(hba); in ufshcd_suspend()
4924 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
4928 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && in ufshcd_suspend()
4929 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || in ufshcd_suspend()
4932 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
4933 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in ufshcd_suspend()
4938 ret = ufshcd_link_state_transition(hba, req_link_state, 1); in ufshcd_suspend()
4942 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
4950 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_suspend()
4951 devfreq_suspend_device(hba->devfreq); in ufshcd_suspend()
4952 hba->clk_scaling.window_start_t = 0; in ufshcd_suspend()
4959 if (hba->vops && hba->vops->suspend) { in ufshcd_suspend()
4960 ret = hba->vops->suspend(hba, pm_op); in ufshcd_suspend()
4965 if (hba->vops && hba->vops->setup_clocks) { in ufshcd_suspend()
4966 ret = hba->vops->setup_clocks(hba, false); in ufshcd_suspend()
4971 if (!ufshcd_is_link_active(hba)) in ufshcd_suspend()
4972 ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
4975 __ufshcd_setup_clocks(hba, false, true); in ufshcd_suspend()
4977 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
4982 ufshcd_disable_irq(hba); in ufshcd_suspend()
4984 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
4988 if (hba->vops && hba->vops->resume) in ufshcd_suspend()
4989 hba->vops->resume(hba, pm_op); in ufshcd_suspend()
4991 ufshcd_vreg_set_hpm(hba); in ufshcd_suspend()
4992 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in ufshcd_suspend()
4993 ufshcd_set_link_active(hba); in ufshcd_suspend()
4994 else if (ufshcd_is_link_off(hba)) in ufshcd_suspend()
4995 ufshcd_host_reset_and_restore(hba); in ufshcd_suspend()
4997 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in ufshcd_suspend()
4998 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
5000 hba->clk_gating.is_suspended = false; in ufshcd_suspend()
5001 ufshcd_release(hba); in ufshcd_suspend()
5003 hba->pm_op_in_progress = 0; in ufshcd_suspend()
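The rpm_lvl/spm_lvl value read at 4882 indexes a fixed table pairing a target device power mode with a target link state. A hedged reconstruction of that pairing for this driver generation; UFS_ACTIVE_PWR_MODE does appear at 4997 and 5068, but the other enum spellings are assumptions:

/* UFS_PM_LVL_0 .. UFS_PM_LVL_5: {device power mode, UIC link state} */
{UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE},
{UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE},
{UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},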
5017 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_resume() argument
5022 hba->pm_op_in_progress = 1; in ufshcd_resume()
5023 old_link_state = hba->uic_link_state; in ufshcd_resume()
5025 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
5027 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
5032 ret = ufshcd_enable_irq(hba); in ufshcd_resume()
5036 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
5045 if (hba->vops && hba->vops->resume) { in ufshcd_resume()
5046 ret = hba->vops->resume(hba, pm_op); in ufshcd_resume()
5051 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_resume()
5052 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_resume()
5054 ufshcd_set_link_active(hba); in ufshcd_resume()
5057 } else if (ufshcd_is_link_off(hba)) { in ufshcd_resume()
5058 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_resume()
5063 if (ret || !ufshcd_is_link_active(hba)) in ufshcd_resume()
5067 if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_resume()
5068 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in ufshcd_resume()
5077 ufshcd_urgent_bkops(hba); in ufshcd_resume()
5078 hba->clk_gating.is_suspended = false; in ufshcd_resume()
5080 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_resume()
5081 devfreq_resume_device(hba->devfreq); in ufshcd_resume()
5084 ufshcd_release(hba); in ufshcd_resume()
5088 ufshcd_link_state_transition(hba, old_link_state, 0); in ufshcd_resume()
5090 if (hba->vops && hba->vops->suspend) in ufshcd_resume()
5091 hba->vops->suspend(hba, pm_op); in ufshcd_resume()
5093 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
5095 ufshcd_disable_irq(hba); in ufshcd_resume()
5096 ufshcd_setup_clocks(hba, false); in ufshcd_resume()
5098 hba->pm_op_in_progress = 0; in ufshcd_resume()
5111 int ufshcd_system_suspend(struct ufs_hba *hba) in ufshcd_system_suspend() argument
5115 if (!hba || !hba->is_powered) in ufshcd_system_suspend()
5118 if (pm_runtime_suspended(hba->dev)) { in ufshcd_system_suspend()
5119 if (hba->rpm_lvl == hba->spm_lvl) in ufshcd_system_suspend()
5124 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == in ufshcd_system_suspend()
5125 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) in ufshcd_system_suspend()
5136 ret = ufshcd_runtime_resume(hba); in ufshcd_system_suspend()
5141 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); in ufshcd_system_suspend()
5144 hba->is_sys_suspended = true; in ufshcd_system_suspend()
5156 int ufshcd_system_resume(struct ufs_hba *hba) in ufshcd_system_resume() argument
5158 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
5165 return ufshcd_resume(hba, UFS_SYSTEM_PM); in ufshcd_system_resume()
5177 int ufshcd_runtime_suspend(struct ufs_hba *hba) in ufshcd_runtime_suspend() argument
5179 if (!hba || !hba->is_powered) in ufshcd_runtime_suspend()
5182 return ufshcd_suspend(hba, UFS_RUNTIME_PM); in ufshcd_runtime_suspend()
5207 int ufshcd_runtime_resume(struct ufs_hba *hba) in ufshcd_runtime_resume() argument
5209 if (!hba || !hba->is_powered) in ufshcd_runtime_resume()
5212 return ufshcd_resume(hba, UFS_RUNTIME_PM); in ufshcd_runtime_resume()
5216 int ufshcd_runtime_idle(struct ufs_hba *hba) in ufshcd_runtime_idle() argument
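The system and runtime PM entry points above are meant to be called from a bus glue driver's dev_pm_ops. A minimal sketch of that wiring; the my_ufs_* wrapper names are invented for illustration:

static int my_ufs_suspend(struct device *dev)
{
        return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int my_ufs_resume(struct device *dev)
{
        return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int my_ufs_runtime_suspend(struct device *dev)
{
        return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int my_ufs_runtime_resume(struct device *dev)
{
        return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int my_ufs_runtime_idle(struct device *dev)
{
        return ufshcd_runtime_idle(dev_get_drvdata(dev));
}

static const struct dev_pm_ops my_ufs_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
        SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend, my_ufs_runtime_resume,
                           my_ufs_runtime_idle)
};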
5230 int ufshcd_shutdown(struct ufs_hba *hba) in ufshcd_shutdown() argument
5234 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_shutdown()
5237 if (pm_runtime_suspended(hba->dev)) { in ufshcd_shutdown()
5238 ret = ufshcd_runtime_resume(hba); in ufshcd_shutdown()
5243 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_shutdown()
5246 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); in ufshcd_shutdown()
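A glue driver typically points its bus-level .shutdown hook straight at ufshcd_shutdown(); a hypothetical platform example:

static void my_ufs_pltfrm_shutdown(struct platform_device *pdev)
{
        ufshcd_shutdown(platform_get_drvdata(pdev));
}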
5257 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
5259 scsi_remove_host(hba->host); in ufshcd_remove()
5261 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
5262 ufshcd_hba_stop(hba); in ufshcd_remove()
5264 scsi_host_put(hba->host); in ufshcd_remove()
5266 ufshcd_exit_clk_gating(hba); in ufshcd_remove()
5267 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_remove()
5268 devfreq_remove_device(hba->devfreq); in ufshcd_remove()
5269 ufshcd_hba_exit(hba); in ufshcd_remove()
5280 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
5282 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
5283 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
5286 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
5298 struct ufs_hba *hba; in ufshcd_alloc_host() local
5315 hba = shost_priv(host); in ufshcd_alloc_host()
5316 hba->host = host; in ufshcd_alloc_host()
5317 hba->dev = dev; in ufshcd_alloc_host()
5318 *hba_handle = hba; in ufshcd_alloc_host()
5325 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_clks() argument
5329 struct list_head *head = &hba->clk_list_head; in ufshcd_scale_clks()
5341 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_scale_clks()
5353 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_scale_clks()
5361 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_scale_clks()
5364 if (hba->vops->clk_scale_notify) in ufshcd_scale_clks()
5365 hba->vops->clk_scale_notify(hba); in ufshcd_scale_clks()
5374 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
5376 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_devfreq_target()
5380 err = ufshcd_scale_clks(hba, true); in ufshcd_devfreq_target()
5382 err = ufshcd_scale_clks(hba, false); in ufshcd_devfreq_target()
5390 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
5391 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
5394 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_devfreq_get_dev_status()
5399 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
5414 if (hba->outstanding_reqs) { in ufshcd_devfreq_get_dev_status()
5421 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
5438 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
5441 struct Scsi_Host *host = hba->host; in ufshcd_init()
5442 struct device *dev = hba->dev; in ufshcd_init()
5445 dev_err(hba->dev, in ufshcd_init()
5451 hba->mmio_base = mmio_base; in ufshcd_init()
5452 hba->irq = irq; in ufshcd_init()
5454 err = ufshcd_hba_init(hba); in ufshcd_init()
5459 ufshcd_hba_capabilities(hba); in ufshcd_init()
5462 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
5465 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
5467 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
5469 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
5474 err = ufshcd_memory_alloc(hba); in ufshcd_init()
5476 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
5481 ufshcd_host_memory_configure(hba); in ufshcd_init()
5483 host->can_queue = hba->nutrs; in ufshcd_init()
5484 host->cmd_per_lun = hba->nutrs; in ufshcd_init()
5491 hba->max_pwr_info.is_valid = false; in ufshcd_init()
5494 init_waitqueue_head(&hba->tm_wq); in ufshcd_init()
5495 init_waitqueue_head(&hba->tm_tag_wq); in ufshcd_init()
5498 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
5499 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
5502 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
5505 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
5508 init_waitqueue_head(&hba->dev_cmd.tag_wq); in ufshcd_init()
5510 ufshcd_init_clk_gating(hba); in ufshcd_init()
5512 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
5514 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
5517 hba->is_irq_enabled = true; in ufshcd_init()
5523 dev_err(hba->dev, "init shared queue failed\n"); in ufshcd_init()
5527 err = scsi_add_host(host, hba->dev); in ufshcd_init()
5529 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
5534 err = ufshcd_hba_enable(hba); in ufshcd_init()
5536 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
5540 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_init()
5541 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, in ufshcd_init()
5543 if (IS_ERR(hba->devfreq)) { in ufshcd_init()
5544 dev_err(hba->dev, "Unable to register with devfreq %ld\n", in ufshcd_init()
5545 PTR_ERR(hba->devfreq)); in ufshcd_init()
5549 devfreq_suspend_device(hba->devfreq); in ufshcd_init()
5550 hba->clk_scaling.window_start_t = 0; in ufshcd_init()
5560 ufshcd_set_ufs_dev_poweroff(hba); in ufshcd_init()
5562 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
5567 scsi_remove_host(hba->host); in ufshcd_init()
5569 ufshcd_exit_clk_gating(hba); in ufshcd_init()
5571 hba->is_irq_enabled = false; in ufshcd_init()
5573 ufshcd_hba_exit(hba); in ufshcd_init()
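ufshcd_alloc_host() (5298-5318) and ufshcd_init() (5438 onward) are the two calls a bus glue driver composes at probe time. A sketch of a minimal hypothetical platform probe using them; error unwinding is simplified and the my_ufs_probe name is invented:

static int my_ufs_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct ufs_hba *hba;
        void __iomem *mmio;
        int irq, err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmio = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mmio))
                return PTR_ERR(mmio);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err)
                return err;

        platform_set_drvdata(pdev, hba);

        /* powers rails and clocks, enables the HC, and schedules the
         * async device scan (line 5562); simplified unwinding on failure */
        err = ufshcd_init(hba, mmio, irq);
        if (err)
                scsi_host_put(hba->host);
        return err;
}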