Lines Matching refs:hba
175 static void ufshcd_tmc_handler(struct ufs_hba *hba);
177 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
178 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
179 static void ufshcd_hba_exit(struct ufs_hba *hba);
180 static int ufshcd_probe_hba(struct ufs_hba *hba);
181 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
183 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
184 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
185 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
186 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
187 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
189 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
191 static int ufshcd_change_power_mode(struct ufs_hba *hba,
194 static inline int ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
198 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
199 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, in ufshcd_enable_irq()
200 hba); in ufshcd_enable_irq()
202 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", in ufshcd_enable_irq()
204 hba->is_irq_enabled = true; in ufshcd_enable_irq()
210 static inline void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
212 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
213 free_irq(hba->irq, hba); in ufshcd_disable_irq()
214 hba->is_irq_enabled = false; in ufshcd_disable_irq()
229 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
238 while ((ufshcd_readl(hba, reg) & mask) != val) { in ufshcd_wait_for_register()
243 if ((ufshcd_readl(hba, reg) & mask) != val) in ufshcd_wait_for_register()
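
The fragments above poll a controller register until (reg & mask) == val or a timeout expires, with one final re-read after the deadline. A minimal userspace sketch of the same poll-with-timeout pattern (fake_reg, readl_sim() and the interval/timeout values are hypothetical stand-ins, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static volatile uint32_t fake_reg;      /* stands in for a device register */

    static uint32_t readl_sim(void) { return fake_reg; }

    static long elapsed_ms(const struct timespec *start)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000 +
               (now.tv_nsec - start->tv_nsec) / 1000000;
    }

    /* Poll until (reg & mask) == val; 0 on success, -1 on timeout. */
    static int wait_for_register(uint32_t mask, uint32_t val,
                                 long interval_us, long timeout_ms)
    {
        struct timespec start;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while ((readl_sim() & mask) != val) {
            if (elapsed_ms(&start) > timeout_ms)
                /* one last re-read, as on file line 243 above */
                return (readl_sim() & mask) == val ? 0 : -1;
            usleep(interval_us);
        }
        return 0;
    }

    int main(void)
    {
        fake_reg = 0x1;
        printf("wait: %d\n", wait_for_register(0x1, 0x1, 50, 10));
        return 0;
    }
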
258 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
260 if (hba->ufs_version == UFSHCI_VERSION_10) in ufshcd_get_intr_mask()
272 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
274 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
275 return ufshcd_vops_get_ufs_hci_version(hba); in ufshcd_get_ufs_version()
277 return ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
287 static inline int ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
289 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & in ufshcd_is_device_present()
327 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) in ufshcd_get_tm_free_slot() argument
336 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); in ufshcd_get_tm_free_slot()
337 if (tag >= hba->nutmrs) in ufshcd_get_tm_free_slot()
339 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); in ufshcd_get_tm_free_slot()
347 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) in ufshcd_put_tm_slot() argument
349 clear_bit_unlock(slot, &hba->tm_slots_in_use); in ufshcd_put_tm_slot()
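
ufshcd_get_tm_free_slot() above claims a task-management slot lock-free: scan for a zero bit, atomically test-and-set it, and retry if another CPU wins the race (callers additionally sleep on tm_tag_wq when every slot is busy; see ufshcd_issue_tm_cmd() later in this listing). A rough C11-atomics rendition of the claim/release pair (NUTMRS and all names are hypothetical userspace stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NUTMRS 8    /* number of TM slots, as hba->nutmrs */

    static atomic_ulong tm_slots_in_use;

    /* Find a clear bit and atomically set it; false if all slots busy. */
    static bool get_tm_free_slot(int *free_slot)
    {
        unsigned long old, new;
        int tag;

        do {
            old = atomic_load(&tm_slots_in_use);
            for (tag = 0; tag < NUTMRS && (old & (1UL << tag)); tag++)
                ;
            if (tag >= NUTMRS)
                return false;    /* all slots taken */
            new = old | (1UL << tag);
        } while (!atomic_compare_exchange_weak(&tm_slots_in_use, &old, new));

        *free_slot = tag;
        return true;
    }

    static void put_tm_slot(int slot)
    {
        atomic_fetch_and(&tm_slots_in_use, ~(1UL << slot));
    }

    int main(void)
    {
        int slot;

        if (get_tm_free_slot(&slot)) {
            printf("claimed slot %d\n", slot);
            put_tm_slot(slot);
        }
        return 0;
    }
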
357 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utrl_clear() argument
359 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
391 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
393 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
404 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
406 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
466 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
468 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
480 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
482 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
492 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) in ufshcd_disable_intr_aggr() argument
494 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); in ufshcd_disable_intr_aggr()
503 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
505 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
507 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
515 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
517 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
526 static inline int ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
528 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; in ufshcd_is_hba_active()
535 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
538 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
540 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
541 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
542 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
546 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
547 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
550 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
552 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
553 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
554 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
556 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
559 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
561 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
564 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_ungate_work()
565 devfreq_resume_device(hba->devfreq); in ufshcd_ungate_work()
566 scsi_unblock_requests(hba->host); in ufshcd_ungate_work()
575 int ufshcd_hold(struct ufs_hba *hba, bool async) in ufshcd_hold() argument
580 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_hold()
582 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
583 hba->clk_gating.active_reqs++; in ufshcd_hold()
586 switch (hba->clk_gating.state) { in ufshcd_hold()
590 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
591 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
600 scsi_block_requests(hba->host); in ufshcd_hold()
601 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
602 schedule_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
610 hba->clk_gating.active_reqs--; in ufshcd_hold()
614 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
615 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
617 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
620 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
621 __func__, hba->clk_gating.state); in ufshcd_hold()
624 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
632 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
636 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
637 if (hba->clk_gating.is_suspended) { in ufshcd_gate_work()
638 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
642 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
643 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
644 || hba->lrb_in_use || hba->outstanding_tasks in ufshcd_gate_work()
645 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
648 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
651 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
652 if (ufshcd_uic_hibern8_enter(hba)) { in ufshcd_gate_work()
653 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
656 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
659 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_gate_work()
660 devfreq_suspend_device(hba->devfreq); in ufshcd_gate_work()
661 hba->clk_scaling.window_start_t = 0; in ufshcd_gate_work()
664 if (!ufshcd_is_link_active(hba)) in ufshcd_gate_work()
665 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
668 __ufshcd_setup_clocks(hba, false, true); in ufshcd_gate_work()
679 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
680 if (hba->clk_gating.state == REQ_CLKS_OFF) in ufshcd_gate_work()
681 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
684 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
690 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
692 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
695 hba->clk_gating.active_reqs--; in __ufshcd_release()
697 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended in __ufshcd_release()
698 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in __ufshcd_release()
699 || hba->lrb_in_use || hba->outstanding_tasks in __ufshcd_release()
700 || hba->active_uic_cmd || hba->uic_async_done) in __ufshcd_release()
703 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
704 schedule_delayed_work(&hba->clk_gating.gate_work, in __ufshcd_release()
705 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
708 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
712 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
713 __ufshcd_release(hba); in ufshcd_release()
714 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
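
Together, ufshcd_hold() and __ufshcd_release() form a reference count over the controller clocks: the last release does not gate immediately but arms gate_work to fire after clk_gating.delay_ms, and a later hold first tries to cancel that pending work before falling back to scheduling ungate_work. A compressed, single-threaded sketch of just that state machine (workqueues, locking and the hibern8 steps are reduced to comments; not the driver's code):

    #include <stdio.h>

    enum clk_state { CLKS_OFF, CLKS_ON, REQ_CLKS_OFF, REQ_CLKS_ON };

    struct gating {
        int active_reqs;
        enum clk_state state;
    };

    static void hold(struct gating *g)
    {
        g->active_reqs++;
        if (g->state == REQ_CLKS_OFF) {
            /* cancel_delayed_work(gate_work) won: clocks never went off */
            g->state = CLKS_ON;
        } else if (g->state == CLKS_OFF) {
            /* schedule ungate_work: re-enable clocks, exit hibern8 */
            g->state = REQ_CLKS_ON;
        }
    }

    static void release(struct gating *g)
    {
        if (--g->active_reqs)
            return;
        /* last user gone: arm gate_work to run after delay_ms */
        g->state = REQ_CLKS_OFF;
    }

    int main(void)
    {
        struct gating g = { .state = CLKS_ON };

        hold(&g);        /* request keeps clocks on */
        release(&g);     /* last release arms delayed gating */
        printf("state=%d (REQ_CLKS_OFF=%d)\n", g.state, REQ_CLKS_OFF);
        return 0;
    }
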
721 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
723 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
729 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_store() local
735 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
736 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_store()
737 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
741 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
743 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
746 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
747 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
748 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
750 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating()
751 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating()
752 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating()
753 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating()
754 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; in ufshcd_init_clk_gating()
755 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating()
756 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating()
759 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
761 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_exit_clk_gating()
763 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_exit_clk_gating()
764 cancel_work_sync(&hba->clk_gating.ungate_work); in ufshcd_exit_clk_gating()
765 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_exit_clk_gating()
769 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
771 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_clk_scaling_start_busy()
774 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
775 hba->clk_scaling.busy_start_t = ktime_get(); in ufshcd_clk_scaling_start_busy()
776 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
780 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
782 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
784 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_clk_scaling_update_busy()
787 if (!hba->outstanding_reqs && scaling->is_busy_started) { in ufshcd_clk_scaling_update_busy()
800 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) in ufshcd_send_command() argument
802 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
803 __set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
804 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_send_command()
830 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
832 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
847 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
849 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
851 dev_warn(hba->dev, in ufshcd_copy_query_response()
865 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
867 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
870 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
871 hba->nutmrs = in ufshcd_hba_capabilities()
872 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
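
ufshcd_hba_capabilities() decodes both slot counts from a single capabilities register: the transfer-request field is zero-based in the low bits and the task-management field sits at bit 16, hence the "+ 1" and ">> 16". A standalone decode of an example register value (the mask constants below are illustrative values chosen to match the shifts above):

    #include <stdio.h>

    #define MASK_TRANSFER_REQUESTS_SLOTS        0x0000001F
    #define MASK_TASK_MANAGEMENT_REQUEST_SLOTS  0x00070000

    int main(void)
    {
        unsigned int capabilities = 0x0307001F;    /* example register value */
        int nutrs  = (capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
        int nutmrs = ((capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS)
                >> 16) + 1;

        printf("nutrs=%d nutmrs=%d\n", nutrs, nutmrs);    /* 32 and 8 */
        return 0;
    }
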
881 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
883 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) in ufshcd_ready_for_uic_cmd()
896 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
898 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
909 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
911 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
913 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
916 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
917 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
918 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
921 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
934 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
945 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
946 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
947 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
962 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in __ufshcd_send_uic_cmd() argument
964 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
965 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
972 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
985 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
990 ufshcd_hold(hba, false); in ufshcd_send_uic_cmd()
991 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
992 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
994 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
995 ret = __ufshcd_send_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
996 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
998 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
1000 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
1002 ufshcd_release(hba); in ufshcd_send_uic_cmd()
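
ufshcd_send_uic_cmd() serializes UIC commands with uic_cmd_mutex, dispatches under the host spinlock, and then sleeps until the interrupt path completes the command (ufshcd_uic_cmd_compl(), later in this listing). A minimal pthread model of that submit-then-wait handshake (build with -pthread; the condition variable stands in for the kernel completion, and all names are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
    static int cmd_result = -1;    /* -1 = still in flight */

    /* Interrupt-handler side: record the result and complete(). */
    static void *irq_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        cmd_result = 0;    /* e.g. UIC command success */
        pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t irq;

        /* "dispatch" the command, then wait for its completion */
        pthread_create(&irq, NULL, irq_thread, NULL);
        pthread_mutex_lock(&lock);
        while (cmd_result < 0)
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(irq, NULL);
        printf("uic cmd result: %d\n", cmd_result);
        return 0;
    }
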
1051 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
1053 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
1055 if (hba->ufs_version == UFSHCI_VERSION_10) { in ufshcd_enable_intr()
1063 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
1071 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
1073 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
1075 if (hba->ufs_version == UFSHCI_VERSION_10) { in ufshcd_disable_intr()
1085 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
1165 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
1169 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
1211 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_compose_upiu() argument
1228 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_upiu()
1230 hba, lrbp, upiu_flags); in ufshcd_compose_upiu()
1231 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_upiu()
1239 dev_err(hba->dev, "%s: UFS native commands are not supported\n", in ufshcd_compose_upiu()
1244 dev_err(hba->dev, "%s: unknown command type: 0x%x\n", in ufshcd_compose_upiu()
1288 struct ufs_hba *hba; in ufshcd_queuecommand() local
1293 hba = shost_priv(host); in ufshcd_queuecommand()
1297 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
1298 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
1309 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", in ufshcd_queuecommand()
1310 __func__, hba->ufshcd_state); in ufshcd_queuecommand()
1315 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
1318 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { in ufshcd_queuecommand()
1329 err = ufshcd_hold(hba, true); in ufshcd_queuecommand()
1332 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_queuecommand()
1335 WARN_ON(hba->clk_gating.state != CLKS_ON); in ufshcd_queuecommand()
1337 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
1345 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); in ufshcd_queuecommand()
1349 ufshcd_compose_upiu(hba, lrbp); in ufshcd_queuecommand()
1353 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_queuecommand()
1358 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
1359 ufshcd_send_command(hba, tag); in ufshcd_queuecommand()
1361 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
1366 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
1376 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
1378 return ufshcd_compose_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
1382 ufshcd_clear_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_cmd() argument
1389 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmd()
1390 ufshcd_utrl_clear(hba, tag); in ufshcd_clear_cmd()
1391 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmd()
1397 err = ufshcd_wait_for_register(hba, in ufshcd_clear_cmd()
1405 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_check_query_response() argument
1407 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
1421 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
1430 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
1432 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
1437 err = ufshcd_check_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
1439 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
1444 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
1449 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
1457 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
1464 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
1467 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
1468 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
1472 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
1474 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
1478 if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) in ufshcd_wait_for_dev_cmd()
1497 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) in ufshcd_get_dev_cmd_tag() argument
1507 tmp = ~hba->lrb_in_use; in ufshcd_get_dev_cmd_tag()
1508 tag = find_last_bit(&tmp, hba->nutrs); in ufshcd_get_dev_cmd_tag()
1509 if (tag >= hba->nutrs) in ufshcd_get_dev_cmd_tag()
1511 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); in ufshcd_get_dev_cmd_tag()
1519 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) in ufshcd_put_dev_cmd_tag() argument
1521 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_put_dev_cmd_tag()
1533 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
1547 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); in ufshcd_exec_dev_cmd()
1550 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
1552 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
1556 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
1558 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_exec_dev_cmd()
1559 ufshcd_send_command(hba, tag); in ufshcd_exec_dev_cmd()
1560 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_exec_dev_cmd()
1562 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_exec_dev_cmd()
1565 ufshcd_put_dev_cmd_tag(hba, tag); in ufshcd_exec_dev_cmd()
1566 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_exec_dev_cmd()
1580 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
1584 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
1585 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
1603 static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
1610 BUG_ON(!hba); in ufshcd_query_flag()
1612 ufshcd_hold(hba, false); in ufshcd_query_flag()
1613 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
1614 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
1627 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
1634 dev_err(hba->dev, in ufshcd_query_flag()
1641 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_flag()
1644 dev_err(hba->dev, in ufshcd_query_flag()
1655 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
1656 ufshcd_release(hba); in ufshcd_query_flag()
1671 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
1678 BUG_ON(!hba); in ufshcd_query_attr()
1680 ufshcd_hold(hba, false); in ufshcd_query_attr()
1682 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
1688 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
1689 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
1701 dev_err(hba->dev, "%s: Expected query attr opcode but got 0x%.2x\n", in ufshcd_query_attr()
1707 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
1710 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", in ufshcd_query_attr()
1718 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
1720 ufshcd_release(hba); in ufshcd_query_attr()
1738 static int ufshcd_query_descriptor(struct ufs_hba *hba, in ufshcd_query_descriptor() argument
1746 BUG_ON(!hba); in ufshcd_query_descriptor()
1748 ufshcd_hold(hba, false); in ufshcd_query_descriptor()
1750 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in ufshcd_query_descriptor()
1757 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in ufshcd_query_descriptor()
1763 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_descriptor()
1764 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_descriptor()
1766 hba->dev_cmd.query.descriptor = desc_buf; in ufshcd_query_descriptor()
1777 dev_err(hba->dev, in ufshcd_query_descriptor()
1784 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_descriptor()
1787 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", in ufshcd_query_descriptor()
1792 hba->dev_cmd.query.descriptor = NULL; in ufshcd_query_descriptor()
1796 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_descriptor()
1798 ufshcd_release(hba); in ufshcd_query_descriptor()
1813 static int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
1844 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
1852 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d", in ufshcd_read_desc_param()
1868 static inline int ufshcd_read_desc(struct ufs_hba *hba, in ufshcd_read_desc() argument
1874 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); in ufshcd_read_desc()
1877 static inline int ufshcd_read_power_desc(struct ufs_hba *hba, in ufshcd_read_power_desc() argument
1881 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); in ufshcd_read_power_desc()
1894 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
1907 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
1924 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
1929 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); in ufshcd_memory_alloc()
1930 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1932 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
1941 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
1942 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1943 dev_err(hba->dev, in ufshcd_memory_alloc()
1952 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
1953 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1955 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
1957 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
1958 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1959 dev_err(hba->dev, in ufshcd_memory_alloc()
1968 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
1969 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
1971 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
1973 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
1974 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
1975 dev_err(hba->dev, in ufshcd_memory_alloc()
1981 hba->lrb = devm_kzalloc(hba->dev, in ufshcd_memory_alloc()
1982 hba->nutrs * sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
1984 if (!hba->lrb) { in ufshcd_memory_alloc()
1985 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
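
ufshcd_memory_alloc() expects each DMA descriptor pool to come back suitably aligned, which the WARN_ON(addr & (PAGE_SIZE - 1)) checks assert after every dmam_alloc_coherent() call. A userspace analogue of allocate-then-assert-alignment built on posix_memalign (the sizes and the 1K boundary are hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define UTRD_SIZE 32    /* stand-in for sizeof(struct utp_transfer_req_desc) */
    #define NUTRS     32
    #define ALIGN_1K  1024

    int main(void)
    {
        void *utrdl;
        size_t utrdl_size = UTRD_SIZE * NUTRS;

        if (posix_memalign(&utrdl, ALIGN_1K, utrdl_size)) {
            fprintf(stderr, "Transfer Descriptor Memory allocation failed\n");
            return 1;
        }
        /* mirrors WARN_ON(dma_addr & (PAGE_SIZE - 1)) in spirit */
        if ((uintptr_t)utrdl & (ALIGN_1K - 1))
            fprintf(stderr, "descriptor pool not 1K aligned\n");
        else
            printf("utrdl at %p, %zu bytes, 1K aligned\n", utrdl, utrdl_size);

        free(utrdl);
        return 0;
    }
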
2006 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
2017 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
2018 cmd_descp = hba->ucdl_base_addr; in ufshcd_host_memory_configure()
2026 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
2028 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
2045 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); in ufshcd_host_memory_configure()
2046 hba->lrb[i].ucd_req_ptr = in ufshcd_host_memory_configure()
2048 hba->lrb[i].ucd_rsp_ptr = in ufshcd_host_memory_configure()
2050 hba->lrb[i].ucd_prdt_ptr = in ufshcd_host_memory_configure()
2066 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
2073 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
2075 dev_err(hba->dev, in ufshcd_dme_link_startup()
2080 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
2085 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
2092 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
2098 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
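
The UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS path above enforces a minimum quiet period since the previous DME command: measure the microseconds already elapsed and sleep only the remainder, skipping the very first command while last_dme_cmd_tstamp is still zero. A small clock_gettime sketch of that logic (MIN_DELAY_US is a made-up gap, not the driver's value):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define MIN_DELAY_US 1000    /* hypothetical minimum gap between DME cmds */

    static struct timespec last_cmd;    /* all-zero until the first command */

    static long us_since(const struct timespec *t)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - t->tv_sec) * 1000000 +
               (now.tv_nsec - t->tv_nsec) / 1000;
    }

    static void delay_before_dme_cmd(void)
    {
        long elapsed;

        if (last_cmd.tv_sec || last_cmd.tv_nsec) {    /* skip very first cmd */
            elapsed = us_since(&last_cmd);
            if (elapsed < MIN_DELAY_US)
                usleep(MIN_DELAY_US - elapsed);    /* sleep the remainder */
        }
        clock_gettime(CLOCK_MONOTONIC, &last_cmd);
    }

    int main(void)
    {
        delay_before_dme_cmd();
        delay_before_dme_cmd();    /* second call waits out the gap */
        printf("done\n");
        return 0;
    }
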
2121 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
2138 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
2140 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
2156 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
2170 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
2171 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
2186 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); in ufshcd_dme_get_attr()
2196 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
2198 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
2206 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
2208 ufshcd_change_power_mode(hba, &orig_pwr_info); in ufshcd_dme_get_attr()
2230 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
2237 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
2239 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
2241 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2242 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
2243 ret = __ufshcd_send_uic_cmd(hba, cmd); in ufshcd_uic_pwr_ctrl()
2244 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2246 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2251 ret = ufshcd_wait_for_uic_cmd(hba, cmd); in ufshcd_uic_pwr_ctrl()
2253 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2259 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
2261 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2268 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
2270 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
2276 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2277 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
2278 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
2279 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
2292 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
2297 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
2298 ret = ufshcd_dme_set(hba, in ufshcd_uic_change_pwr_mode()
2301 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
2310 ufshcd_hold(hba, false); in ufshcd_uic_change_pwr_mode()
2311 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
2312 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
2318 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
2324 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
2327 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
2333 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
2335 ufshcd_set_link_off(hba); in ufshcd_uic_hibern8_exit()
2336 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_uic_hibern8_exit()
2347 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
2349 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
2350 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
2351 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
2352 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
2353 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
2354 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
2355 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
2362 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
2364 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
2366 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
2374 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
2376 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
2380 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
2392 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
2394 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
2397 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
2404 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
2407 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
2410 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
2417 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
2421 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
2427 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
2428 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
2429 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
2430 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
2431 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
2432 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
2433 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
2434 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
2444 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
2445 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
2449 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); in ufshcd_change_power_mode()
2451 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); in ufshcd_change_power_mode()
2453 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
2454 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
2458 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); in ufshcd_change_power_mode()
2460 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); in ufshcd_change_power_mode()
2466 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
2469 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
2473 dev_err(hba->dev, in ufshcd_change_power_mode()
2476 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, in ufshcd_change_power_mode()
2479 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
2491 static int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
2497 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, in ufshcd_config_pwr_mode()
2503 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
2514 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
2521 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
2525 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_complete_dev_init()
2528 dev_err(hba->dev, in ufshcd_complete_dev_init()
2537 err = ufshcd_query_flag(hba, in ufshcd_complete_dev_init()
2542 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, in ufshcd_complete_dev_init()
2547 dev_err(hba->dev, in ufshcd_complete_dev_init()
2551 dev_err(hba->dev, in ufshcd_complete_dev_init()
2571 static int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
2577 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
2580 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_make_hba_operational()
2581 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
2583 ufshcd_disable_intr_aggr(hba); in ufshcd_make_hba_operational()
2586 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
2588 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
2590 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
2592 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
2599 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
2601 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
2603 dev_err(hba->dev, in ufshcd_make_hba_operational()
2623 static int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
2633 if (!ufshcd_is_hba_active(hba)) { in ufshcd_hba_enable()
2636 ufshcd_hba_stop(hba); in ufshcd_hba_enable()
2647 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
2649 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
2652 ufshcd_hba_start(hba); in ufshcd_hba_enable()
2668 while (ufshcd_is_hba_active(hba)) { in ufshcd_hba_enable()
2672 dev_err(hba->dev, in ufshcd_hba_enable()
2680 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
2682 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
2687 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) in ufshcd_disable_tx_lcc() argument
2692 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
2695 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
2699 err = ufshcd_dme_set(hba, in ufshcd_disable_tx_lcc()
2704 err = ufshcd_dme_peer_set(hba, in ufshcd_disable_tx_lcc()
2709 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
2718 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) in ufshcd_disable_device_tx_lcc() argument
2720 return ufshcd_disable_tx_lcc(hba, true); in ufshcd_disable_device_tx_lcc()
2729 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
2735 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
2737 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
2740 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
2741 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
2751 if (ret && ufshcd_hba_enable(hba)) in ufshcd_link_startup()
2759 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
2760 ret = ufshcd_disable_device_tx_lcc(hba); in ufshcd_link_startup()
2766 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
2770 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
2773 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
2787 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
2792 ufshcd_hold(hba, false); in ufshcd_verify_dev_init()
2793 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
2795 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
2801 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
2803 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
2804 ufshcd_release(hba); in ufshcd_verify_dev_init()
2807 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
2824 struct ufs_hba *hba; in ufshcd_set_queue_depth() local
2826 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
2828 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
2829 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_set_queue_depth()
2840 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
2842 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
2844 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
2860 static int ufshcd_get_lu_wp(struct ufs_hba *hba, in ufshcd_get_lu_wp() argument
2876 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_get_lu_wp()
2891 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, in ufshcd_get_lu_power_on_wp_status() argument
2894 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
2895 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
2898 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
2901 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
2913 struct ufs_hba *hba; in ufshcd_slave_alloc() local
2915 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
2929 ufshcd_get_lu_power_on_wp_status(hba, sdev); in ufshcd_slave_alloc()
2943 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_change_queue_depth() local
2945 if (depth > hba->nutrs) in ufshcd_change_queue_depth()
2946 depth = hba->nutrs; in ufshcd_change_queue_depth()
2970 struct ufs_hba *hba; in ufshcd_slave_destroy() local
2972 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
2977 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
2978 hba->sdev_ufs_device = NULL; in ufshcd_slave_destroy()
2979 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
2991 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) in ufshcd_task_req_compl() argument
2999 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_task_req_compl()
3002 __clear_bit(index, &hba->outstanding_tasks); in ufshcd_task_req_compl()
3004 task_req_descp = hba->utmrdl_base_addr; in ufshcd_task_req_compl()
3015 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_task_req_compl()
3018 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_task_req_compl()
3065 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_transfer_rsp_status() argument
3094 schedule_work(&hba->eeh_work); in ufshcd_transfer_rsp_status()
3099 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3104 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3124 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
3137 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
3139 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
3140 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
3141 ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
3142 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
3143 ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
3144 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
3147 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) in ufshcd_uic_cmd_compl()
3148 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
3155 static void ufshcd_transfer_req_compl(struct ufs_hba *hba) in ufshcd_transfer_req_compl() argument
3171 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_transfer_req_compl()
3172 ufshcd_reset_intr_aggr(hba); in ufshcd_transfer_req_compl()
3174 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_transfer_req_compl()
3175 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; in ufshcd_transfer_req_compl()
3177 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in ufshcd_transfer_req_compl()
3178 lrbp = &hba->lrb[index]; in ufshcd_transfer_req_compl()
3181 result = ufshcd_transfer_rsp_status(hba, lrbp); in ufshcd_transfer_req_compl()
3186 clear_bit_unlock(index, &hba->lrb_in_use); in ufshcd_transfer_req_compl()
3189 __ufshcd_release(hba); in ufshcd_transfer_req_compl()
3191 if (hba->dev_cmd.complete) in ufshcd_transfer_req_compl()
3192 complete(hba->dev_cmd.complete); in ufshcd_transfer_req_compl()
3197 hba->outstanding_reqs ^= completed_reqs; in ufshcd_transfer_req_compl()
3199 ufshcd_clk_scaling_update_busy(hba); in ufshcd_transfer_req_compl()
3202 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_transfer_req_compl()
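
ufshcd_transfer_req_compl() derives the finished requests with tr_doorbell ^ hba->outstanding_reqs: any tag the driver issued whose doorbell bit has dropped back to zero has completed, and the same XOR then retires those tags from the outstanding mask. The arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding = 0x0f;    /* tags 0-3 issued */
        unsigned long doorbell    = 0x0a;    /* tags 1 and 3 still running */
        unsigned long completed   = doorbell ^ outstanding;
        int tag;

        for (tag = 0; tag < 32; tag++)
            if (completed & (1UL << tag))
                printf("tag %d completed\n", tag);    /* tags 0 and 2 */

        outstanding ^= completed;    /* retire the finished tags */
        printf("still outstanding: 0x%lx\n", outstanding);
        return 0;
    }
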
3215 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
3220 if (!(hba->ee_ctrl_mask & mask)) in ufshcd_disable_ee()
3223 val = hba->ee_ctrl_mask & ~mask; in ufshcd_disable_ee()
3225 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_disable_ee()
3228 hba->ee_ctrl_mask &= ~mask; in ufshcd_disable_ee()
3243 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
3248 if (hba->ee_ctrl_mask & mask) in ufshcd_enable_ee()
3251 val = hba->ee_ctrl_mask | mask; in ufshcd_enable_ee()
3253 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_enable_ee()
3256 hba->ee_ctrl_mask |= mask; in ufshcd_enable_ee()
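
ufshcd_enable_ee()/ufshcd_disable_ee() keep a cached copy of the device's exception-event control mask and skip the WRITE_ATTR query when the mask would not change, updating the cache only after the query succeeds. The caching discipline on its own (write_ee_attr() is a stub for the query, and the bit value is arbitrary):

    #include <stdio.h>

    static unsigned short ee_ctrl_mask;

    /* stand-in for the exception-event control attribute write; 0 on success */
    static int write_ee_attr(unsigned short val) { (void)val; return 0; }

    static int enable_ee(unsigned short mask)
    {
        unsigned short val;

        if (ee_ctrl_mask & mask)
            return 0;    /* already enabled, no query needed */
        val = ee_ctrl_mask | mask;
        if (write_ee_attr(val))
            return -1;
        ee_ctrl_mask |= mask;    /* cache only after success */
        return 0;
    }

    int main(void)
    {
        enable_ee(0x04);    /* an urgent-bkops-style bit */
        printf("ee_ctrl_mask=0x%x\n", ee_ctrl_mask);
        return 0;
    }
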
3272 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
3276 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
3279 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
3282 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
3287 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
3290 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
3292 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
3310 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
3314 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
3321 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
3323 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
3328 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
3331 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
3333 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
3337 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
3350 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
3352 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
3353 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
3354 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
3357 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
3359 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
3379 static int ufshcd_bkops_ctrl(struct ufs_hba *hba, in ufshcd_bkops_ctrl() argument
3385 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
3387 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
3391 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
3398 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
3400 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
3415 static int ufshcd_urgent_bkops(struct ufs_hba *hba) in ufshcd_urgent_bkops() argument
3417 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); in ufshcd_urgent_bkops()
3420 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
3422 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
3435 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
3438 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
3440 pm_runtime_get_sync(hba->dev); in ufshcd_exception_event_handler()
3441 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
3443 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
3448 status &= hba->ee_ctrl_mask; in ufshcd_exception_event_handler()
3450 err = ufshcd_urgent_bkops(hba); in ufshcd_exception_event_handler()
3452 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_exception_event_handler()
3456 pm_runtime_put_sync(hba->dev); in ufshcd_exception_event_handler()
3466 struct ufs_hba *hba; in ufshcd_err_handler() local
3473 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
3475 pm_runtime_get_sync(hba->dev); in ufshcd_err_handler()
3476 ufshcd_hold(hba, false); in ufshcd_err_handler()
3478 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
3479 if (hba->ufshcd_state == UFSHCD_STATE_RESET) { in ufshcd_err_handler()
3480 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3484 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
3485 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
3488 ufshcd_transfer_req_compl(hba); in ufshcd_err_handler()
3489 ufshcd_tmc_handler(hba); in ufshcd_err_handler()
3490 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3493 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) in ufshcd_err_handler()
3494 if (ufshcd_clear_cmd(hba, tag)) in ufshcd_err_handler()
3498 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) in ufshcd_err_handler()
3499 if (ufshcd_clear_tm_cmd(hba, tag)) in ufshcd_err_handler()
3503 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
3504 ufshcd_transfer_req_compl(hba); in ufshcd_err_handler()
3505 ufshcd_tmc_handler(hba); in ufshcd_err_handler()
3506 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
3509 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_err_handler()
3510 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
3511 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { in ufshcd_err_handler()
3512 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
3514 dev_err(hba->dev, "%s: reset and restore failed\n", in ufshcd_err_handler()
3516 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_err_handler()
3522 scsi_report_bus_reset(hba->host, 0); in ufshcd_err_handler()
3523 hba->saved_err = 0; in ufshcd_err_handler()
3524 hba->saved_uic_err = 0; in ufshcd_err_handler()
3526 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
3529 scsi_unblock_requests(hba->host); in ufshcd_err_handler()
3530 ufshcd_release(hba); in ufshcd_err_handler()
3531 pm_runtime_put_sync(hba->dev); in ufshcd_err_handler()
3538 static void ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
3543 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
3545 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
3548 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
3550 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
3552 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
3554 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
3556 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
3558 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
3560 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
3561 __func__, hba->uic_error); in ufshcd_update_uic_error()
3568 static void ufshcd_check_errors(struct ufs_hba *hba) in ufshcd_check_errors() argument
3572 if (hba->errors & INT_FATAL_ERRORS) in ufshcd_check_errors()
3575 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
3576 hba->uic_error = 0; in ufshcd_check_errors()
3577 ufshcd_update_uic_error(hba); in ufshcd_check_errors()
3578 if (hba->uic_error) in ufshcd_check_errors()
3584 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { in ufshcd_check_errors()
3586 scsi_block_requests(hba->host); in ufshcd_check_errors()
3589 hba->saved_err |= hba->errors; in ufshcd_check_errors()
3590 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
3592 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_check_errors()
3593 schedule_work(&hba->eh_work); in ufshcd_check_errors()
3608 static void ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
3612 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
3613 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; in ufshcd_tmc_handler()
3614 wake_up(&hba->tm_wq); in ufshcd_tmc_handler()
3622 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
3624 hba->errors = UFSHCD_ERROR_MASK & intr_status; in ufshcd_sl_intr()
3625 if (hba->errors) in ufshcd_sl_intr()
3626 ufshcd_check_errors(hba); in ufshcd_sl_intr()
3629 ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
3632 ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
3635 ufshcd_transfer_req_compl(hba); in ufshcd_sl_intr()
3650 struct ufs_hba *hba = __hba; in ufshcd_intr() local
3652 spin_lock(hba->host->host_lock); in ufshcd_intr()
3653 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
3656 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
3657 ufshcd_sl_intr(hba, intr_status); in ufshcd_intr()
3660 spin_unlock(hba->host->host_lock); in ufshcd_intr()
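
ufshcd_intr() snapshots REG_INTERRUPT_STATUS, writes the snapshot back to acknowledge exactly those bits (write-1-to-clear), and dispatches handlers on the snapshot rather than re-reading the live register. Emulated on a fake register (the &= ~snapshot line models the W1C effect of the writel):

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t intr_status_reg = 0x5;    /* two pending sources */

    int main(void)
    {
        uint32_t snapshot = intr_status_reg;    /* readl(REG_INTERRUPT_STATUS) */

        if (snapshot) {
            intr_status_reg &= ~snapshot;    /* W1C ack: writel(snapshot, ...) */
            printf("handling intr bits 0x%x\n", snapshot);    /* sl_intr() */
        }
        printf("status now 0x%x\n", intr_status_reg);
        return 0;
    }
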
3664 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
3670 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
3673 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
3674 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_clear_tm_cmd()
3675 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
3678 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
3695 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
3706 host = hba->host; in ufshcd_issue_tm_cmd()
3713 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); in ufshcd_issue_tm_cmd()
3714 ufshcd_hold(hba, false); in ufshcd_issue_tm_cmd()
3717 task_req_descp = hba->utmrdl_base_addr; in ufshcd_issue_tm_cmd()
3728 task_tag = hba->nutrs + free_slot; in ufshcd_issue_tm_cmd()
3742 __set_bit(free_slot, &hba->outstanding_tasks); in ufshcd_issue_tm_cmd()
3743 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_issue_tm_cmd()
3748 err = wait_event_timeout(hba->tm_wq, in ufshcd_issue_tm_cmd()
3749 test_bit(free_slot, &hba->tm_condition), in ufshcd_issue_tm_cmd()
3752 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed out\n", in ufshcd_issue_tm_cmd()
3754 if (ufshcd_clear_tm_cmd(hba, free_slot)) in ufshcd_issue_tm_cmd()
3755 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in ufshcd_issue_tm_cmd()
3759 err = ufshcd_task_req_compl(hba, free_slot, tm_response); in ufshcd_issue_tm_cmd()
3762 clear_bit(free_slot, &hba->tm_condition); in ufshcd_issue_tm_cmd()
3763 ufshcd_put_tm_slot(hba, free_slot); in ufshcd_issue_tm_cmd()
3764 wake_up(&hba->tm_tag_wq); in ufshcd_issue_tm_cmd()
3766 ufshcd_release(hba); in ufshcd_issue_tm_cmd()
3780 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
3789 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
3792 lrbp = &hba->lrb[tag]; in ufshcd_eh_device_reset_handler()
3793 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
3801 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
3802 if (hba->lrb[pos].lun == lrbp->lun) { in ufshcd_eh_device_reset_handler()
3803 err = ufshcd_clear_cmd(hba, pos); in ufshcd_eh_device_reset_handler()
3809 ufshcd_transfer_req_compl(hba); in ufshcd_eh_device_reset_handler()
3815 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
3836 struct ufs_hba *hba; in ufshcd_abort() local
3846 hba = shost_priv(host); in ufshcd_abort()
3849 ufshcd_hold(hba, false); in ufshcd_abort()
3851 if (!(test_bit(tag, &hba->outstanding_reqs))) in ufshcd_abort()
3854 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
3856 dev_err(hba->dev, in ufshcd_abort()
3861 lrbp = &hba->lrb[tag]; in ufshcd_abort()
3863 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_abort()
3873 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
3893 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_abort()
3901 err = ufshcd_clear_cmd(hba, tag); in ufshcd_abort()
3908 __clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
3909 hba->lrb[tag].cmd = NULL; in ufshcd_abort()
3912 clear_bit_unlock(tag, &hba->lrb_in_use); in ufshcd_abort()
3913 wake_up(&hba->dev_cmd.tag_wq); in ufshcd_abort()
3919 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_abort()
3927 ufshcd_release(hba); in ufshcd_abort()
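
A detail worth noting in the abort path: the transfer-request doorbell is re-read and acts as the tie-breaker. If the tag's bit is still set, the abort must clear it; if it is already clear, the command completed while the abort was being prepared and nothing further is needed. Sketched below with an illustrative doorbell value:

    #include <stdint.h>
    #include <stdio.h>

    /* Doorbell bit still set: abort must proceed with a clear and
     * completion. Bit clear: completion won the race. */
    static int still_pending(uint32_t doorbell, int tag)
    {
        return (doorbell >> tag) & 1u;
    }

    int main(void)
    {
        uint32_t doorbell = 0x04;
        printf("tag 2 pending: %d, tag 3 pending: %d\n",
               still_pending(doorbell, 2), still_pending(doorbell, 3));
        return 0;
    }
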
3941 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
3947 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_host_reset_and_restore()
3948 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
3949 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_host_reset_and_restore()
3951 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
3956 err = ufshcd_probe_hba(hba); in ufshcd_host_reset_and_restore()
3958 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) in ufshcd_host_reset_and_restore()
3962 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
3976 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
3983 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
3990 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
3991 ufshcd_transfer_req_compl(hba); in ufshcd_reset_and_restore()
3992 ufshcd_tmc_handler(hba); in ufshcd_reset_and_restore()
3993 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
4008 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
4010 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
4012 ufshcd_hold(hba, false); in ufshcd_eh_host_reset_handler()
4020 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
4021 if (!(work_pending(&hba->eh_work) || in ufshcd_eh_host_reset_handler()
4022 hba->ufshcd_state == UFSHCD_STATE_RESET)) in ufshcd_eh_host_reset_handler()
4024 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
4025 dev_dbg(hba->dev, "%s: reset in progress\n", __func__); in ufshcd_eh_host_reset_handler()
4026 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
4029 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_eh_host_reset_handler()
4030 ufshcd_set_eh_in_progress(hba); in ufshcd_eh_host_reset_handler()
4031 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
4033 err = ufshcd_reset_and_restore(hba); in ufshcd_eh_host_reset_handler()
4035 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
4038 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_eh_host_reset_handler()
4041 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_eh_host_reset_handler()
4043 ufshcd_clear_eh_in_progress(hba); in ufshcd_eh_host_reset_handler()
4044 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
4046 ufshcd_release(hba); in ufshcd_eh_host_reset_handler()
4104 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
4109 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
4110 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
4111 dev_err(hba->dev, in ufshcd_find_max_sup_active_icc_level()
4117 if (hba->vreg_info.vcc) in ufshcd_find_max_sup_active_icc_level()
4119 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
4123 if (hba->vreg_info.vccq) in ufshcd_find_max_sup_active_icc_level()
4125 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
4129 if (hba->vreg_info.vccq2) in ufshcd_find_max_sup_active_icc_level()
4131 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
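
The routine reads the maximum current each rail (vcc, vccq, vccq2) can supply and bounds the active ICC level by the most constrained rail: per rail, the highest descriptor level whose draw fits the budget is chosen, and the device-wide level is the minimum across rails. A hedged model with made-up per-level draw tables:

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the highest ICC level whose current draw fits the rail's
     * budget. levels[] models the per-level draw (uA) from the power
     * descriptor, rising with the level index. */
    static int max_level_for_rail(const uint32_t *levels, int n, uint32_t max_ua)
    {
        int best = 0;
        for (int i = 0; i < n; i++)
            if (levels[i] <= max_ua)
                best = i;
        return best;
    }

    int main(void)
    {
        const uint32_t vcc_draw[4]  = { 100, 200, 400, 800 };
        const uint32_t vccq_draw[4] = {  50, 100, 200, 400 };

        int lvl_vcc  = max_level_for_rail(vcc_draw, 4, 500);   /* -> 2 */
        int lvl_vccq = max_level_for_rail(vccq_draw, 4, 400);  /* -> 3 */

        /* the device-wide level is bounded by the most constrained rail */
        int icc = lvl_vcc < lvl_vccq ? lvl_vcc : lvl_vccq;
        printf("active icc level: %d\n", icc);
        return 0;
    }
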
4138 static void ufshcd_init_icc_levels(struct ufs_hba *hba) in ufshcd_init_icc_levels() argument
4144 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); in ufshcd_init_icc_levels()
4146 dev_err(hba->dev, in ufshcd_init_icc_levels()
4152 hba->init_prefetch_data.icc_level = in ufshcd_init_icc_levels()
4153 ufshcd_find_max_sup_active_icc_level(hba, in ufshcd_init_icc_levels()
4155 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", in ufshcd_init_icc_levels()
4156 __func__, hba->init_prefetch_data.icc_level); in ufshcd_init_icc_levels()
4158 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_init_icc_levels()
4160 &hba->init_prefetch_data.icc_level); in ufshcd_init_icc_levels()
4163 dev_err(hba->dev, in ufshcd_init_icc_levels()
4165 __func__, hba->init_prefetch_data.icc_level, ret); in ufshcd_init_icc_levels()
4195 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
4201 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4203 if (IS_ERR(hba->sdev_ufs_device)) { in ufshcd_scsi_add_wlus()
4204 ret = PTR_ERR(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
4205 hba->sdev_ufs_device = NULL; in ufshcd_scsi_add_wlus()
4208 scsi_device_put(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
4210 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4218 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
4230 scsi_remove_device(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
4241 static int ufshcd_probe_hba(struct ufs_hba *hba) in ufshcd_probe_hba() argument
4245 ret = ufshcd_link_startup(hba); in ufshcd_probe_hba()
4249 ufshcd_init_pwr_info(hba); in ufshcd_probe_hba()
4252 ufshcd_set_link_active(hba); in ufshcd_probe_hba()
4254 ret = ufshcd_verify_dev_init(hba); in ufshcd_probe_hba()
4258 ret = ufshcd_complete_dev_init(hba); in ufshcd_probe_hba()
4263 ufshcd_set_ufs_dev_active(hba); in ufshcd_probe_hba()
4264 ufshcd_force_reset_auto_bkops(hba); in ufshcd_probe_hba()
4265 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
4266 hba->wlun_dev_clr_ua = true; in ufshcd_probe_hba()
4268 if (ufshcd_get_max_pwr_mode(hba)) { in ufshcd_probe_hba()
4269 dev_err(hba->dev, in ufshcd_probe_hba()
4273 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
4275 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
4283 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { in ufshcd_probe_hba()
4287 memset(&hba->dev_info, 0, sizeof(hba->dev_info)); in ufshcd_probe_hba()
4288 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_probe_hba()
4290 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_probe_hba()
4292 if (!hba->is_init_prefetch) in ufshcd_probe_hba()
4293 ufshcd_init_icc_levels(hba); in ufshcd_probe_hba()
4296 if (ufshcd_scsi_add_wlus(hba)) in ufshcd_probe_hba()
4299 scsi_scan_host(hba->host); in ufshcd_probe_hba()
4300 pm_runtime_put_sync(hba->dev); in ufshcd_probe_hba()
4303 if (!hba->is_init_prefetch) in ufshcd_probe_hba()
4304 hba->is_init_prefetch = true; in ufshcd_probe_hba()
4307 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_probe_hba()
4308 devfreq_resume_device(hba->devfreq); in ufshcd_probe_hba()
4315 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { in ufshcd_probe_hba()
4316 pm_runtime_put_sync(hba->dev); in ufshcd_probe_hba()
4317 ufshcd_hba_exit(hba); in ufshcd_probe_hba()
4330 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
4332 ufshcd_probe_hba(hba); in ufshcd_async_scan()
4372 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
4375 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
4378 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
4381 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
4453 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
4456 struct device *dev = hba->dev; in ufshcd_setup_vreg()
4457 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
4483 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
4485 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
4488 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
4510 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
4513 struct device *dev = hba->dev; in ufshcd_init_vreg()
4514 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
4532 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
4534 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
4537 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
4542 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, in __ufshcd_setup_clocks() argument
4547 struct list_head *head = &hba->clk_list_head; in __ufshcd_setup_clocks()
4561 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in __ufshcd_setup_clocks()
4569 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in __ufshcd_setup_clocks()
4574 ret = ufshcd_vops_setup_clocks(hba, on); in __ufshcd_setup_clocks()
4582 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_setup_clocks()
4583 hba->clk_gating.state = CLKS_ON; in __ufshcd_setup_clocks()
4584 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_setup_clocks()
4589 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
4591 return __ufshcd_setup_clocks(hba, on, false); in ufshcd_setup_clocks()
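
__ufshcd_setup_clocks walks the clock list in order and, when an enable fails part-way through, unwinds only the clocks already brought up before returning the error. A self-contained model of that enable-with-rollback walk (clock names and the simulated failure are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct clk_entry { const char *name; int enabled; };

    static int enable_one(struct clk_entry *c)
    {
        if (strcmp(c->name, "bad_clk") == 0)
            return -1;                       /* simulate prepare failure */
        c->enabled = 1;
        return 0;
    }

    int main(void)
    {
        struct clk_entry clks[] = {
            { "core_clk", 0 }, { "bus_clk", 0 }, { "bad_clk", 0 },
        };
        int n = 3, i, ret = 0;

        for (i = 0; i < n; i++) {
            ret = enable_one(&clks[i]);
            if (ret) {
                fprintf(stderr, "%s prepare enable failed\n", clks[i].name);
                break;
            }
        }
        if (ret)
            while (--i >= 0)                 /* unwind in reverse order */
                clks[i].enabled = 0;
        return ret ? 1 : 0;
    }
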
4594 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
4598 struct device *dev = hba->dev; in ufshcd_init_clocks()
4599 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
4619 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
4633 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
4637 if (!hba->vops) in ufshcd_variant_hba_init()
4640 err = ufshcd_vops_init(hba); in ufshcd_variant_hba_init()
4644 err = ufshcd_vops_setup_regulators(hba, true); in ufshcd_variant_hba_init()
4651 ufshcd_vops_exit(hba); in ufshcd_variant_hba_init()
4654 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
4655 __func__, ufshcd_get_var_name(hba), err); in ufshcd_variant_hba_init()
4659 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
4661 if (!hba->vops) in ufshcd_variant_hba_exit()
4664 ufshcd_vops_setup_clocks(hba, false); in ufshcd_variant_hba_exit()
4666 ufshcd_vops_setup_regulators(hba, false); in ufshcd_variant_hba_exit()
4668 ufshcd_vops_exit(hba); in ufshcd_variant_hba_exit()
4671 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
4682 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
4686 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
4690 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
4694 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
4698 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
4702 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
4706 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
4710 hba->is_powered = true; in ufshcd_hba_init()
4714 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
4716 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
4718 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
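
ufshcd_hba_init powers resources in dependency order (HBA regulator, clocks, device regulators, variant hooks) and releases them in reverse on failure via a goto ladder, as the out_* labels above show. The idiom, reduced to stubs with one simulated late failure:

    #include <stdio.h>

    /* Stubs standing in for the real bring-up steps; each returns 0 on
     * success. The goto ladder releases only what was acquired. */
    static int init_vreg(void)    { return 0; }
    static int init_clks(void)    { return 0; }
    static int init_variant(void) { return -1; }  /* simulated failure */
    static void exit_clks(void)   { puts("clocks released"); }
    static void exit_vreg(void)   { puts("regulators released"); }

    static int bring_up(void)
    {
        int err;

        err = init_vreg();
        if (err)
            goto out;
        err = init_clks();
        if (err)
            goto out_vreg;
        err = init_variant();
        if (err)
            goto out_clks;
        return 0;

    out_clks:
        exit_clks();
    out_vreg:
        exit_vreg();
    out:
        return err;
    }

    int main(void) { return bring_up() ? 1 : 0; }
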
4723 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
4725 if (hba->is_powered) { in ufshcd_hba_exit()
4726 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
4727 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
4728 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
4729 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
4730 hba->is_powered = false; in ufshcd_hba_exit()
4735 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) in ufshcd_send_request_sense() argument
4772 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
4781 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
4782 sdp = hba->sdev_ufs_device; in ufshcd_set_dev_pwr_mode()
4792 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
4803 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
4804 if (hba->wlun_dev_clr_ua) { in ufshcd_set_dev_pwr_mode()
4805 ret = ufshcd_send_request_sense(hba, sdp); in ufshcd_set_dev_pwr_mode()
4809 hba->wlun_dev_clr_ua = false; in ufshcd_set_dev_pwr_mode()
4830 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
4833 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
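
Device power mode changes ride on a SCSI START STOP UNIT sent to the device well-known LU, with the target power condition carried in byte 4, bits 7:4 of the CDB. A sketch of building that 6-byte CDB; the pwr_mode value shown is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define START_STOP 0x1b   /* SCSI START STOP UNIT opcode */

    /* Place the requested power condition in byte 4, bits 7:4, before
     * the request is handed to the SCSI midlayer. */
    static void build_ssu_cdb(uint8_t cdb[6], uint8_t pwr_mode)
    {
        cdb[0] = START_STOP;
        cdb[1] = 0;
        cdb[2] = 0;
        cdb[3] = 0;
        cdb[4] = (uint8_t)(pwr_mode << 4);
        cdb[5] = 0;
    }

    int main(void)
    {
        uint8_t cdb[6];

        build_ssu_cdb(cdb, 2 /* e.g. a sleep power condition */);
        printf("cdb[4] = 0x%02x\n", cdb[4]);
        return 0;
    }
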
4837 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
4843 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
4847 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
4849 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
4859 !hba->auto_bkops_enabled))) { in ufshcd_link_state_transition()
4864 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
4869 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
4876 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
4890 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
4891 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
4892 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
4893 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
4894 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
4895 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_lpm()
4896 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
4897 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
4902 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
4906 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
4907 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
4908 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
4909 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
4910 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
4911 if (!ret && !ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
4912 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
4915 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
4923 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
4925 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
4930 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
4932 if (ufshcd_is_link_off(hba)) in ufshcd_hba_vreg_set_lpm()
4933 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
4936 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
4938 if (ufshcd_is_link_off(hba)) in ufshcd_hba_vreg_set_hpm()
4939 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
4958 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_suspend() argument
4965 hba->pm_op_in_progress = 1; in ufshcd_suspend()
4968 hba->rpm_lvl : hba->spm_lvl; in ufshcd_suspend()
4980 ufshcd_hold(hba, false); in ufshcd_suspend()
4981 hba->clk_gating.is_suspended = true; in ufshcd_suspend()
4988 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in ufshcd_suspend()
4989 (req_link_state == hba->uic_link_state)) in ufshcd_suspend()
4993 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in ufshcd_suspend()
4999 if (ufshcd_can_autobkops_during_suspend(hba)) { in ufshcd_suspend()
5005 ret = ufshcd_urgent_bkops(hba); in ufshcd_suspend()
5010 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
5014 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && in ufshcd_suspend()
5015 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || in ufshcd_suspend()
5018 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
5019 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in ufshcd_suspend()
5024 ret = ufshcd_link_state_transition(hba, req_link_state, 1); in ufshcd_suspend()
5028 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
5036 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_suspend()
5037 devfreq_suspend_device(hba->devfreq); in ufshcd_suspend()
5038 hba->clk_scaling.window_start_t = 0; in ufshcd_suspend()
5045 ret = ufshcd_vops_suspend(hba, pm_op); in ufshcd_suspend()
5049 ret = ufshcd_vops_setup_clocks(hba, false); in ufshcd_suspend()
5053 if (!ufshcd_is_link_active(hba)) in ufshcd_suspend()
5054 ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
5057 __ufshcd_setup_clocks(hba, false, true); in ufshcd_suspend()
5059 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
5064 ufshcd_disable_irq(hba); in ufshcd_suspend()
5066 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
5070 ufshcd_vops_resume(hba, pm_op); in ufshcd_suspend()
5072 ufshcd_vreg_set_hpm(hba); in ufshcd_suspend()
5073 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in ufshcd_suspend()
5074 ufshcd_set_link_active(hba); in ufshcd_suspend()
5075 else if (ufshcd_is_link_off(hba)) in ufshcd_suspend()
5076 ufshcd_host_reset_and_restore(hba); in ufshcd_suspend()
5078 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in ufshcd_suspend()
5079 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
5081 hba->clk_gating.is_suspended = false; in ufshcd_suspend()
5082 ufshcd_release(hba); in ufshcd_suspend()
5084 hba->pm_op_in_progress = 0; in ufshcd_suspend()
5098 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_resume() argument
5103 hba->pm_op_in_progress = 1; in ufshcd_resume()
5104 old_link_state = hba->uic_link_state; in ufshcd_resume()
5106 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
5108 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
5113 ret = ufshcd_enable_irq(hba); in ufshcd_resume()
5117 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
5126 ret = ufshcd_vops_resume(hba, pm_op); in ufshcd_resume()
5130 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_resume()
5131 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_resume()
5133 ufshcd_set_link_active(hba); in ufshcd_resume()
5136 } else if (ufshcd_is_link_off(hba)) { in ufshcd_resume()
5137 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_resume()
5142 if (ret || !ufshcd_is_link_active(hba)) in ufshcd_resume()
5146 if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_resume()
5147 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in ufshcd_resume()
5156 ufshcd_urgent_bkops(hba); in ufshcd_resume()
5157 hba->clk_gating.is_suspended = false; in ufshcd_resume()
5159 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_resume()
5160 devfreq_resume_device(hba->devfreq); in ufshcd_resume()
5163 ufshcd_release(hba); in ufshcd_resume()
5167 ufshcd_link_state_transition(hba, old_link_state, 0); in ufshcd_resume()
5169 ufshcd_vops_suspend(hba, pm_op); in ufshcd_resume()
5171 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
5173 ufshcd_disable_irq(hba); in ufshcd_resume()
5174 ufshcd_setup_clocks(hba, false); in ufshcd_resume()
5176 hba->pm_op_in_progress = 0; in ufshcd_resume()
5189 int ufshcd_system_suspend(struct ufs_hba *hba) in ufshcd_system_suspend() argument
5193 if (!hba || !hba->is_powered) in ufshcd_system_suspend()
5196 if (pm_runtime_suspended(hba->dev)) { in ufshcd_system_suspend()
5197 if (hba->rpm_lvl == hba->spm_lvl) in ufshcd_system_suspend()
5202 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == in ufshcd_system_suspend()
5203 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) in ufshcd_system_suspend()
5214 ret = ufshcd_runtime_resume(hba); in ufshcd_system_suspend()
5219 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); in ufshcd_system_suspend()
5222 hba->is_sys_suspended = true; in ufshcd_system_suspend()
5234 int ufshcd_system_resume(struct ufs_hba *hba) in ufshcd_system_resume() argument
5236 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
5243 return ufshcd_resume(hba, UFS_SYSTEM_PM); in ufshcd_system_resume()
5255 int ufshcd_runtime_suspend(struct ufs_hba *hba) in ufshcd_runtime_suspend() argument
5257 if (!hba || !hba->is_powered) in ufshcd_runtime_suspend()
5260 return ufshcd_suspend(hba, UFS_RUNTIME_PM); in ufshcd_runtime_suspend()
5285 int ufshcd_runtime_resume(struct ufs_hba *hba) in ufshcd_runtime_resume() argument
5287 if (!hba || !hba->is_powered) in ufshcd_runtime_resume()
5290 return ufshcd_resume(hba, UFS_RUNTIME_PM); in ufshcd_runtime_resume()
5294 int ufshcd_runtime_idle(struct ufs_hba *hba) in ufshcd_runtime_idle() argument
5308 int ufshcd_shutdown(struct ufs_hba *hba) in ufshcd_shutdown() argument
5312 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_shutdown()
5315 if (pm_runtime_suspended(hba->dev)) { in ufshcd_shutdown()
5316 ret = ufshcd_runtime_resume(hba); in ufshcd_shutdown()
5321 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_shutdown()
5324 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); in ufshcd_shutdown()
5335 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
5337 scsi_remove_host(hba->host); in ufshcd_remove()
5339 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
5340 ufshcd_hba_stop(hba); in ufshcd_remove()
5342 scsi_host_put(hba->host); in ufshcd_remove()
5344 ufshcd_exit_clk_gating(hba); in ufshcd_remove()
5345 if (ufshcd_is_clkscaling_enabled(hba)) in ufshcd_remove()
5346 devfreq_remove_device(hba->devfreq); in ufshcd_remove()
5347 ufshcd_hba_exit(hba); in ufshcd_remove()
5355 void ufshcd_dealloc_host(struct ufs_hba *hba) in ufshcd_dealloc_host() argument
5357 scsi_host_put(hba->host); in ufshcd_dealloc_host()
5368 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
5370 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
5371 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
5374 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
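
ufshcd_set_dma_mask prefers a 64-bit DMA mask when the controller's capability register advertises 64-bit addressing, and falls back to a 32-bit mask if the wider one is rejected. The decision logic, modeled stand-alone (the capability bit value and the failing 64-bit attempt are fabricated for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_64_ADDRESSING (1u << 24)   /* illustrative capability bit */

    static int try_set_mask(int bits)
    {
        /* stand-in for the platform call; pretend 64-bit is refused */
        return bits == 64 ? -1 : 0;
    }

    /* Try the wide mask only when the hardware claims support; otherwise,
     * or on refusal, settle for 32-bit addressing. */
    static int set_dma_mask(uint32_t capabilities)
    {
        if ((capabilities & MASK_64_ADDRESSING) && !try_set_mask(64))
            return 0;
        return try_set_mask(32);
    }

    int main(void)
    {
        printf("dma mask result: %d\n", set_dma_mask(MASK_64_ADDRESSING));
        return 0;
    }
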
5386 struct ufs_hba *hba; in ufshcd_alloc_host() local
5403 hba = shost_priv(host); in ufshcd_alloc_host()
5404 hba->host = host; in ufshcd_alloc_host()
5405 hba->dev = dev; in ufshcd_alloc_host()
5406 *hba_handle = hba; in ufshcd_alloc_host()
5413 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_clks() argument
5417 struct list_head *head = &hba->clk_list_head; in ufshcd_scale_clks()
5422 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); in ufshcd_scale_clks()
5433 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_scale_clks()
5445 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_scale_clks()
5453 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_scale_clks()
5457 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); in ufshcd_scale_clks()
5467 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
5469 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_devfreq_target()
5473 err = ufshcd_scale_clks(hba, true); in ufshcd_devfreq_target()
5475 err = ufshcd_scale_clks(hba, false); in ufshcd_devfreq_target()
5483 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
5484 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
5487 if (!ufshcd_is_clkscaling_enabled(hba)) in ufshcd_devfreq_get_dev_status()
5492 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
5507 if (hba->outstanding_reqs) { in ufshcd_devfreq_get_dev_status()
5514 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
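
The devfreq status callback reports a busy/total time pair for the window that began at window_start_t; when requests are still outstanding, the busy interval is extended up to the sampling instant, which is why outstanding_reqs is checked under the host lock. A toy model of that accounting (field names and units are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Load window: total time since the window opened, busy time
     * accumulated while requests were outstanding. devfreq scales
     * clocks from the busy/total ratio. */
    struct scaling_window {
        uint64_t window_start;   /* ms */
        uint64_t busy_accum;     /* ms busy so far */
        uint64_t busy_since;     /* start of current busy stretch, 0 if idle */
    };

    static void report(const struct scaling_window *w, uint64_t now,
                       uint64_t *total, uint64_t *busy)
    {
        *total = now - w->window_start;
        *busy  = w->busy_accum;
        if (w->busy_since)               /* still busy: count up to now */
            *busy += now - w->busy_since;
    }

    int main(void)
    {
        struct scaling_window w = { 1000, 40, 1090 };
        uint64_t total, busy;

        report(&w, 1100, &total, &busy);
        printf("busy %llu / total %llu ms\n",
               (unsigned long long)busy, (unsigned long long)total);
        return 0;
    }
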
5531 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
5534 struct Scsi_Host *host = hba->host; in ufshcd_init()
5535 struct device *dev = hba->dev; in ufshcd_init()
5538 dev_err(hba->dev, in ufshcd_init()
5544 hba->mmio_base = mmio_base; in ufshcd_init()
5545 hba->irq = irq; in ufshcd_init()
5547 err = ufshcd_hba_init(hba); in ufshcd_init()
5552 ufshcd_hba_capabilities(hba); in ufshcd_init()
5555 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
5558 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
5560 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
5562 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
5567 err = ufshcd_memory_alloc(hba); in ufshcd_init()
5569 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
5574 ufshcd_host_memory_configure(hba); in ufshcd_init()
5576 host->can_queue = hba->nutrs; in ufshcd_init()
5577 host->cmd_per_lun = hba->nutrs; in ufshcd_init()
5584 hba->max_pwr_info.is_valid = false; in ufshcd_init()
5587 init_waitqueue_head(&hba->tm_wq); in ufshcd_init()
5588 init_waitqueue_head(&hba->tm_tag_wq); in ufshcd_init()
5591 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
5592 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
5595 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
5598 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
5601 init_waitqueue_head(&hba->dev_cmd.tag_wq); in ufshcd_init()
5603 ufshcd_init_clk_gating(hba); in ufshcd_init()
5605 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
5607 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
5610 hba->is_irq_enabled = true; in ufshcd_init()
5613 err = scsi_add_host(host, hba->dev); in ufshcd_init()
5615 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
5620 err = ufshcd_hba_enable(hba); in ufshcd_init()
5622 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
5626 if (ufshcd_is_clkscaling_enabled(hba)) { in ufshcd_init()
5627 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, in ufshcd_init()
5629 if (IS_ERR(hba->devfreq)) { in ufshcd_init()
5630 dev_err(hba->dev, "Unable to register with devfreq %ld\n", in ufshcd_init()
5631 PTR_ERR(hba->devfreq)); in ufshcd_init()
5635 devfreq_suspend_device(hba->devfreq); in ufshcd_init()
5636 hba->clk_scaling.window_start_t = 0; in ufshcd_init()
5646 ufshcd_set_ufs_dev_poweroff(hba); in ufshcd_init()
5648 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
5653 scsi_remove_host(hba->host); in ufshcd_init()
5655 ufshcd_exit_clk_gating(hba); in ufshcd_init()
5657 hba->is_irq_enabled = false; in ufshcd_init()
5659 ufshcd_hba_exit(hba); in ufshcd_init()