Lines Matching refs:hi
52 struct cs_hsi_iface *hi; member
136 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
137 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
224 struct cs_hsi_iface *hi = msg->context; in cs_release_cmd() local
226 list_add_tail(&msg->link, &hi->cmdqueue); in cs_release_cmd()
231 struct cs_hsi_iface *hi = msg->context; in cs_cmd_destructor() local
233 spin_lock(&hi->lock); in cs_cmd_destructor()
237 if (hi->iface_state != CS_STATE_CLOSED) in cs_cmd_destructor()
238 dev_err(&hi->cl->device, "Cmd flushed while driver active\n"); in cs_cmd_destructor()
241 hi->control_state &= in cs_cmd_destructor()
244 hi->control_state & SSI_CHANNEL_STATE_WRITING) in cs_cmd_destructor()
245 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_cmd_destructor()
249 spin_unlock(&hi->lock); in cs_cmd_destructor()
277 static int cs_alloc_cmds(struct cs_hsi_iface *hi) in cs_alloc_cmds() argument
283 INIT_LIST_HEAD(&hi->cmdqueue); in cs_alloc_cmds()
296 msg->context = hi; in cs_alloc_cmds()
297 list_add_tail(&msg->link, &hi->cmdqueue); in cs_alloc_cmds()
303 cs_free_cmds(hi); in cs_alloc_cmds()
309 struct cs_hsi_iface *hi = msg->context; in cs_hsi_data_destructor() local
314 spin_lock(&hi->lock); in cs_hsi_data_destructor()
315 if (hi->iface_state != CS_STATE_CLOSED) in cs_hsi_data_destructor()
319 hi->data_state &= in cs_hsi_data_destructor()
322 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_data_destructor()
325 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_data_destructor()
326 wake_up_interruptible(&hi->datawait); in cs_hsi_data_destructor()
328 spin_unlock(&hi->lock); in cs_hsi_data_destructor()
331 static int cs_hsi_alloc_data(struct cs_hsi_iface *hi) in cs_hsi_alloc_data() argument
343 rxmsg->context = hi; in cs_hsi_alloc_data()
352 txmsg->context = hi; in cs_hsi_alloc_data()
354 hi->data_rx_msg = rxmsg; in cs_hsi_alloc_data()
355 hi->data_tx_msg = txmsg; in cs_hsi_alloc_data()
372 static void cs_hsi_free_data(struct cs_hsi_iface *hi) in cs_hsi_free_data() argument
374 cs_hsi_free_data_msg(hi->data_rx_msg); in cs_hsi_free_data()
375 cs_hsi_free_data_msg(hi->data_tx_msg); in cs_hsi_free_data()
378 static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi, in __cs_hsi_error_pre() argument
382 spin_lock(&hi->lock); in __cs_hsi_error_pre()
383 dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n", in __cs_hsi_error_pre()
387 static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi) in __cs_hsi_error_post() argument
389 spin_unlock(&hi->lock); in __cs_hsi_error_post()
404 static void cs_hsi_control_read_error(struct cs_hsi_iface *hi, in cs_hsi_control_read_error() argument
407 __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state); in cs_hsi_control_read_error()
409 __cs_hsi_error_read_bits(&hi->control_state); in cs_hsi_control_read_error()
410 __cs_hsi_error_post(hi); in cs_hsi_control_read_error()
413 static void cs_hsi_control_write_error(struct cs_hsi_iface *hi, in cs_hsi_control_write_error() argument
416 __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state); in cs_hsi_control_write_error()
418 __cs_hsi_error_write_bits(&hi->control_state); in cs_hsi_control_write_error()
419 __cs_hsi_error_post(hi); in cs_hsi_control_write_error()
423 static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) in cs_hsi_data_read_error() argument
425 __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state); in cs_hsi_data_read_error()
426 __cs_hsi_error_read_bits(&hi->data_state); in cs_hsi_data_read_error()
427 __cs_hsi_error_post(hi); in cs_hsi_data_read_error()
430 static void cs_hsi_data_write_error(struct cs_hsi_iface *hi, in cs_hsi_data_write_error() argument
433 __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state); in cs_hsi_data_write_error()
434 __cs_hsi_error_write_bits(&hi->data_state); in cs_hsi_data_write_error()
435 __cs_hsi_error_post(hi); in cs_hsi_data_write_error()
441 struct cs_hsi_iface *hi = msg->context; in cs_hsi_read_on_control_complete() local
443 spin_lock(&hi->lock); in cs_hsi_read_on_control_complete()
444 hi->control_state &= ~SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_control_complete()
446 dev_err(&hi->cl->device, "Control RX error detected\n"); in cs_hsi_read_on_control_complete()
447 cs_hsi_control_read_error(hi, msg); in cs_hsi_read_on_control_complete()
448 spin_unlock(&hi->lock); in cs_hsi_read_on_control_complete()
451 dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); in cs_hsi_read_on_control_complete()
453 if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) { in cs_hsi_read_on_control_complete()
456 &hi->mmap_cfg->tstamp_rx_ctrl; in cs_hsi_read_on_control_complete()
463 spin_unlock(&hi->lock); in cs_hsi_read_on_control_complete()
468 cs_hsi_read_on_control(hi); in cs_hsi_read_on_control_complete()
473 struct cs_hsi_iface *hi = msg->context; in cs_hsi_peek_on_control_complete() local
477 dev_err(&hi->cl->device, "Control peek RX error detected\n"); in cs_hsi_peek_on_control_complete()
478 cs_hsi_control_read_error(hi, msg); in cs_hsi_peek_on_control_complete()
482 WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING)); in cs_hsi_peek_on_control_complete()
484 dev_dbg(&hi->cl->device, "Peek on control complete, reading\n"); in cs_hsi_peek_on_control_complete()
487 ret = hsi_async_read(hi->cl, msg); in cs_hsi_peek_on_control_complete()
489 cs_hsi_control_read_error(hi, msg); in cs_hsi_peek_on_control_complete()
492 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi) in cs_hsi_read_on_control() argument
497 spin_lock(&hi->lock); in cs_hsi_read_on_control()
498 if (hi->control_state & SSI_CHANNEL_STATE_READING) { in cs_hsi_read_on_control()
499 dev_err(&hi->cl->device, "Control read already pending (%d)\n", in cs_hsi_read_on_control()
500 hi->control_state); in cs_hsi_read_on_control()
501 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
504 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_read_on_control()
505 dev_err(&hi->cl->device, "Control read error (%d)\n", in cs_hsi_read_on_control()
506 hi->control_state); in cs_hsi_read_on_control()
507 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
510 hi->control_state |= SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_control()
511 dev_dbg(&hi->cl->device, "Issuing RX on control\n"); in cs_hsi_read_on_control()
512 msg = cs_claim_cmd(hi); in cs_hsi_read_on_control()
513 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
517 ret = hsi_async_read(hi->cl, msg); in cs_hsi_read_on_control()
519 cs_hsi_control_read_error(hi, msg); in cs_hsi_read_on_control()
524 struct cs_hsi_iface *hi = msg->context; in cs_hsi_write_on_control_complete() local
526 spin_lock(&hi->lock); in cs_hsi_write_on_control_complete()
527 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_control_complete()
529 spin_unlock(&hi->lock); in cs_hsi_write_on_control_complete()
531 cs_hsi_control_write_error(hi, msg); in cs_hsi_write_on_control_complete()
533 dev_err(&hi->cl->device, in cs_hsi_write_on_control_complete()
539 static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message) in cs_hsi_write_on_control() argument
544 spin_lock(&hi->lock); in cs_hsi_write_on_control()
545 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_write_on_control()
546 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
549 if (hi->control_state & SSI_CHANNEL_STATE_WRITING) { in cs_hsi_write_on_control()
550 dev_err(&hi->cl->device, in cs_hsi_write_on_control()
552 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
555 hi->control_state |= SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_control()
556 msg = cs_claim_cmd(hi); in cs_hsi_write_on_control()
557 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
562 dev_dbg(&hi->cl->device, in cs_hsi_write_on_control()
564 ret = hsi_async_write(hi->cl, msg); in cs_hsi_write_on_control()
566 dev_err(&hi->cl->device, in cs_hsi_write_on_control()
568 cs_hsi_control_write_error(hi, msg); in cs_hsi_write_on_control()
578 if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) { in cs_hsi_write_on_control()
579 dev_err(&hi->cl->device, "Restarting control reads\n"); in cs_hsi_write_on_control()
580 cs_hsi_read_on_control(hi); in cs_hsi_write_on_control()
588 struct cs_hsi_iface *hi = msg->context; in cs_hsi_read_on_data_complete() local
592 cs_hsi_data_read_error(hi, msg); in cs_hsi_read_on_data_complete()
596 spin_lock(&hi->lock); in cs_hsi_read_on_data_complete()
597 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING)); in cs_hsi_read_on_data_complete()
598 hi->data_state &= ~SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_data_complete()
600 payload |= hi->rx_slot; in cs_hsi_read_on_data_complete()
601 hi->rx_slot++; in cs_hsi_read_on_data_complete()
602 hi->rx_slot %= hi->rx_ptr_boundary; in cs_hsi_read_on_data_complete()
604 hi->mmap_cfg->rx_ptr = hi->rx_slot; in cs_hsi_read_on_data_complete()
605 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_read_on_data_complete()
606 wake_up_interruptible(&hi->datawait); in cs_hsi_read_on_data_complete()
607 spin_unlock(&hi->lock); in cs_hsi_read_on_data_complete()
609 cs_notify_data(payload, hi->rx_bufs); in cs_hsi_read_on_data_complete()
610 cs_hsi_read_on_data(hi); in cs_hsi_read_on_data_complete()
615 struct cs_hsi_iface *hi = msg->context; in cs_hsi_peek_on_data_complete() local
620 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
623 if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) { in cs_hsi_peek_on_data_complete()
624 dev_err(&hi->cl->device, "Data received in invalid state\n"); in cs_hsi_peek_on_data_complete()
625 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
629 spin_lock(&hi->lock); in cs_hsi_peek_on_data_complete()
630 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL)); in cs_hsi_peek_on_data_complete()
631 hi->data_state &= ~SSI_CHANNEL_STATE_POLL; in cs_hsi_peek_on_data_complete()
632 hi->data_state |= SSI_CHANNEL_STATE_READING; in cs_hsi_peek_on_data_complete()
633 spin_unlock(&hi->lock); in cs_hsi_peek_on_data_complete()
635 address = (u32 *)(hi->mmap_base + in cs_hsi_peek_on_data_complete()
636 hi->rx_offsets[hi->rx_slot % hi->rx_bufs]); in cs_hsi_peek_on_data_complete()
637 sg_init_one(msg->sgt.sgl, address, hi->buf_size); in cs_hsi_peek_on_data_complete()
640 ret = hsi_async_read(hi->cl, msg); in cs_hsi_peek_on_data_complete()
642 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
663 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi) in cs_hsi_read_on_data() argument
668 spin_lock(&hi->lock); in cs_hsi_read_on_data()
669 if (hi->data_state & in cs_hsi_read_on_data()
671 dev_dbg(&hi->cl->device, "Data read already pending (%u)\n", in cs_hsi_read_on_data()
672 hi->data_state); in cs_hsi_read_on_data()
673 spin_unlock(&hi->lock); in cs_hsi_read_on_data()
676 hi->data_state |= SSI_CHANNEL_STATE_POLL; in cs_hsi_read_on_data()
677 spin_unlock(&hi->lock); in cs_hsi_read_on_data()
679 rxmsg = hi->data_rx_msg; in cs_hsi_read_on_data()
680 sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0); in cs_hsi_read_on_data()
684 ret = hsi_async_read(hi->cl, rxmsg); in cs_hsi_read_on_data()
686 cs_hsi_data_read_error(hi, rxmsg); in cs_hsi_read_on_data()
691 struct cs_hsi_iface *hi = msg->context; in cs_hsi_write_on_data_complete() local
694 spin_lock(&hi->lock); in cs_hsi_write_on_data_complete()
695 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_data_complete()
696 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_write_on_data_complete()
697 wake_up_interruptible(&hi->datawait); in cs_hsi_write_on_data_complete()
698 spin_unlock(&hi->lock); in cs_hsi_write_on_data_complete()
700 cs_hsi_data_write_error(hi, msg); in cs_hsi_write_on_data_complete()
704 static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot) in cs_hsi_write_on_data() argument
710 spin_lock(&hi->lock); in cs_hsi_write_on_data()
711 if (hi->iface_state != CS_STATE_CONFIGURED) { in cs_hsi_write_on_data()
712 dev_err(&hi->cl->device, "Not configured, aborting\n"); in cs_hsi_write_on_data()
716 if (hi->data_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_write_on_data()
717 dev_err(&hi->cl->device, "HSI error, aborting\n"); in cs_hsi_write_on_data()
721 if (hi->data_state & SSI_CHANNEL_STATE_WRITING) { in cs_hsi_write_on_data()
722 dev_err(&hi->cl->device, "Write pending on data channel.\n"); in cs_hsi_write_on_data()
726 hi->data_state |= SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_data()
727 spin_unlock(&hi->lock); in cs_hsi_write_on_data()
729 hi->tx_slot = slot; in cs_hsi_write_on_data()
730 address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]); in cs_hsi_write_on_data()
731 txmsg = hi->data_tx_msg; in cs_hsi_write_on_data()
732 sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); in cs_hsi_write_on_data()
734 ret = hsi_async_write(hi->cl, txmsg); in cs_hsi_write_on_data()
736 cs_hsi_data_write_error(hi, txmsg); in cs_hsi_write_on_data()
741 spin_unlock(&hi->lock); in cs_hsi_write_on_data()
743 cs_hsi_data_write_error(hi, hi->data_tx_msg); in cs_hsi_write_on_data()
748 static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi) in cs_hsi_get_state() argument
750 return hi->iface_state; in cs_hsi_get_state()
753 static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd) in cs_hsi_command() argument
760 ret = cs_hsi_write_on_control(hi, cmd); in cs_hsi_command()
764 ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK); in cs_hsi_command()
777 static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state) in cs_hsi_set_wakeline() argument
781 spin_lock_bh(&hi->lock); in cs_hsi_set_wakeline()
782 if (hi->wakeline_state != new_state) { in cs_hsi_set_wakeline()
783 hi->wakeline_state = new_state; in cs_hsi_set_wakeline()
785 dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n", in cs_hsi_set_wakeline()
786 new_state, hi->cl); in cs_hsi_set_wakeline()
788 spin_unlock_bh(&hi->lock); in cs_hsi_set_wakeline()
792 ssip_slave_start_tx(hi->master); in cs_hsi_set_wakeline()
794 ssip_slave_stop_tx(hi->master); in cs_hsi_set_wakeline()
797 dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n", in cs_hsi_set_wakeline()
798 new_state, hi->cl); in cs_hsi_set_wakeline()
801 static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs) in set_buffer_sizes() argument
803 hi->rx_bufs = rx_bufs; in set_buffer_sizes()
804 hi->tx_bufs = tx_bufs; in set_buffer_sizes()
805 hi->mmap_cfg->rx_bufs = rx_bufs; in set_buffer_sizes()
806 hi->mmap_cfg->tx_bufs = tx_bufs; in set_buffer_sizes()
808 if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) { in set_buffer_sizes()
816 hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT); in set_buffer_sizes()
817 hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary; in set_buffer_sizes()
819 hi->rx_ptr_boundary = hi->rx_bufs; in set_buffer_sizes()
823 static int check_buf_params(struct cs_hsi_iface *hi, in check_buf_params() argument
828 size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); in check_buf_params()
834 } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) { in check_buf_params()
835 dev_err(&hi->cl->device, "No space for the requested buffer " in check_buf_params()
846 static int cs_hsi_data_sync(struct cs_hsi_iface *hi) in cs_hsi_data_sync() argument
850 spin_lock_bh(&hi->lock); in cs_hsi_data_sync()
852 if (!cs_state_xfer_active(hi->data_state)) { in cs_hsi_data_sync()
853 dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n"); in cs_hsi_data_sync()
860 if (!cs_state_xfer_active(hi->data_state)) in cs_hsi_data_sync()
870 prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE); in cs_hsi_data_sync()
871 spin_unlock_bh(&hi->lock); in cs_hsi_data_sync()
874 spin_lock_bh(&hi->lock); in cs_hsi_data_sync()
875 finish_wait(&hi->datawait, &wait); in cs_hsi_data_sync()
877 dev_dbg(&hi->cl->device, in cs_hsi_data_sync()
886 spin_unlock_bh(&hi->lock); in cs_hsi_data_sync()
887 dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r); in cs_hsi_data_sync()
892 static void cs_hsi_data_enable(struct cs_hsi_iface *hi, in cs_hsi_data_enable() argument
897 BUG_ON(hi->buf_size == 0); in cs_hsi_data_enable()
899 set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs); in cs_hsi_data_enable()
901 hi->slot_size = L1_CACHE_ALIGN(hi->buf_size); in cs_hsi_data_enable()
902 dev_dbg(&hi->cl->device, in cs_hsi_data_enable()
904 hi->slot_size, hi->buf_size, L1_CACHE_BYTES); in cs_hsi_data_enable()
906 data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); in cs_hsi_data_enable()
907 dev_dbg(&hi->cl->device, in cs_hsi_data_enable()
909 data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES); in cs_hsi_data_enable()
911 for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) { in cs_hsi_data_enable()
912 hi->rx_offsets[i] = data_start + i * hi->slot_size; in cs_hsi_data_enable()
913 hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i]; in cs_hsi_data_enable()
914 dev_dbg(&hi->cl->device, "DL buf #%u at %u\n", in cs_hsi_data_enable()
915 i, hi->rx_offsets[i]); in cs_hsi_data_enable()
917 for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) { in cs_hsi_data_enable()
918 hi->tx_offsets[i] = data_start + in cs_hsi_data_enable()
919 (i + hi->mmap_cfg->rx_bufs) * hi->slot_size; in cs_hsi_data_enable()
920 hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i]; in cs_hsi_data_enable()
921 dev_dbg(&hi->cl->device, "UL buf #%u at %u\n", in cs_hsi_data_enable()
922 i, hi->tx_offsets[i]); in cs_hsi_data_enable()
925 hi->iface_state = CS_STATE_CONFIGURED; in cs_hsi_data_enable()
928 static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state) in cs_hsi_data_disable() argument
931 dev_dbg(&hi->cl->device, in cs_hsi_data_disable()
933 hi->iface_state = CS_STATE_OPENED; in cs_hsi_data_disable()
937 static int cs_hsi_buf_config(struct cs_hsi_iface *hi, in cs_hsi_buf_config() argument
941 unsigned int old_state = hi->iface_state; in cs_hsi_buf_config()
943 spin_lock_bh(&hi->lock); in cs_hsi_buf_config()
946 hi->iface_state = CS_STATE_OPENED; in cs_hsi_buf_config()
947 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
953 r = cs_hsi_data_sync(hi); in cs_hsi_buf_config()
957 WARN_ON(cs_state_xfer_active(hi->data_state)); in cs_hsi_buf_config()
959 spin_lock_bh(&hi->lock); in cs_hsi_buf_config()
960 r = check_buf_params(hi, buf_cfg); in cs_hsi_buf_config()
964 hi->buf_size = buf_cfg->buf_size; in cs_hsi_buf_config()
965 hi->mmap_cfg->buf_size = hi->buf_size; in cs_hsi_buf_config()
966 hi->flags = buf_cfg->flags; in cs_hsi_buf_config()
968 hi->rx_slot = 0; in cs_hsi_buf_config()
969 hi->tx_slot = 0; in cs_hsi_buf_config()
970 hi->slot_size = 0; in cs_hsi_buf_config()
972 if (hi->buf_size) in cs_hsi_buf_config()
973 cs_hsi_data_enable(hi, buf_cfg); in cs_hsi_buf_config()
975 cs_hsi_data_disable(hi, old_state); in cs_hsi_buf_config()
977 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
979 if (old_state != hi->iface_state) { in cs_hsi_buf_config()
980 if (hi->iface_state == CS_STATE_CONFIGURED) { in cs_hsi_buf_config()
981 pm_qos_add_request(&hi->pm_qos_req, in cs_hsi_buf_config()
985 cs_hsi_read_on_data(hi); in cs_hsi_buf_config()
988 pm_qos_remove_request(&hi->pm_qos_req); in cs_hsi_buf_config()
994 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
998 static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl, in cs_hsi_start() argument
1054 BUG_ON(!hi); in cs_hsi_start()
1055 *hi = hsi_if; in cs_hsi_start()
1073 static void cs_hsi_stop(struct cs_hsi_iface *hi) in cs_hsi_stop() argument
1075 dev_dbg(&hi->cl->device, "cs_hsi_stop\n"); in cs_hsi_stop()
1076 cs_hsi_set_wakeline(hi, 0); in cs_hsi_stop()
1077 ssip_slave_put_master(hi->master); in cs_hsi_stop()
1080 hi->iface_state = CS_STATE_CLOSED; in cs_hsi_stop()
1081 hsi_release_port(hi->cl); in cs_hsi_stop()
1088 WARN_ON(!cs_state_idle(hi->control_state)); in cs_hsi_stop()
1089 WARN_ON(!cs_state_idle(hi->data_state)); in cs_hsi_stop()
1091 if (pm_qos_request_active(&hi->pm_qos_req)) in cs_hsi_stop()
1092 pm_qos_remove_request(&hi->pm_qos_req); in cs_hsi_stop()
1094 spin_lock_bh(&hi->lock); in cs_hsi_stop()
1095 cs_hsi_free_data(hi); in cs_hsi_stop()
1096 cs_free_cmds(hi); in cs_hsi_stop()
1097 spin_unlock_bh(&hi->lock); in cs_hsi_stop()
1098 kfree(hi); in cs_hsi_stop()
1206 err = cs_hsi_command(csdata->hi, data); in cs_char_write()
1223 state = cs_hsi_get_state(csdata->hi); in cs_char_ioctl()
1242 cs_hsi_set_wakeline(csdata->hi, !!state); in cs_char_ioctl()
1261 r = cs_hsi_buf_config(csdata->hi, &buf_cfg); in cs_char_ioctl()
1309 ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE); in cs_char_open()
1352 cs_hsi_stop(csdata->hi); in cs_char_release()
1354 csdata->hi = NULL; in cs_char_release()
1392 cs_char_data.hi = NULL; in cs_hsi_client_probe()
1421 struct cs_hsi_iface *hi; in cs_hsi_client_remove() local
1426 hi = cs_char_data.hi; in cs_hsi_client_remove()
1427 cs_char_data.hi = NULL; in cs_hsi_client_remove()
1429 if (hi) in cs_hsi_client_remove()
1430 cs_hsi_stop(hi); in cs_hsi_client_remove()
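
A common thread in the references above: every hsi_msg carries the cs_hsi_iface pointer in msg->context, and both the submit and completion paths take hi->lock before flipping the SSI_CHANNEL_STATE_* bits that mark a read or write as in flight on a channel. The sketch below shows that pattern in isolation; struct example_iface, example_write(), example_write_complete() and the flag values are illustrative names invented for this sketch, not the driver's actual definitions.

/*
 * Minimal sketch of the context/state pattern seen throughout the
 * listing: the interface struct rides along in msg->context, the
 * submit path sets a channel-state bit under the spinlock, and the
 * completion callback clears it again.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/hsi/hsi.h>

#define EXAMPLE_STATE_WRITING	(1 << 1)	/* illustrative flag value */

struct example_iface {
	struct hsi_client *cl;
	spinlock_t lock;		/* guards control_state */
	unsigned int control_state;
};

/* Completion callback: recover the interface from msg->context. */
static void example_write_complete(struct hsi_msg *msg)
{
	struct example_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~EXAMPLE_STATE_WRITING;
	spin_unlock(&hi->lock);
}

/* Submit path: mark the channel busy before handing the message off. */
static int example_write(struct example_iface *hi, struct hsi_msg *msg)
{
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & EXAMPLE_STATE_WRITING) {
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= EXAMPLE_STATE_WRITING;
	spin_unlock(&hi->lock);

	msg->context = hi;
	msg->complete = example_write_complete;
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		/* Undo the busy flag if the write was never queued. */
		spin_lock(&hi->lock);
		hi->control_state &= ~EXAMPLE_STATE_WRITING;
		spin_unlock(&hi->lock);
	}
	return ret;
}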