Lines Matching refs:hi

52 	struct cs_hsi_iface	*hi;  member
136 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
137 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
224 struct cs_hsi_iface *hi = msg->context; in cs_release_cmd() local
226 list_add_tail(&msg->link, &hi->cmdqueue); in cs_release_cmd()
231 struct cs_hsi_iface *hi = msg->context; in cs_cmd_destructor() local
233 spin_lock(&hi->lock); in cs_cmd_destructor()
237 if (hi->iface_state != CS_STATE_CLOSED) in cs_cmd_destructor()
238 dev_err(&hi->cl->device, "Cmd flushed while driver active\n"); in cs_cmd_destructor()
241 hi->control_state &= in cs_cmd_destructor()
244 hi->control_state & SSI_CHANNEL_STATE_WRITING) in cs_cmd_destructor()
245 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_cmd_destructor()
249 spin_unlock(&hi->lock); in cs_cmd_destructor()
277 static int cs_alloc_cmds(struct cs_hsi_iface *hi) in cs_alloc_cmds() argument
283 INIT_LIST_HEAD(&hi->cmdqueue); in cs_alloc_cmds()
296 msg->context = hi; in cs_alloc_cmds()
297 list_add_tail(&msg->link, &hi->cmdqueue); in cs_alloc_cmds()
303 cs_free_cmds(hi); in cs_alloc_cmds()
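
The cs_release_cmd()/cs_alloc_cmds() references above show how the control-channel messages are pooled: each pre-allocated hsi_msg carries the interface pointer in msg->context and is parked on hi->cmdqueue while idle. A simplified sketch of the claim/release pair, pieced together from these fragments (the BUG_ON and the destructor assignment are assumptions about details the listing does not show):

	/* Hand out a pooled command message; the callers shown in the
	 * listing take hi->lock before claiming, so no locking here. */
	static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *hi)
	{
		struct hsi_msg *msg;

		BUG_ON(list_empty(&hi->cmdqueue));
		msg = list_first_entry(&hi->cmdqueue, struct hsi_msg, link);
		list_del(&msg->link);
		msg->destructor = cs_cmd_destructor;	/* assumed wiring */
		return msg;
	}

	/* Return a completed command message to the pool. */
	static void cs_release_cmd(struct hsi_msg *msg)
	{
		struct cs_hsi_iface *hi = msg->context;

		list_add_tail(&msg->link, &hi->cmdqueue);
	}
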
309 struct cs_hsi_iface *hi = msg->context; in cs_hsi_data_destructor() local
314 spin_lock(&hi->lock); in cs_hsi_data_destructor()
315 if (hi->iface_state != CS_STATE_CLOSED) in cs_hsi_data_destructor()
319 hi->data_state &= in cs_hsi_data_destructor()
322 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_data_destructor()
325 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_data_destructor()
326 wake_up_interruptible(&hi->datawait); in cs_hsi_data_destructor()
328 spin_unlock(&hi->lock); in cs_hsi_data_destructor()
331 static int cs_hsi_alloc_data(struct cs_hsi_iface *hi) in cs_hsi_alloc_data() argument
343 rxmsg->context = hi; in cs_hsi_alloc_data()
352 txmsg->context = hi; in cs_hsi_alloc_data()
354 hi->data_rx_msg = rxmsg; in cs_hsi_alloc_data()
355 hi->data_tx_msg = txmsg; in cs_hsi_alloc_data()
372 static void cs_hsi_free_data(struct cs_hsi_iface *hi) in cs_hsi_free_data() argument
374 cs_hsi_free_data_msg(hi->data_rx_msg); in cs_hsi_free_data()
375 cs_hsi_free_data_msg(hi->data_tx_msg); in cs_hsi_free_data()
378 static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi, in __cs_hsi_error_pre() argument
382 spin_lock(&hi->lock); in __cs_hsi_error_pre()
383 dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n", in __cs_hsi_error_pre()
387 static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi) in __cs_hsi_error_post() argument
389 spin_unlock(&hi->lock); in __cs_hsi_error_post()
404 static void cs_hsi_control_read_error(struct cs_hsi_iface *hi, in cs_hsi_control_read_error() argument
407 __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state); in cs_hsi_control_read_error()
409 __cs_hsi_error_read_bits(&hi->control_state); in cs_hsi_control_read_error()
410 __cs_hsi_error_post(hi); in cs_hsi_control_read_error()
413 static void cs_hsi_control_write_error(struct cs_hsi_iface *hi, in cs_hsi_control_write_error() argument
416 __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state); in cs_hsi_control_write_error()
418 __cs_hsi_error_write_bits(&hi->control_state); in cs_hsi_control_write_error()
419 __cs_hsi_error_post(hi); in cs_hsi_control_write_error()
423 static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) in cs_hsi_data_read_error() argument
425 __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state); in cs_hsi_data_read_error()
426 __cs_hsi_error_read_bits(&hi->data_state); in cs_hsi_data_read_error()
427 __cs_hsi_error_post(hi); in cs_hsi_data_read_error()
430 static void cs_hsi_data_write_error(struct cs_hsi_iface *hi, in cs_hsi_data_write_error() argument
433 __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state); in cs_hsi_data_write_error()
434 __cs_hsi_error_write_bits(&hi->data_state); in cs_hsi_data_write_error()
435 __cs_hsi_error_post(hi); in cs_hsi_data_write_error()
441 struct cs_hsi_iface *hi = msg->context; in cs_hsi_read_on_control_complete() local
443 spin_lock(&hi->lock); in cs_hsi_read_on_control_complete()
444 hi->control_state &= ~SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_control_complete()
446 dev_err(&hi->cl->device, "Control RX error detected\n"); in cs_hsi_read_on_control_complete()
447 cs_hsi_control_read_error(hi, msg); in cs_hsi_read_on_control_complete()
448 spin_unlock(&hi->lock); in cs_hsi_read_on_control_complete()
451 dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); in cs_hsi_read_on_control_complete()
453 if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) { in cs_hsi_read_on_control_complete()
455 &hi->mmap_cfg->tstamp_rx_ctrl; in cs_hsi_read_on_control_complete()
458 spin_unlock(&hi->lock); in cs_hsi_read_on_control_complete()
463 cs_hsi_read_on_control(hi); in cs_hsi_read_on_control_complete()
468 struct cs_hsi_iface *hi = msg->context; in cs_hsi_peek_on_control_complete() local
472 dev_err(&hi->cl->device, "Control peek RX error detected\n"); in cs_hsi_peek_on_control_complete()
473 cs_hsi_control_read_error(hi, msg); in cs_hsi_peek_on_control_complete()
477 WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING)); in cs_hsi_peek_on_control_complete()
479 dev_dbg(&hi->cl->device, "Peek on control complete, reading\n"); in cs_hsi_peek_on_control_complete()
482 ret = hsi_async_read(hi->cl, msg); in cs_hsi_peek_on_control_complete()
484 cs_hsi_control_read_error(hi, msg); in cs_hsi_peek_on_control_complete()
487 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi) in cs_hsi_read_on_control() argument
492 spin_lock(&hi->lock); in cs_hsi_read_on_control()
493 if (hi->control_state & SSI_CHANNEL_STATE_READING) { in cs_hsi_read_on_control()
494 dev_err(&hi->cl->device, "Control read already pending (%d)\n", in cs_hsi_read_on_control()
495 hi->control_state); in cs_hsi_read_on_control()
496 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
499 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_read_on_control()
500 dev_err(&hi->cl->device, "Control read error (%d)\n", in cs_hsi_read_on_control()
501 hi->control_state); in cs_hsi_read_on_control()
502 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
505 hi->control_state |= SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_control()
506 dev_dbg(&hi->cl->device, "Issuing RX on control\n"); in cs_hsi_read_on_control()
507 msg = cs_claim_cmd(hi); in cs_hsi_read_on_control()
508 spin_unlock(&hi->lock); in cs_hsi_read_on_control()
512 ret = hsi_async_read(hi->cl, msg); in cs_hsi_read_on_control()
514 cs_hsi_control_read_error(hi, msg); in cs_hsi_read_on_control()
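
The cs_hsi_read_on_control() fragments above follow a fixed sequence: test the channel state bits under hi->lock, mark the channel READING, claim a pooled command message, drop the lock, and only then issue the asynchronous read. A condensed sketch of that flow, with the error paths and debug prints trimmed (the zero-length "peek" setup is inferred from the cs_hsi_peek_on_control_complete() references and should be read as an assumption):

	static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
	{
		struct hsi_msg *msg;
		int ret;

		spin_lock(&hi->lock);
		if (hi->control_state &
		    (SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_ERROR)) {
			spin_unlock(&hi->lock);
			return;			/* already pending or in error */
		}
		hi->control_state |= SSI_CHANNEL_STATE_READING;
		msg = cs_claim_cmd(hi);
		spin_unlock(&hi->lock);

		msg->sgt.nents = 0;		/* peek first, no payload yet */
		msg->complete = cs_hsi_peek_on_control_complete;
		ret = hsi_async_read(hi->cl, msg);
		if (ret)
			cs_hsi_control_read_error(hi, msg);
	}
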
519 struct cs_hsi_iface *hi = msg->context; in cs_hsi_write_on_control_complete() local
521 spin_lock(&hi->lock); in cs_hsi_write_on_control_complete()
522 hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_control_complete()
524 spin_unlock(&hi->lock); in cs_hsi_write_on_control_complete()
526 cs_hsi_control_write_error(hi, msg); in cs_hsi_write_on_control_complete()
528 dev_err(&hi->cl->device, in cs_hsi_write_on_control_complete()
534 static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message) in cs_hsi_write_on_control() argument
539 spin_lock(&hi->lock); in cs_hsi_write_on_control()
540 if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_write_on_control()
541 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
544 if (hi->control_state & SSI_CHANNEL_STATE_WRITING) { in cs_hsi_write_on_control()
545 dev_err(&hi->cl->device, in cs_hsi_write_on_control()
547 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
550 hi->control_state |= SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_control()
551 msg = cs_claim_cmd(hi); in cs_hsi_write_on_control()
552 spin_unlock(&hi->lock); in cs_hsi_write_on_control()
557 dev_dbg(&hi->cl->device, in cs_hsi_write_on_control()
559 ret = hsi_async_write(hi->cl, msg); in cs_hsi_write_on_control()
561 dev_err(&hi->cl->device, in cs_hsi_write_on_control()
563 cs_hsi_control_write_error(hi, msg); in cs_hsi_write_on_control()
573 if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) { in cs_hsi_write_on_control()
574 dev_err(&hi->cl->device, "Restarting control reads\n"); in cs_hsi_write_on_control()
575 cs_hsi_read_on_control(hi); in cs_hsi_write_on_control()
583 struct cs_hsi_iface *hi = msg->context; in cs_hsi_read_on_data_complete() local
587 cs_hsi_data_read_error(hi, msg); in cs_hsi_read_on_data_complete()
591 spin_lock(&hi->lock); in cs_hsi_read_on_data_complete()
592 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING)); in cs_hsi_read_on_data_complete()
593 hi->data_state &= ~SSI_CHANNEL_STATE_READING; in cs_hsi_read_on_data_complete()
595 payload |= hi->rx_slot; in cs_hsi_read_on_data_complete()
596 hi->rx_slot++; in cs_hsi_read_on_data_complete()
597 hi->rx_slot %= hi->rx_ptr_boundary; in cs_hsi_read_on_data_complete()
599 hi->mmap_cfg->rx_ptr = hi->rx_slot; in cs_hsi_read_on_data_complete()
600 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_read_on_data_complete()
601 wake_up_interruptible(&hi->datawait); in cs_hsi_read_on_data_complete()
602 spin_unlock(&hi->lock); in cs_hsi_read_on_data_complete()
604 cs_notify_data(payload, hi->rx_bufs); in cs_hsi_read_on_data_complete()
605 cs_hsi_read_on_data(hi); in cs_hsi_read_on_data_complete()
610 struct cs_hsi_iface *hi = msg->context; in cs_hsi_peek_on_data_complete() local
615 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
618 if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) { in cs_hsi_peek_on_data_complete()
619 dev_err(&hi->cl->device, "Data received in invalid state\n"); in cs_hsi_peek_on_data_complete()
620 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
624 spin_lock(&hi->lock); in cs_hsi_peek_on_data_complete()
625 WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL)); in cs_hsi_peek_on_data_complete()
626 hi->data_state &= ~SSI_CHANNEL_STATE_POLL; in cs_hsi_peek_on_data_complete()
627 hi->data_state |= SSI_CHANNEL_STATE_READING; in cs_hsi_peek_on_data_complete()
628 spin_unlock(&hi->lock); in cs_hsi_peek_on_data_complete()
630 address = (u32 *)(hi->mmap_base + in cs_hsi_peek_on_data_complete()
631 hi->rx_offsets[hi->rx_slot % hi->rx_bufs]); in cs_hsi_peek_on_data_complete()
632 sg_init_one(msg->sgt.sgl, address, hi->buf_size); in cs_hsi_peek_on_data_complete()
635 ret = hsi_async_read(hi->cl, msg); in cs_hsi_peek_on_data_complete()
637 cs_hsi_data_read_error(hi, msg); in cs_hsi_peek_on_data_complete()
658 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi) in cs_hsi_read_on_data() argument
663 spin_lock(&hi->lock); in cs_hsi_read_on_data()
664 if (hi->data_state & in cs_hsi_read_on_data()
666 dev_dbg(&hi->cl->device, "Data read already pending (%u)\n", in cs_hsi_read_on_data()
667 hi->data_state); in cs_hsi_read_on_data()
668 spin_unlock(&hi->lock); in cs_hsi_read_on_data()
671 hi->data_state |= SSI_CHANNEL_STATE_POLL; in cs_hsi_read_on_data()
672 spin_unlock(&hi->lock); in cs_hsi_read_on_data()
674 rxmsg = hi->data_rx_msg; in cs_hsi_read_on_data()
675 sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0); in cs_hsi_read_on_data()
679 ret = hsi_async_read(hi->cl, rxmsg); in cs_hsi_read_on_data()
681 cs_hsi_data_read_error(hi, rxmsg); in cs_hsi_read_on_data()
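
The data RX references above describe a two-phase read: cs_hsi_read_on_data() first posts a zero-length "poll" read (SSI_CHANNEL_STATE_POLL), and cs_hsi_peek_on_data_complete() then points the single-entry scatterlist at the next DL slot and issues the real read (SSI_CHANNEL_STATE_READING). A minimal sketch of the second phase, using the field names shown in the fragments (the completion-callback assignment is an assumption; state checks and error paths are trimmed):

	static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
	{
		struct cs_hsi_iface *hi = msg->context;
		u32 *address;

		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
		hi->data_state |= SSI_CHANNEL_STATE_READING;
		spin_unlock(&hi->lock);

		/* Aim the scatterlist at the next downlink slot in the mmap area. */
		address = (u32 *)(hi->mmap_base +
				  hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
		sg_init_one(msg->sgt.sgl, address, hi->buf_size);
		msg->complete = cs_hsi_read_on_data_complete;	/* assumed wiring */
		if (hsi_async_read(hi->cl, msg))
			cs_hsi_data_read_error(hi, msg);
	}
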
686 struct cs_hsi_iface *hi = msg->context; in cs_hsi_write_on_data_complete() local
689 spin_lock(&hi->lock); in cs_hsi_write_on_data_complete()
690 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_data_complete()
691 if (unlikely(waitqueue_active(&hi->datawait))) in cs_hsi_write_on_data_complete()
692 wake_up_interruptible(&hi->datawait); in cs_hsi_write_on_data_complete()
693 spin_unlock(&hi->lock); in cs_hsi_write_on_data_complete()
695 cs_hsi_data_write_error(hi, msg); in cs_hsi_write_on_data_complete()
699 static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot) in cs_hsi_write_on_data() argument
705 spin_lock(&hi->lock); in cs_hsi_write_on_data()
706 if (hi->iface_state != CS_STATE_CONFIGURED) { in cs_hsi_write_on_data()
707 dev_err(&hi->cl->device, "Not configured, aborting\n"); in cs_hsi_write_on_data()
711 if (hi->data_state & SSI_CHANNEL_STATE_ERROR) { in cs_hsi_write_on_data()
712 dev_err(&hi->cl->device, "HSI error, aborting\n"); in cs_hsi_write_on_data()
716 if (hi->data_state & SSI_CHANNEL_STATE_WRITING) { in cs_hsi_write_on_data()
717 dev_err(&hi->cl->device, "Write pending on data channel.\n"); in cs_hsi_write_on_data()
721 hi->data_state |= SSI_CHANNEL_STATE_WRITING; in cs_hsi_write_on_data()
722 spin_unlock(&hi->lock); in cs_hsi_write_on_data()
724 hi->tx_slot = slot; in cs_hsi_write_on_data()
725 address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]); in cs_hsi_write_on_data()
726 txmsg = hi->data_tx_msg; in cs_hsi_write_on_data()
727 sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); in cs_hsi_write_on_data()
729 ret = hsi_async_write(hi->cl, txmsg); in cs_hsi_write_on_data()
731 cs_hsi_data_write_error(hi, txmsg); in cs_hsi_write_on_data()
736 spin_unlock(&hi->lock); in cs_hsi_write_on_data()
738 cs_hsi_data_write_error(hi, hi->data_tx_msg); in cs_hsi_write_on_data()
743 static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi) in cs_hsi_get_state() argument
745 return hi->iface_state; in cs_hsi_get_state()
748 static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd) in cs_hsi_command() argument
755 ret = cs_hsi_write_on_control(hi, cmd); in cs_hsi_command()
759 ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK); in cs_hsi_command()
772 static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state) in cs_hsi_set_wakeline() argument
776 spin_lock_bh(&hi->lock); in cs_hsi_set_wakeline()
777 if (hi->wakeline_state != new_state) { in cs_hsi_set_wakeline()
778 hi->wakeline_state = new_state; in cs_hsi_set_wakeline()
780 dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n", in cs_hsi_set_wakeline()
781 new_state, hi->cl); in cs_hsi_set_wakeline()
783 spin_unlock_bh(&hi->lock); in cs_hsi_set_wakeline()
787 ssip_slave_start_tx(hi->master); in cs_hsi_set_wakeline()
789 ssip_slave_stop_tx(hi->master); in cs_hsi_set_wakeline()
792 dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n", in cs_hsi_set_wakeline()
793 new_state, hi->cl); in cs_hsi_set_wakeline()
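
cs_hsi_set_wakeline() updates the cached wakeline state under hi->lock and, only when the value actually changed, calls ssip_slave_start_tx()/ssip_slave_stop_tx() on the master after the lock has been dropped. A reduced sketch of that toggle ("change" is an illustrative local name):

	static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
	{
		bool change = false;

		spin_lock_bh(&hi->lock);
		if (hi->wakeline_state != new_state) {
			hi->wakeline_state = new_state;
			change = true;
		}
		spin_unlock_bh(&hi->lock);

		/* Touch the SSI master only outside the spinlock. */
		if (change) {
			if (new_state)
				ssip_slave_start_tx(hi->master);
			else
				ssip_slave_stop_tx(hi->master);
		}
	}
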
796 static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs) in set_buffer_sizes() argument
798 hi->rx_bufs = rx_bufs; in set_buffer_sizes()
799 hi->tx_bufs = tx_bufs; in set_buffer_sizes()
800 hi->mmap_cfg->rx_bufs = rx_bufs; in set_buffer_sizes()
801 hi->mmap_cfg->tx_bufs = tx_bufs; in set_buffer_sizes()
803 if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) { in set_buffer_sizes()
811 hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT); in set_buffer_sizes()
812 hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary; in set_buffer_sizes()
814 hi->rx_ptr_boundary = hi->rx_bufs; in set_buffer_sizes()
818 static int check_buf_params(struct cs_hsi_iface *hi, in check_buf_params() argument
823 size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); in check_buf_params()
829 } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) { in check_buf_params()
830 dev_err(&hi->cl->device, "No space for the requested buffer " in check_buf_params()
841 static int cs_hsi_data_sync(struct cs_hsi_iface *hi) in cs_hsi_data_sync() argument
845 spin_lock_bh(&hi->lock); in cs_hsi_data_sync()
847 if (!cs_state_xfer_active(hi->data_state)) { in cs_hsi_data_sync()
848 dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n"); in cs_hsi_data_sync()
855 if (!cs_state_xfer_active(hi->data_state)) in cs_hsi_data_sync()
865 prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE); in cs_hsi_data_sync()
866 spin_unlock_bh(&hi->lock); in cs_hsi_data_sync()
869 spin_lock_bh(&hi->lock); in cs_hsi_data_sync()
870 finish_wait(&hi->datawait, &wait); in cs_hsi_data_sync()
872 dev_dbg(&hi->cl->device, in cs_hsi_data_sync()
881 spin_unlock_bh(&hi->lock); in cs_hsi_data_sync()
882 dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r); in cs_hsi_data_sync()
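
cs_hsi_data_sync() lets a buffer reconfiguration wait until any in-flight data transfer has drained, using prepare_to_wait()/finish_wait() on hi->datawait with hi->lock dropped around the sleep. A trimmed sketch of that loop (the 500 ms timeout is illustrative, not the driver's constant, and the exit conditions are simplified):

	static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
	{
		int r = 0;

		spin_lock_bh(&hi->lock);
		while (cs_state_xfer_active(hi->data_state)) {
			DEFINE_WAIT(wait);
			long t;

			prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
			spin_unlock_bh(&hi->lock);
			t = schedule_timeout(msecs_to_jiffies(500));
			spin_lock_bh(&hi->lock);
			finish_wait(&hi->datawait, &wait);
			if (signal_pending(current)) {
				r = -ERESTARTSYS;
				break;
			}
			if (!t && cs_state_xfer_active(hi->data_state)) {
				r = -EIO;	/* still busy after the timeout */
				break;
			}
		}
		spin_unlock_bh(&hi->lock);
		return r;
	}
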
887 static void cs_hsi_data_enable(struct cs_hsi_iface *hi, in cs_hsi_data_enable() argument
892 BUG_ON(hi->buf_size == 0); in cs_hsi_data_enable()
894 set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs); in cs_hsi_data_enable()
896 hi->slot_size = L1_CACHE_ALIGN(hi->buf_size); in cs_hsi_data_enable()
897 dev_dbg(&hi->cl->device, in cs_hsi_data_enable()
899 hi->slot_size, hi->buf_size, L1_CACHE_BYTES); in cs_hsi_data_enable()
901 data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); in cs_hsi_data_enable()
902 dev_dbg(&hi->cl->device, in cs_hsi_data_enable()
904 data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES); in cs_hsi_data_enable()
906 for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) { in cs_hsi_data_enable()
907 hi->rx_offsets[i] = data_start + i * hi->slot_size; in cs_hsi_data_enable()
908 hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i]; in cs_hsi_data_enable()
909 dev_dbg(&hi->cl->device, "DL buf #%u at %u\n", in cs_hsi_data_enable()
910 i, hi->rx_offsets[i]); in cs_hsi_data_enable()
912 for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) { in cs_hsi_data_enable()
913 hi->tx_offsets[i] = data_start + in cs_hsi_data_enable()
914 (i + hi->mmap_cfg->rx_bufs) * hi->slot_size; in cs_hsi_data_enable()
915 hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i]; in cs_hsi_data_enable()
916 dev_dbg(&hi->cl->device, "UL buf #%u at %u\n", in cs_hsi_data_enable()
917 i, hi->rx_offsets[i]); in cs_hsi_data_enable()
920 hi->iface_state = CS_STATE_CONFIGURED; in cs_hsi_data_enable()
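
cs_hsi_data_enable() lays the shared mmap area out as the config block followed by the DL (RX) slots and then the UL (TX) slots, each slot rounded up to an L1 cache line. A worked example with illustrative numbers (not taken from the driver):

	/*
	 * Assume buf_size = 320, L1_CACHE_BYTES = 64, rx_bufs = tx_bufs = 2:
	 *
	 *   slot_size     = L1_CACHE_ALIGN(320)              = 320
	 *   data_start    = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg))
	 *   rx_offsets[i] = data_start + i * slot_size,             i = 0, 1
	 *   tx_offsets[i] = data_start + (i + rx_bufs) * slot_size, i = 0, 1
	 *
	 * so the two downlink slots sit directly after the config block and
	 * the two uplink slots follow them, all cache-line aligned.
	 */
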
923 static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state) in cs_hsi_data_disable() argument
926 dev_dbg(&hi->cl->device, in cs_hsi_data_disable()
928 hi->iface_state = CS_STATE_OPENED; in cs_hsi_data_disable()
932 static int cs_hsi_buf_config(struct cs_hsi_iface *hi, in cs_hsi_buf_config() argument
936 unsigned int old_state = hi->iface_state; in cs_hsi_buf_config()
938 spin_lock_bh(&hi->lock); in cs_hsi_buf_config()
941 hi->iface_state = CS_STATE_OPENED; in cs_hsi_buf_config()
942 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
948 r = cs_hsi_data_sync(hi); in cs_hsi_buf_config()
952 WARN_ON(cs_state_xfer_active(hi->data_state)); in cs_hsi_buf_config()
954 spin_lock_bh(&hi->lock); in cs_hsi_buf_config()
955 r = check_buf_params(hi, buf_cfg); in cs_hsi_buf_config()
959 hi->buf_size = buf_cfg->buf_size; in cs_hsi_buf_config()
960 hi->mmap_cfg->buf_size = hi->buf_size; in cs_hsi_buf_config()
961 hi->flags = buf_cfg->flags; in cs_hsi_buf_config()
963 hi->rx_slot = 0; in cs_hsi_buf_config()
964 hi->tx_slot = 0; in cs_hsi_buf_config()
965 hi->slot_size = 0; in cs_hsi_buf_config()
967 if (hi->buf_size) in cs_hsi_buf_config()
968 cs_hsi_data_enable(hi, buf_cfg); in cs_hsi_buf_config()
970 cs_hsi_data_disable(hi, old_state); in cs_hsi_buf_config()
972 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
974 if (old_state != hi->iface_state) { in cs_hsi_buf_config()
975 if (hi->iface_state == CS_STATE_CONFIGURED) { in cs_hsi_buf_config()
976 pm_qos_add_request(&hi->pm_qos_req, in cs_hsi_buf_config()
980 cs_hsi_read_on_data(hi); in cs_hsi_buf_config()
983 pm_qos_remove_request(&hi->pm_qos_req); in cs_hsi_buf_config()
989 spin_unlock_bh(&hi->lock); in cs_hsi_buf_config()
993 static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl, in cs_hsi_start() argument
1049 BUG_ON(!hi); in cs_hsi_start()
1050 *hi = hsi_if; in cs_hsi_start()
1068 static void cs_hsi_stop(struct cs_hsi_iface *hi) in cs_hsi_stop() argument
1070 dev_dbg(&hi->cl->device, "cs_hsi_stop\n"); in cs_hsi_stop()
1071 cs_hsi_set_wakeline(hi, 0); in cs_hsi_stop()
1072 ssip_slave_put_master(hi->master); in cs_hsi_stop()
1075 hi->iface_state = CS_STATE_CLOSED; in cs_hsi_stop()
1076 hsi_release_port(hi->cl); in cs_hsi_stop()
1083 WARN_ON(!cs_state_idle(hi->control_state)); in cs_hsi_stop()
1084 WARN_ON(!cs_state_idle(hi->data_state)); in cs_hsi_stop()
1086 if (pm_qos_request_active(&hi->pm_qos_req)) in cs_hsi_stop()
1087 pm_qos_remove_request(&hi->pm_qos_req); in cs_hsi_stop()
1089 spin_lock_bh(&hi->lock); in cs_hsi_stop()
1090 cs_hsi_free_data(hi); in cs_hsi_stop()
1091 cs_free_cmds(hi); in cs_hsi_stop()
1092 spin_unlock_bh(&hi->lock); in cs_hsi_stop()
1093 kfree(hi); in cs_hsi_stop()
1201 err = cs_hsi_command(csdata->hi, data); in cs_char_write()
1218 state = cs_hsi_get_state(csdata->hi); in cs_char_ioctl()
1237 cs_hsi_set_wakeline(csdata->hi, !!state); in cs_char_ioctl()
1256 r = cs_hsi_buf_config(csdata->hi, &buf_cfg); in cs_char_ioctl()
1304 ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE); in cs_char_open()
1347 cs_hsi_stop(csdata->hi); in cs_char_release()
1349 csdata->hi = NULL; in cs_char_release()
1387 cs_char_data.hi = NULL; in cs_hsi_client_probe()
1416 struct cs_hsi_iface *hi; in cs_hsi_client_remove() local
1421 hi = cs_char_data.hi; in cs_hsi_client_remove()
1422 cs_char_data.hi = NULL; in cs_hsi_client_remove()
1424 if (hi) in cs_hsi_client_remove()
1425 cs_hsi_stop(hi); in cs_hsi_client_remove()