/linux-4.1.27/drivers/s390/crypto/ |
H A D | Makefile | 5 ap-objs := ap_bus.o 6 obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
|
H A D | ap_bus.h | 43 * The ap_qid_t identifier of an ap queue. It contains a 62 * The ap queue status word is returned by all three AP functions 181 void *private; /* ap driver private pointer. */ 192 void *private; /* ap driver private pointer. */ 229 * for the first time. Otherwise the ap message queue will get
|
/linux-4.1.27/tools/lib/traceevent/ |
H A D | parse-utils.c | 28 void __vwarning(const char *fmt, va_list ap) __vwarning() argument 35 vfprintf(stderr, fmt, ap); __vwarning() 42 va_list ap; __warning() local 44 va_start(ap, fmt); __warning() 45 __vwarning(fmt, ap); __warning() 46 va_end(ap); __warning() 51 va_list ap; warning() local 53 va_start(ap, fmt); warning() 54 __vwarning(fmt, ap); warning() 55 va_end(ap); warning() 58 void __vpr_stat(const char *fmt, va_list ap) __vpr_stat() argument 60 vprintf(fmt, ap); __vpr_stat() 66 va_list ap; __pr_stat() local 68 va_start(ap, fmt); __pr_stat() 69 __vpr_stat(fmt, ap); __pr_stat() 70 va_end(ap); __pr_stat() 73 void __weak vpr_stat(const char *fmt, va_list ap) vpr_stat() argument 75 __vpr_stat(fmt, ap); vpr_stat() 80 va_list ap; pr_stat() local 82 va_start(ap, fmt); pr_stat() 83 __vpr_stat(fmt, ap); pr_stat() 84 va_end(ap); pr_stat()
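The parse-utils.c hits above all follow the standard varargs layering: each public variadic function (warning(), pr_stat(), ...) only starts a va_list and forwards it to a v-prefixed worker that does the actual vfprintf()/vprintf(). A minimal userspace sketch of that layering, with illustrative names rather than the libtraceevent ones:

    #include <stdarg.h>
    #include <stdio.h>

    /* Core worker: takes an already-started va_list. */
    static void vwarning(const char *fmt, va_list ap)
    {
        fprintf(stderr, "warning: ");
        vfprintf(stderr, fmt, ap);
        fprintf(stderr, "\n");
    }

    /* Variadic front end: owns va_start()/va_end() and forwards the list. */
    static void warning(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vwarning(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        warning("unexpected token '%s' at offset %d", "}", 42);
        return 0;
    }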
|
H A D | trace-seq.c | 126 va_list ap; trace_seq_printf() local 135 va_start(ap, fmt); trace_seq_printf() 136 ret = vsnprintf(s->buffer + s->len, len, fmt, ap); trace_seq_printf() 137 va_end(ap); trace_seq_printf()
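trace_seq_printf() above formats into s->buffer + s->len with only the remaining space available, so vsnprintf()'s return value can be checked for truncation. A rough userspace equivalent of that bounded-append idea (the struct and its fields are invented for the sketch):

    #include <stdarg.h>
    #include <stdio.h>

    struct seq {
        char buf[128];
        size_t len;
    };

    /* Append formatted text to a fixed buffer; return 0 if it did not fit. */
    static int seq_printf(struct seq *s, const char *fmt, ...)
    {
        size_t avail = sizeof(s->buf) - s->len;
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vsnprintf(s->buf + s->len, avail, fmt, ap);
        va_end(ap);

        if (ret < 0 || (size_t)ret >= avail)
            return 0;   /* truncated: caller may grow the buffer and retry */

        s->len += ret;
        return 1;
    }

    int main(void)
    {
        struct seq s = { .len = 0 };

        seq_printf(&s, "pid=%d comm=%s", 1234, "bash");
        printf("%s\n", s.buf);
        return 0;
    }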
|
H A D | event-utils.h | 28 void vpr_stat(const char *fmt, va_list ap);
|
/linux-4.1.27/lib/ |
H A D | kasprintf.c | 14 char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) kvasprintf() argument 20 va_copy(aq, ap); kvasprintf() 28 vsnprintf(p, len+1, fmt, ap); kvasprintf() 36 va_list ap; kasprintf() local 39 va_start(ap, fmt); kasprintf() 40 p = kvasprintf(gfp, fmt, ap); kasprintf() 41 va_end(ap); kasprintf()
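kvasprintf() has to size the result before it can allocate it: the excerpt shows a va_copy()'d first pass to measure, then a second vsnprintf() into the new buffer using the original list. The same two-pass idea in plain userspace C, with malloc() standing in for the kernel allocator:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Format into a newly allocated string; caller frees.  NULL on failure. */
    static char *vasprintf_copy(const char *fmt, va_list ap)
    {
        va_list aq;
        int len;
        char *p;

        va_copy(aq, ap);                /* first pass consumes a copy ... */
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);
        if (len < 0)
            return NULL;

        p = malloc(len + 1);
        if (!p)
            return NULL;

        vsnprintf(p, len + 1, fmt, ap); /* ... second pass uses the original */
        return p;
    }

    static char *asprintf_copy(const char *fmt, ...)
    {
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = vasprintf_copy(fmt, ap);
        va_end(ap);
        return p;
    }

    int main(void)
    {
        char *s = asprintf_copy("%s-%04d", "queue", 7);

        if (s) {
            puts(s);
            free(s);
        }
        return 0;
    }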
|
H A D | seq_buf.c | 84 va_list ap; seq_buf_printf() local 87 va_start(ap, fmt); seq_buf_printf() 88 ret = seq_buf_vprintf(s, fmt, ap); seq_buf_printf() 89 va_end(ap); seq_buf_printf()
|
/linux-4.1.27/tools/perf/ui/ |
H A D | helpline.c | 20 va_list ap __maybe_unused) nop_helpline__show() 43 void ui_helpline__vpush(const char *fmt, va_list ap) ui_helpline__vpush() argument 47 if (vasprintf(&s, fmt, ap) < 0) ui_helpline__vpush() 48 vfprintf(stderr, fmt, ap); ui_helpline__vpush() 57 va_list ap; ui_helpline__fpush() local 59 va_start(ap, fmt); ui_helpline__fpush() 60 ui_helpline__vpush(fmt, ap); ui_helpline__fpush() 61 va_end(ap); ui_helpline__fpush() 70 int ui_helpline__vshow(const char *fmt, va_list ap) ui_helpline__vshow() argument 72 return helpline_fns->show(fmt, ap); ui_helpline__vshow()
|
H A D | helpline.h | 12 int (*show)(const char *fmt, va_list ap); 21 void ui_helpline__vpush(const char *fmt, va_list ap); 24 int ui_helpline__vshow(const char *fmt, va_list ap);
|
/linux-4.1.27/drivers/net/ppp/ |
H A D | ppp_async.c | 100 static int ppp_async_encode(struct asyncppp *ap); 102 static int ppp_async_push(struct asyncppp *ap); 103 static void ppp_async_flush_output(struct asyncppp *ap); 104 static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf, 110 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 138 struct asyncppp *ap; ap_get() local 141 ap = tty->disc_data; ap_get() 142 if (ap != NULL) ap_get() 143 atomic_inc(&ap->refcnt); ap_get() 145 return ap; ap_get() 148 static void ap_put(struct asyncppp *ap) ap_put() argument 150 if (atomic_dec_and_test(&ap->refcnt)) ap_put() 151 up(&ap->dead_sem); ap_put() 161 struct asyncppp *ap; ppp_asynctty_open() local 169 ap = kzalloc(sizeof(*ap), GFP_KERNEL); ppp_asynctty_open() 170 if (!ap) ppp_asynctty_open() 174 ap->tty = tty; ppp_asynctty_open() 175 ap->mru = PPP_MRU; ppp_asynctty_open() 176 spin_lock_init(&ap->xmit_lock); ppp_asynctty_open() 177 spin_lock_init(&ap->recv_lock); ppp_asynctty_open() 178 ap->xaccm[0] = ~0U; ppp_asynctty_open() 179 ap->xaccm[3] = 0x60000000U; ppp_asynctty_open() 180 ap->raccm = ~0U; ppp_asynctty_open() 181 ap->optr = ap->obuf; ppp_asynctty_open() 182 ap->olim = ap->obuf; ppp_asynctty_open() 183 ap->lcp_fcs = -1; ppp_asynctty_open() 185 skb_queue_head_init(&ap->rqueue); ppp_asynctty_open() 186 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); ppp_asynctty_open() 188 atomic_set(&ap->refcnt, 1); ppp_asynctty_open() 189 sema_init(&ap->dead_sem, 0); ppp_asynctty_open() 191 ap->chan.private = ap; ppp_asynctty_open() 192 ap->chan.ops = &async_ops; ppp_asynctty_open() 193 ap->chan.mtu = PPP_MRU; ppp_asynctty_open() 195 ap->chan.speed = speed; ppp_asynctty_open() 196 err = ppp_register_channel(&ap->chan); ppp_asynctty_open() 200 tty->disc_data = ap; ppp_asynctty_open() 205 kfree(ap); ppp_asynctty_open() 221 struct asyncppp *ap; ppp_asynctty_close() local 224 ap = tty->disc_data; ppp_asynctty_close() 227 if (!ap) ppp_asynctty_close() 231 * We have now ensured that nobody can start using ap from now ppp_asynctty_close() 237 if (!atomic_dec_and_test(&ap->refcnt)) ppp_asynctty_close() 238 down(&ap->dead_sem); ppp_asynctty_close() 239 tasklet_kill(&ap->tsk); ppp_asynctty_close() 241 ppp_unregister_channel(&ap->chan); ppp_asynctty_close() 242 kfree_skb(ap->rpkt); ppp_asynctty_close() 243 skb_queue_purge(&ap->rqueue); ppp_asynctty_close() 244 kfree_skb(ap->tpkt); ppp_asynctty_close() 245 kfree(ap); ppp_asynctty_close() 291 struct asyncppp *ap = ap_get(tty); ppp_asynctty_ioctl() local 295 if (!ap) ppp_asynctty_ioctl() 301 if (put_user(ppp_channel_index(&ap->chan), p)) ppp_asynctty_ioctl() 308 if (put_user(ppp_unit_number(&ap->chan), p)) ppp_asynctty_ioctl() 316 ppp_async_flush_output(ap); ppp_asynctty_ioctl() 332 ap_put(ap); ppp_asynctty_ioctl() 348 struct asyncppp *ap = ap_get(tty); ppp_asynctty_receive() local 351 if (!ap) ppp_asynctty_receive() 353 spin_lock_irqsave(&ap->recv_lock, flags); ppp_asynctty_receive() 354 ppp_async_input(ap, buf, cflags, count); ppp_asynctty_receive() 355 spin_unlock_irqrestore(&ap->recv_lock, flags); ppp_asynctty_receive() 356 if (!skb_queue_empty(&ap->rqueue)) ppp_asynctty_receive() 357 tasklet_schedule(&ap->tsk); ppp_asynctty_receive() 358 ap_put(ap); ppp_asynctty_receive() 365 struct asyncppp *ap = ap_get(tty); ppp_asynctty_wakeup() local 368 if (!ap) ppp_asynctty_wakeup() 370 set_bit(XMIT_WAKEUP, &ap->xmit_flags); ppp_asynctty_wakeup() 371 tasklet_schedule(&ap->tsk); ppp_asynctty_wakeup() 372 ap_put(ap); ppp_asynctty_wakeup() 409 struct asyncppp *ap 
= chan->private; ppp_async_ioctl() local 418 val = ap->flags | ap->rbits; ppp_async_ioctl() 426 ap->flags = val & ~SC_RCV_BITS; ppp_async_ioctl() 427 spin_lock_irq(&ap->recv_lock); ppp_async_ioctl() 428 ap->rbits = val & SC_RCV_BITS; ppp_async_ioctl() 429 spin_unlock_irq(&ap->recv_lock); ppp_async_ioctl() 434 if (put_user(ap->xaccm[0], (u32 __user *)argp)) ppp_async_ioctl() 439 if (get_user(ap->xaccm[0], (u32 __user *)argp)) ppp_async_ioctl() 445 if (put_user(ap->raccm, (u32 __user *)argp)) ppp_async_ioctl() 450 if (get_user(ap->raccm, (u32 __user *)argp)) ppp_async_ioctl() 456 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) ppp_async_ioctl() 465 memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); ppp_async_ioctl() 470 if (put_user(ap->mru, p)) ppp_async_ioctl() 479 ap->mru = val; ppp_async_ioctl() 497 struct asyncppp *ap = (struct asyncppp *) arg; ppp_async_process() local 501 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) { ppp_async_process() 503 ppp_input_error(&ap->chan, 0); ppp_async_process() 504 ppp_input(&ap->chan, skb); ppp_async_process() 508 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap)) ppp_async_process() 509 ppp_output_wakeup(&ap->chan); ppp_async_process() 520 * Assumes ap->tpkt != 0 on entry. 524 #define PUT_BYTE(ap, buf, c, islcp) do { \ 525 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ 533 ppp_async_encode(struct asyncppp *ap) ppp_async_encode() argument 540 buf = ap->obuf; ppp_async_encode() 541 ap->olim = buf; ppp_async_encode() 542 ap->optr = buf; ppp_async_encode() 543 i = ap->tpkt_pos; ppp_async_encode() 544 data = ap->tpkt->data; ppp_async_encode() 545 count = ap->tpkt->len; ppp_async_encode() 546 fcs = ap->tfcs; ppp_async_encode() 558 async_lcp_peek(ap, data, count, 0); ppp_async_encode() 565 time_after_eq(jiffies, ap->last_xmit + flag_time)) ppp_async_encode() 567 ap->last_xmit = jiffies; ppp_async_encode() 573 if ((ap->flags & SC_COMP_AC) == 0 || islcp) { ppp_async_encode() 574 PUT_BYTE(ap, buf, 0xff, islcp); ppp_async_encode() 576 PUT_BYTE(ap, buf, 0x03, islcp); ppp_async_encode() 586 buflim = ap->obuf + OBUFSIZE - 6; ppp_async_encode() 589 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT)) ppp_async_encode() 592 PUT_BYTE(ap, buf, c, islcp); ppp_async_encode() 599 ap->olim = buf; ppp_async_encode() 600 ap->tpkt_pos = i; ppp_async_encode() 601 ap->tfcs = fcs; ppp_async_encode() 610 PUT_BYTE(ap, buf, c, islcp); ppp_async_encode() 612 PUT_BYTE(ap, buf, c, islcp); ppp_async_encode() 614 ap->olim = buf; ppp_async_encode() 616 consume_skb(ap->tpkt); ppp_async_encode() 617 ap->tpkt = NULL; ppp_async_encode() 634 struct asyncppp *ap = chan->private; ppp_async_send() local 636 ppp_async_push(ap); ppp_async_send() 638 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags)) ppp_async_send() 640 ap->tpkt = skb; ppp_async_send() 641 ap->tpkt_pos = 0; ppp_async_send() 643 ppp_async_push(ap); ppp_async_send() 651 ppp_async_push(struct asyncppp *ap) ppp_async_push() argument 654 struct tty_struct *tty = ap->tty; ppp_async_push() 666 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) ppp_async_push() 668 spin_lock_bh(&ap->xmit_lock); ppp_async_push() 670 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags)) ppp_async_push() 672 if (!tty_stuffed && ap->optr < ap->olim) { ppp_async_push() 673 avail = ap->olim - ap->optr; ppp_async_push() 675 sent = tty->ops->write(tty, ap->optr, avail); ppp_async_push() 678 ap->optr += sent; ppp_async_push() 683 if (ap->optr >= ap->olim && ap->tpkt) { ppp_async_push() 684 if (ppp_async_encode(ap)) { 
ppp_async_push() 685 /* finished processing ap->tpkt */ ppp_async_push() 686 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_async_push() 700 clear_bit(XMIT_BUSY, &ap->xmit_flags); ppp_async_push() 702 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) || ppp_async_push() 703 (!tty_stuffed && ap->tpkt))) ppp_async_push() 706 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) ppp_async_push() 709 spin_unlock_bh(&ap->xmit_lock); ppp_async_push() 713 clear_bit(XMIT_BUSY, &ap->xmit_flags); ppp_async_push() 714 if (ap->tpkt) { ppp_async_push() 715 kfree_skb(ap->tpkt); ppp_async_push() 716 ap->tpkt = NULL; ppp_async_push() 717 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_async_push() 720 ap->optr = ap->olim; ppp_async_push() 721 spin_unlock_bh(&ap->xmit_lock); ppp_async_push() 731 ppp_async_flush_output(struct asyncppp *ap) ppp_async_flush_output() argument 735 spin_lock_bh(&ap->xmit_lock); ppp_async_flush_output() 736 ap->optr = ap->olim; ppp_async_flush_output() 737 if (ap->tpkt != NULL) { ppp_async_flush_output() 738 kfree_skb(ap->tpkt); ppp_async_flush_output() 739 ap->tpkt = NULL; ppp_async_flush_output() 740 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_async_flush_output() 743 spin_unlock_bh(&ap->xmit_lock); ppp_async_flush_output() 745 ppp_output_wakeup(&ap->chan); ppp_async_flush_output() 754 scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count) scan_ordinary() argument 761 (c < 0x20 && (ap->raccm & (1 << c)) != 0)) scan_ordinary() 769 process_input_packet(struct asyncppp *ap) process_input_packet() argument 775 skb = ap->rpkt; process_input_packet() 776 if (ap->state & (SC_TOSS | SC_ESCAPE)) process_input_packet() 811 async_lcp_peek(ap, p, skb->len, 1); process_input_packet() 815 skb->cb[0] = ap->state; process_input_packet() 816 skb_queue_tail(&ap->rqueue, skb); process_input_packet() 817 ap->rpkt = NULL; process_input_packet() 818 ap->state = 0; process_input_packet() 823 ap->state = SC_PREV_ERROR; process_input_packet() 835 ppp_async_input(struct asyncppp *ap, const unsigned char *buf, ppp_async_input() argument 843 if (~ap->rbits & SC_RCV_BITS) { ppp_async_input() 853 ap->rbits |= s; ppp_async_input() 858 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE) ppp_async_input() 861 n = scan_ordinary(ap, buf, count); ppp_async_input() 864 if (flags && (ap->state & SC_TOSS) == 0) { ppp_async_input() 872 ap->state |= SC_TOSS; ppp_async_input() 874 } else if (n > 0 && (ap->state & SC_TOSS) == 0) { ppp_async_input() 876 skb = ap->rpkt; ppp_async_input() 878 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); ppp_async_input() 881 ap->rpkt = skb; ppp_async_input() 895 ap->state |= SC_TOSS; ppp_async_input() 899 if (ap->state & SC_ESCAPE) { ppp_async_input() 901 ap->state &= ~SC_ESCAPE; ppp_async_input() 911 ap->state |= SC_TOSS; ppp_async_input() 913 process_input_packet(ap); ppp_async_input() 915 ap->state |= SC_ESCAPE; ppp_async_input() 916 } else if (I_IXON(ap->tty)) { ppp_async_input() 917 if (c == START_CHAR(ap->tty)) ppp_async_input() 918 start_tty(ap->tty); ppp_async_input() 919 else if (c == STOP_CHAR(ap->tty)) ppp_async_input() 920 stop_tty(ap->tty); ppp_async_input() 934 ap->state |= SC_TOSS; ppp_async_input() 955 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, async_lcp_peek() argument 983 ap->lcp_fcs = fcs; async_lcp_peek() 988 fcs ^= ap->lcp_fcs; async_lcp_peek() 989 ap->lcp_fcs = -1; async_lcp_peek() 1004 ap->mru = val; async_lcp_peek() 1006 ap->chan.mtu = val; async_lcp_peek() 1011 ap->raccm = val; async_lcp_peek() 1013 ap->xaccm[0] = val; async_lcp_peek()
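The PUT_BYTE() macro quoted above implements RFC 1662 byte stuffing: a byte is escaped when the frame is still LCP and the byte is a control character, or when its bit is set in the transmit async control-character map (xaccm); escaping emits 0x7d followed by the byte XOR 0x20. A self-contained sketch of just that rule (buffer handling simplified; this is not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define PPP_ESCAPE 0x7d

    /* Return 1 if byte c must be escaped on this channel. */
    static int must_escape(const uint32_t xaccm[8], int islcp, uint8_t c)
    {
        if (islcp && c < 0x20)          /* LCP frames escape all control chars */
            return 1;
        return (xaccm[c >> 5] >> (c & 0x1f)) & 1;   /* per-byte ACCM bit */
    }

    /* Append c to out[], stuffing it if needed; return the new length. */
    static size_t put_byte(uint8_t *out, size_t len,
                           const uint32_t xaccm[8], int islcp, uint8_t c)
    {
        if (must_escape(xaccm, islcp, c)) {
            out[len++] = PPP_ESCAPE;
            out[len++] = c ^ 0x20;
        } else {
            out[len++] = c;
        }
        return len;
    }

    int main(void)
    {
        /* Default map as the driver sets it up: escape 0x00-0x1f plus 0x7d/0x7e. */
        uint32_t xaccm[8] = { ~0U, 0, 0, 0x60000000U, 0, 0, 0, 0 };
        uint8_t out[16];
        size_t n = 0;

        n = put_byte(out, n, xaccm, 0, 0x03);   /* control char: escaped */
        n = put_byte(out, n, xaccm, 0, 0x41);   /* 'A': sent as-is */
        for (size_t i = 0; i < n; i++)
            printf("%02x ", out[i]);
        printf("\n");
        return 0;
    }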
|
H A D | ppp_synctty.c | 92 static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *); 97 static int ppp_sync_push(struct syncppp *ap); 98 static void ppp_sync_flush_output(struct syncppp *ap); 99 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, 139 struct syncppp *ap; sp_get() local 142 ap = tty->disc_data; sp_get() 143 if (ap != NULL) sp_get() 144 atomic_inc(&ap->refcnt); sp_get() 146 return ap; sp_get() 149 static void sp_put(struct syncppp *ap) sp_put() argument 151 if (atomic_dec_and_test(&ap->refcnt)) sp_put() 152 complete(&ap->dead_cmp); sp_put() 161 struct syncppp *ap; ppp_sync_open() local 168 ap = kzalloc(sizeof(*ap), GFP_KERNEL); ppp_sync_open() 170 if (!ap) ppp_sync_open() 174 ap->tty = tty; ppp_sync_open() 175 ap->mru = PPP_MRU; ppp_sync_open() 176 spin_lock_init(&ap->xmit_lock); ppp_sync_open() 177 spin_lock_init(&ap->recv_lock); ppp_sync_open() 178 ap->xaccm[0] = ~0U; ppp_sync_open() 179 ap->xaccm[3] = 0x60000000U; ppp_sync_open() 180 ap->raccm = ~0U; ppp_sync_open() 182 skb_queue_head_init(&ap->rqueue); ppp_sync_open() 183 tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap); ppp_sync_open() 185 atomic_set(&ap->refcnt, 1); ppp_sync_open() 186 init_completion(&ap->dead_cmp); ppp_sync_open() 188 ap->chan.private = ap; ppp_sync_open() 189 ap->chan.ops = &sync_ops; ppp_sync_open() 190 ap->chan.mtu = PPP_MRU; ppp_sync_open() 191 ap->chan.hdrlen = 2; /* for A/C bytes */ ppp_sync_open() 193 ap->chan.speed = speed; ppp_sync_open() 194 err = ppp_register_channel(&ap->chan); ppp_sync_open() 198 tty->disc_data = ap; ppp_sync_open() 203 kfree(ap); ppp_sync_open() 219 struct syncppp *ap; ppp_sync_close() local 222 ap = tty->disc_data; ppp_sync_close() 225 if (!ap) ppp_sync_close() 229 * We have now ensured that nobody can start using ap from now ppp_sync_close() 235 if (!atomic_dec_and_test(&ap->refcnt)) ppp_sync_close() 236 wait_for_completion(&ap->dead_cmp); ppp_sync_close() 237 tasklet_kill(&ap->tsk); ppp_sync_close() 239 ppp_unregister_channel(&ap->chan); ppp_sync_close() 240 skb_queue_purge(&ap->rqueue); ppp_sync_close() 241 kfree_skb(ap->tpkt); ppp_sync_close() 242 kfree(ap); ppp_sync_close() 283 struct syncppp *ap = sp_get(tty); ppp_synctty_ioctl() local 287 if (!ap) ppp_synctty_ioctl() 293 if (put_user(ppp_channel_index(&ap->chan), p)) ppp_synctty_ioctl() 300 if (put_user(ppp_unit_number(&ap->chan), p)) ppp_synctty_ioctl() 308 ppp_sync_flush_output(ap); ppp_synctty_ioctl() 324 sp_put(ap); ppp_synctty_ioctl() 340 struct syncppp *ap = sp_get(tty); ppp_sync_receive() local 343 if (!ap) ppp_sync_receive() 345 spin_lock_irqsave(&ap->recv_lock, flags); ppp_sync_receive() 346 ppp_sync_input(ap, buf, cflags, count); ppp_sync_receive() 347 spin_unlock_irqrestore(&ap->recv_lock, flags); ppp_sync_receive() 348 if (!skb_queue_empty(&ap->rqueue)) ppp_sync_receive() 349 tasklet_schedule(&ap->tsk); ppp_sync_receive() 350 sp_put(ap); ppp_sync_receive() 357 struct syncppp *ap = sp_get(tty); ppp_sync_wakeup() local 360 if (!ap) ppp_sync_wakeup() 362 set_bit(XMIT_WAKEUP, &ap->xmit_flags); ppp_sync_wakeup() 363 tasklet_schedule(&ap->tsk); ppp_sync_wakeup() 364 sp_put(ap); ppp_sync_wakeup() 401 struct syncppp *ap = chan->private; ppp_sync_ioctl() local 410 val = ap->flags | ap->rbits; ppp_sync_ioctl() 418 ap->flags = val & ~SC_RCV_BITS; ppp_sync_ioctl() 419 spin_lock_irq(&ap->recv_lock); ppp_sync_ioctl() 420 ap->rbits = val & SC_RCV_BITS; ppp_sync_ioctl() 421 spin_unlock_irq(&ap->recv_lock); ppp_sync_ioctl() 426 if (put_user(ap->xaccm[0], p)) 
ppp_sync_ioctl() 431 if (get_user(ap->xaccm[0], p)) ppp_sync_ioctl() 437 if (put_user(ap->raccm, p)) ppp_sync_ioctl() 442 if (get_user(ap->raccm, p)) ppp_sync_ioctl() 448 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) ppp_sync_ioctl() 457 memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); ppp_sync_ioctl() 462 if (put_user(ap->mru, (int __user *) argp)) ppp_sync_ioctl() 471 ap->mru = val; ppp_sync_ioctl() 488 struct syncppp *ap = (struct syncppp *) arg; ppp_sync_process() local 492 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) { ppp_sync_process() 495 ppp_input_error(&ap->chan, 0); ppp_sync_process() 499 ppp_input(&ap->chan, skb); ppp_sync_process() 503 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap)) ppp_sync_process() 504 ppp_output_wakeup(&ap->chan); ppp_sync_process() 512 ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) ppp_sync_txmunge() argument 528 if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp) ppp_sync_txmunge() 532 if ((ap->flags & SC_COMP_AC) == 0 || islcp) { ppp_sync_txmunge() 550 ap->last_xmit = jiffies; ppp_sync_txmunge() 552 if (skb && ap->flags & SC_LOG_OUTPKT) ppp_sync_txmunge() 571 struct syncppp *ap = chan->private; ppp_sync_send() local 573 ppp_sync_push(ap); ppp_sync_send() 575 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags)) ppp_sync_send() 577 skb = ppp_sync_txmunge(ap, skb); ppp_sync_send() 579 ap->tpkt = skb; ppp_sync_send() 581 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_sync_send() 583 ppp_sync_push(ap); ppp_sync_send() 591 ppp_sync_push(struct syncppp *ap) ppp_sync_push() argument 594 struct tty_struct *tty = ap->tty; ppp_sync_push() 597 if (!spin_trylock_bh(&ap->xmit_lock)) ppp_sync_push() 600 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags)) ppp_sync_push() 602 if (!tty_stuffed && ap->tpkt) { ppp_sync_push() 604 sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len); ppp_sync_push() 607 if (sent < ap->tpkt->len) { ppp_sync_push() 610 consume_skb(ap->tpkt); ppp_sync_push() 611 ap->tpkt = NULL; ppp_sync_push() 612 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_sync_push() 618 spin_unlock_bh(&ap->xmit_lock); ppp_sync_push() 619 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) || ppp_sync_push() 620 (!tty_stuffed && ap->tpkt))) ppp_sync_push() 622 if (!spin_trylock_bh(&ap->xmit_lock)) ppp_sync_push() 628 if (ap->tpkt) { ppp_sync_push() 629 kfree_skb(ap->tpkt); ppp_sync_push() 630 ap->tpkt = NULL; ppp_sync_push() 631 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_sync_push() 634 spin_unlock_bh(&ap->xmit_lock); ppp_sync_push() 643 ppp_sync_flush_output(struct syncppp *ap) ppp_sync_flush_output() argument 647 spin_lock_bh(&ap->xmit_lock); ppp_sync_flush_output() 648 if (ap->tpkt != NULL) { ppp_sync_flush_output() 649 kfree_skb(ap->tpkt); ppp_sync_flush_output() 650 ap->tpkt = NULL; ppp_sync_flush_output() 651 clear_bit(XMIT_FULL, &ap->xmit_flags); ppp_sync_flush_output() 654 spin_unlock_bh(&ap->xmit_lock); ppp_sync_flush_output() 656 ppp_output_wakeup(&ap->chan); ppp_sync_flush_output() 670 ppp_sync_input(struct syncppp *ap, const unsigned char *buf, ppp_sync_input() argument 679 if (ap->flags & SC_LOG_INPKT) ppp_sync_input() 683 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); ppp_sync_input() 720 skb_queue_tail(&ap->rqueue, skb); ppp_sync_input() 727 skb_queue_tail(&ap->rqueue, skb); ppp_sync_input()
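Both PPP line disciplines above guard teardown the same way: every user of the per-tty state takes a reference (ap_get()/sp_get()), the matching put signals when the count reaches zero, and close() drops its own reference and waits for that signal before freeing. A loose userspace analogue of the idea using C11 atomics and a POSIX semaphore (all names are invented; the kernel versions use atomic_t with a semaphore or a completion):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct chan {
        atomic_int refcnt;  /* one reference is owned by the close path */
        sem_t dead;         /* posted when the last user drops its ref */
    };

    static struct chan *chan_get(struct chan *c)
    {
        atomic_fetch_add(&c->refcnt, 1);
        return c;
    }

    static void chan_put(struct chan *c)
    {
        if (atomic_fetch_sub(&c->refcnt, 1) == 1)
            sem_post(&c->dead);     /* we were the last user */
    }

    static void *worker(void *arg)
    {
        struct chan *c = arg;       /* a reference was taken for us before create */

        usleep(10000);              /* pretend to use the channel */
        chan_put(c);
        return NULL;
    }

    int main(void)
    {
        struct chan *c = calloc(1, sizeof(*c));
        pthread_t t;

        atomic_init(&c->refcnt, 1);
        sem_init(&c->dead, 0, 0);

        pthread_create(&t, NULL, worker, chan_get(c));

        /* close path: drop our own ref; if someone still holds one, wait. */
        if (atomic_fetch_sub(&c->refcnt, 1) != 1)
            sem_wait(&c->dead);

        pthread_join(t, NULL);
        free(c);
        printf("torn down safely\n");
        return 0;
    }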
|
/linux-4.1.27/net/irda/irnet/ |
H A D | irnet_ppp.c | 45 irnet_ctrl_write(irnet_socket * ap, irnet_ctrl_write() argument 54 DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); irnet_ctrl_write() 112 memcpy(ap->rname, start + 5, length - 5); irnet_ctrl_write() 113 ap->rname[length - 5] = '\0'; irnet_ctrl_write() 116 ap->rname[0] = '\0'; irnet_ctrl_write() 117 DEBUG(CTRL_INFO, "Got rname = ``%s''\n", ap->rname); irnet_ctrl_write() 150 ap->rsaddr = addr; irnet_ctrl_write() 151 DEBUG(CTRL_INFO, "Got rsaddr = %08x\n", ap->rsaddr); irnet_ctrl_write() 156 ap->rdaddr = addr; irnet_ctrl_write() 157 DEBUG(CTRL_INFO, "Got rdaddr = %08x\n", ap->rdaddr); irnet_ctrl_write() 185 irnet_get_discovery_log(irnet_socket * ap) irnet_get_discovery_log() argument 190 ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask, irnet_get_discovery_log() 194 if(ap->discoveries == NULL) irnet_get_discovery_log() 195 ap->disco_number = -1; irnet_get_discovery_log() 198 ap->discoveries, ap->disco_number); irnet_get_discovery_log() 211 * State of the ap->disco_XXX variables : 217 irnet_read_discovery_log(irnet_socket *ap, char *event, int buf_size) irnet_read_discovery_log() argument 221 DENTER(CTRL_TRACE, "(ap=0x%p, event=0x%p)\n", irnet_read_discovery_log() 222 ap, event); irnet_read_discovery_log() 225 if(ap->disco_number == -1) irnet_read_discovery_log() 232 if(ap->discoveries == NULL) irnet_read_discovery_log() 233 irnet_get_discovery_log(ap); irnet_read_discovery_log() 236 if(ap->disco_index < ap->disco_number) irnet_read_discovery_log() 241 ap->discoveries[ap->disco_index].daddr, irnet_read_discovery_log() 242 ap->discoveries[ap->disco_index].info, irnet_read_discovery_log() 243 ap->discoveries[ap->disco_index].saddr, irnet_read_discovery_log() 244 ap->discoveries[ap->disco_index].hints[0], irnet_read_discovery_log() 245 ap->discoveries[ap->disco_index].hints[1]); irnet_read_discovery_log() 247 ap->disco_index, ap->discoveries[ap->disco_index].info); irnet_read_discovery_log() 252 ap->disco_index++; irnet_read_discovery_log() 256 if(ap->disco_index >= ap->disco_number) irnet_read_discovery_log() 260 ap->discoveries); irnet_read_discovery_log() 261 if(ap->discoveries != NULL) irnet_read_discovery_log() 264 kfree(ap->discoveries); irnet_read_discovery_log() 265 ap->discoveries = NULL; irnet_read_discovery_log() 267 ap->disco_number = -1; irnet_read_discovery_log() 279 irnet_ctrl_read(irnet_socket * ap, irnet_ctrl_read() argument 288 DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); irnet_ctrl_read() 292 if (irnet_read_discovery_log(ap, event, sizeof(event))) irnet_ctrl_read() 313 if(ap->event_index != irnet_events.index) irnet_ctrl_read() 336 switch(irnet_events.log[ap->event_index].event) irnet_ctrl_read() 341 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 342 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 343 irnet_events.log[ap->event_index].saddr, irnet_ctrl_read() 344 irnet_events.log[ap->event_index].hints.byte[0], irnet_ctrl_read() 345 irnet_events.log[ap->event_index].hints.byte[1]); irnet_ctrl_read() 350 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 351 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 352 irnet_events.log[ap->event_index].saddr, irnet_ctrl_read() 353 irnet_events.log[ap->event_index].hints.byte[0], irnet_ctrl_read() 354 irnet_events.log[ap->event_index].hints.byte[1]); irnet_ctrl_read() 358 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 359 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 360 irnet_events.log[ap->event_index].unit); 
irnet_ctrl_read() 364 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 365 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 366 irnet_events.log[ap->event_index].unit); irnet_ctrl_read() 370 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 371 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 372 irnet_events.log[ap->event_index].saddr); irnet_ctrl_read() 376 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 377 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 378 irnet_events.log[ap->event_index].unit); irnet_ctrl_read() 382 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 383 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 384 irnet_events.log[ap->event_index].unit); irnet_ctrl_read() 388 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 389 irnet_events.log[ap->event_index].name, irnet_ctrl_read() 390 irnet_events.log[ap->event_index].unit); irnet_ctrl_read() 394 irnet_events.log[ap->event_index].daddr, irnet_ctrl_read() 395 irnet_events.log[ap->event_index].name); irnet_ctrl_read() 401 ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS; irnet_ctrl_read() 422 irnet_ctrl_poll(irnet_socket * ap, irnet_ctrl_poll() argument 428 DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); irnet_ctrl_poll() 433 if(ap->event_index != irnet_events.index) irnet_ctrl_poll() 436 if(ap->disco_number != -1) irnet_ctrl_poll() 439 if(ap->discoveries == NULL) irnet_ctrl_poll() 440 irnet_get_discovery_log(ap); irnet_ctrl_poll() 442 if(ap->disco_number != -1) irnet_ctrl_poll() 469 struct irnet_socket * ap; dev_irnet_open() local 481 ap = kzalloc(sizeof(*ap), GFP_KERNEL); dev_irnet_open() 482 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); dev_irnet_open() 485 ap->file = file; dev_irnet_open() 488 ap->ppp_open = 0; dev_irnet_open() 489 ap->chan.private = ap; dev_irnet_open() 490 ap->chan.ops = &irnet_ppp_ops; dev_irnet_open() 491 ap->chan.mtu = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); dev_irnet_open() 492 ap->chan.hdrlen = 2 + TTP_MAX_HEADER; /* for A/C + Max IrDA hdr */ dev_irnet_open() 494 ap->mru = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); dev_irnet_open() 495 ap->xaccm[0] = ~0U; dev_irnet_open() 496 ap->xaccm[3] = 0x60000000U; dev_irnet_open() 497 ap->raccm = ~0U; dev_irnet_open() 500 err = irda_irnet_create(ap); dev_irnet_open() 504 kfree(ap); dev_irnet_open() 510 ap->event_index = irnet_events.index; /* Cancel all past events */ dev_irnet_open() 512 mutex_init(&ap->lock); dev_irnet_open() 515 file->private_data = ap; dev_irnet_open() 517 DEXIT(FS_TRACE, " - ap=0x%p\n", ap); dev_irnet_open() 532 irnet_socket * ap = file->private_data; dev_irnet_close() local 534 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", dev_irnet_close() 535 file, ap); dev_irnet_close() 536 DABORT(ap == NULL, 0, FS_ERROR, "ap is NULL !!!\n"); dev_irnet_close() 542 irda_irnet_destroy(ap); dev_irnet_close() 545 if(ap->ppp_open) dev_irnet_close() 548 ap->ppp_open = 0; dev_irnet_close() 549 ppp_unregister_channel(&ap->chan); dev_irnet_close() 552 kfree(ap); dev_irnet_close() 569 irnet_socket * ap = file->private_data; dev_irnet_write() local 571 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", dev_irnet_write() 572 file, ap, count); dev_irnet_write() 573 DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); dev_irnet_write() 576 if(ap->ppp_open) dev_irnet_write() 579 return irnet_ctrl_write(ap, buf, count); dev_irnet_write() 593 irnet_socket * ap = file->private_data; dev_irnet_read() local 595 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, 
count=%Zd)\n", dev_irnet_read() 596 file, ap, count); dev_irnet_read() 597 DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); dev_irnet_read() 600 if(ap->ppp_open) dev_irnet_read() 603 return irnet_ctrl_read(ap, file, buf, count); dev_irnet_read() 614 irnet_socket * ap = file->private_data; dev_irnet_poll() local 617 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", dev_irnet_poll() 618 file, ap); dev_irnet_poll() 621 DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); dev_irnet_poll() 624 if(!ap->ppp_open) dev_irnet_poll() 625 mask |= irnet_ctrl_poll(ap, file, wait); dev_irnet_poll() 643 irnet_socket * ap = file->private_data; dev_irnet_ioctl() local 648 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p, cmd=0x%X)\n", dev_irnet_ioctl() 649 file, ap, cmd); dev_irnet_ioctl() 652 DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); dev_irnet_ioctl() 668 /* PPP channel setup (ap->chan in configured in dev_irnet_open())*/ dev_irnet_ioctl() 669 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 672 err = ppp_register_channel(&ap->chan); dev_irnet_ioctl() 676 ap->ppp_open = 1; dev_irnet_ioctl() 680 irda_irnet_connect(ap); dev_irnet_ioctl() 685 mutex_unlock(&ap->lock); dev_irnet_ioctl() 692 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 695 if(ap->ppp_open) dev_irnet_ioctl() 697 ap->ppp_open = 0; dev_irnet_ioctl() 698 ppp_unregister_channel(&ap->chan); dev_irnet_ioctl() 704 mutex_unlock(&ap->lock); dev_irnet_ioctl() 710 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 713 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), dev_irnet_ioctl() 717 mutex_unlock(&ap->lock); dev_irnet_ioctl() 720 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 723 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), dev_irnet_ioctl() 727 mutex_unlock(&ap->lock); dev_irnet_ioctl() 747 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 750 err = ppp_irnet_ioctl(&ap->chan, cmd, arg); dev_irnet_ioctl() 752 mutex_unlock(&ap->lock); dev_irnet_ioctl() 760 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 764 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) dev_irnet_ioctl() 767 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) dev_irnet_ioctl() 771 mutex_unlock(&ap->lock); dev_irnet_ioctl() 776 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 780 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) dev_irnet_ioctl() 783 if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) dev_irnet_ioctl() 787 mutex_unlock(&ap->lock); dev_irnet_ioctl() 810 if (mutex_lock_interruptible(&ap->lock)) dev_irnet_ioctl() 812 ppp_output_wakeup(&ap->chan); dev_irnet_ioctl() 813 mutex_unlock(&ap->lock); dev_irnet_ioctl() 849 irnet_prepare_skb(irnet_socket * ap, irnet_prepare_skb() argument 857 DENTER(PPP_TRACE, "(ap=0x%p, skb=0x%p)\n", irnet_prepare_skb() 858 ap, skb); irnet_prepare_skb() 870 if((data[0] == 0) && (ap->flags & SC_COMP_PROT) && (!islcp)) irnet_prepare_skb() 874 needaddr = 2*((ap->flags & SC_COMP_AC) == 0 || islcp); irnet_prepare_skb() 877 if((skb_headroom(skb) < (ap->max_header_size + needaddr)) || irnet_prepare_skb() 885 new_skb = skb_realloc_headroom(skb, ap->max_header_size + needaddr); irnet_prepare_skb() 925 DENTER(PPP_TRACE, "(channel=0x%p, ap/self=0x%p)\n", ppp_irnet_send() 1024 irnet_socket * ap = (struct irnet_socket *) chan->private; ppp_irnet_ioctl() local 1030 DENTER(PPP_TRACE, "(channel=0x%p, ap=0x%p, cmd=0x%X)\n", 
ppp_irnet_ioctl() 1031 chan, ap, cmd); ppp_irnet_ioctl() 1034 DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); ppp_irnet_ioctl() 1041 val = ap->flags | ap->rbits; ppp_irnet_ioctl() 1049 ap->flags = val & ~SC_RCV_BITS; ppp_irnet_ioctl() 1050 ap->rbits = val & SC_RCV_BITS; ppp_irnet_ioctl() 1056 if(put_user(ap->xaccm[0], (u32 __user *) argp)) ppp_irnet_ioctl() 1061 if(get_user(ap->xaccm[0], (u32 __user *) argp)) ppp_irnet_ioctl() 1066 if(put_user(ap->raccm, (u32 __user *) argp)) ppp_irnet_ioctl() 1071 if(get_user(ap->raccm, (u32 __user *) argp)) ppp_irnet_ioctl() 1076 if(copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) ppp_irnet_ioctl() 1085 memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); ppp_irnet_ioctl() 1091 if(put_user(ap->mru, (int __user *) argp)) ppp_irnet_ioctl() 1100 ap->mru = val; ppp_irnet_ioctl()
|
/linux-4.1.27/drivers/net/ethernet/alteon/ |
H A D | acenic.c | 91 #define ACE_IS_TIGON_I(ap) 0 92 #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES 94 #define ACE_IS_TIGON_I(ap) (ap->version == 1) 95 #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries 460 struct ace_private *ap; acenic_probe_one() local 469 ap = netdev_priv(dev); acenic_probe_one() 470 ap->pdev = pdev; acenic_probe_one() 471 ap->name = pci_name(pdev); acenic_probe_one() 495 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command); acenic_probe_one() 498 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) { acenic_probe_one() 501 ap->name); acenic_probe_one() 502 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY; acenic_probe_one() 503 pci_write_config_word(ap->pdev, PCI_COMMAND, acenic_probe_one() 504 ap->pci_command); acenic_probe_one() 508 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency); acenic_probe_one() 509 if (ap->pci_latency <= 0x40) { acenic_probe_one() 510 ap->pci_latency = 0x40; acenic_probe_one() 511 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency); acenic_probe_one() 520 ap->regs = ioremap(dev->base_addr, 0x4000); acenic_probe_one() 521 if (!ap->regs) { acenic_probe_one() 524 ap->name, boards_found); acenic_probe_one() 532 ap->name); acenic_probe_one() 535 ap->name); acenic_probe_one() 539 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name); acenic_probe_one() 542 printk(KERN_INFO "%s: NetGear GA620 ", ap->name); acenic_probe_one() 547 ap->name); acenic_probe_one() 551 printk(KERN_INFO "%s: SGI AceNIC ", ap->name); acenic_probe_one() 554 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name); acenic_probe_one() 562 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) { acenic_probe_one() 574 ap->board_idx = BOARD_IDX_OVERFLOW; acenic_probe_one() 576 ap->board_idx = boards_found; acenic_probe_one() 578 ap->board_idx = BOARD_IDX_STATIC; acenic_probe_one() 588 ap->name = dev->name; acenic_probe_one() 590 if (ap->pci_using_dac) acenic_probe_one() 608 struct ace_private *ap = netdev_priv(dev); acenic_remove_one() local 609 struct ace_regs __iomem *regs = ap->regs; acenic_remove_one() 615 if (ap->version >= 2) acenic_remove_one() 636 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb; acenic_remove_one() 642 ringp = &ap->skb->rx_std_skbuff[i]; acenic_remove_one() 644 pci_unmap_page(ap->pdev, mapping, acenic_remove_one() 648 ap->rx_std_ring[i].size = 0; acenic_remove_one() 649 ap->skb->rx_std_skbuff[i].skb = NULL; acenic_remove_one() 654 if (ap->version >= 2) { acenic_remove_one() 656 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb; acenic_remove_one() 662 ringp = &ap->skb->rx_mini_skbuff[i]; acenic_remove_one() 664 pci_unmap_page(ap->pdev, mapping, acenic_remove_one() 668 ap->rx_mini_ring[i].size = 0; acenic_remove_one() 669 ap->skb->rx_mini_skbuff[i].skb = NULL; acenic_remove_one() 676 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb; acenic_remove_one() 681 ringp = &ap->skb->rx_jumbo_skbuff[i]; acenic_remove_one() 683 pci_unmap_page(ap->pdev, mapping, acenic_remove_one() 687 ap->rx_jumbo_ring[i].size = 0; acenic_remove_one() 688 ap->skb->rx_jumbo_skbuff[i].skb = NULL; acenic_remove_one() 706 struct ace_private *ap = netdev_priv(dev); ace_free_descriptors() local 709 if (ap->rx_std_ring != NULL) { ace_free_descriptors() 715 pci_free_consistent(ap->pdev, size, ap->rx_std_ring, ace_free_descriptors() 716 ap->rx_ring_base_dma); ace_free_descriptors() 717 ap->rx_std_ring = NULL; ace_free_descriptors() 718 ap->rx_jumbo_ring = NULL; ace_free_descriptors() 719 ap->rx_mini_ring = NULL; ace_free_descriptors() 720 ap->rx_return_ring = NULL; 
ace_free_descriptors() 722 if (ap->evt_ring != NULL) { ace_free_descriptors() 724 pci_free_consistent(ap->pdev, size, ap->evt_ring, ace_free_descriptors() 725 ap->evt_ring_dma); ace_free_descriptors() 726 ap->evt_ring = NULL; ace_free_descriptors() 728 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { ace_free_descriptors() 730 pci_free_consistent(ap->pdev, size, ap->tx_ring, ace_free_descriptors() 731 ap->tx_ring_dma); ace_free_descriptors() 733 ap->tx_ring = NULL; ace_free_descriptors() 735 if (ap->evt_prd != NULL) { ace_free_descriptors() 736 pci_free_consistent(ap->pdev, sizeof(u32), ace_free_descriptors() 737 (void *)ap->evt_prd, ap->evt_prd_dma); ace_free_descriptors() 738 ap->evt_prd = NULL; ace_free_descriptors() 740 if (ap->rx_ret_prd != NULL) { ace_free_descriptors() 741 pci_free_consistent(ap->pdev, sizeof(u32), ace_free_descriptors() 742 (void *)ap->rx_ret_prd, ace_free_descriptors() 743 ap->rx_ret_prd_dma); ace_free_descriptors() 744 ap->rx_ret_prd = NULL; ace_free_descriptors() 746 if (ap->tx_csm != NULL) { ace_free_descriptors() 747 pci_free_consistent(ap->pdev, sizeof(u32), ace_free_descriptors() 748 (void *)ap->tx_csm, ap->tx_csm_dma); ace_free_descriptors() 749 ap->tx_csm = NULL; ace_free_descriptors() 756 struct ace_private *ap = netdev_priv(dev); ace_allocate_descriptors() local 765 ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, ace_allocate_descriptors() 766 &ap->rx_ring_base_dma); ace_allocate_descriptors() 767 if (ap->rx_std_ring == NULL) ace_allocate_descriptors() 770 ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; ace_allocate_descriptors() 771 ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; ace_allocate_descriptors() 772 ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; ace_allocate_descriptors() 776 ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); ace_allocate_descriptors() 778 if (ap->evt_ring == NULL) ace_allocate_descriptors() 785 if (!ACE_IS_TIGON_I(ap)) { ace_allocate_descriptors() 788 ap->tx_ring = pci_alloc_consistent(ap->pdev, size, ace_allocate_descriptors() 789 &ap->tx_ring_dma); ace_allocate_descriptors() 791 if (ap->tx_ring == NULL) ace_allocate_descriptors() 795 ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), ace_allocate_descriptors() 796 &ap->evt_prd_dma); ace_allocate_descriptors() 797 if (ap->evt_prd == NULL) ace_allocate_descriptors() 800 ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), ace_allocate_descriptors() 801 &ap->rx_ret_prd_dma); ace_allocate_descriptors() 802 if (ap->rx_ret_prd == NULL) ace_allocate_descriptors() 805 ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), ace_allocate_descriptors() 806 &ap->tx_csm_dma); ace_allocate_descriptors() 807 if (ap->tx_csm == NULL) ace_allocate_descriptors() 825 struct ace_private *ap; ace_init_cleanup() local 827 ap = netdev_priv(dev); ace_init_cleanup() 831 if (ap->info) ace_init_cleanup() 832 pci_free_consistent(ap->pdev, sizeof(struct ace_info), ace_init_cleanup() 833 ap->info, ap->info_dma); ace_init_cleanup() 834 kfree(ap->skb); ace_init_cleanup() 835 kfree(ap->trace_buf); ace_init_cleanup() 840 iounmap(ap->regs); ace_init_cleanup() 862 struct ace_private *ap; ace_init() local 873 ap = netdev_priv(dev); ace_init() 874 regs = ap->regs; ace_init() 876 board_idx = ap->board_idx; ace_init() 917 tig_ver, ap->firmware_major, ap->firmware_minor, ace_init() 918 ap->firmware_fix); ace_init() 920 ap->version = 1; ace_init() 921 ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES; ace_init() 926 tig_ver, 
ap->firmware_major, ap->firmware_minor, ace_init() 927 ap->firmware_fix); ace_init() 937 ap->version = 2; ace_init() 938 ap->tx_ring_entries = MAX_TX_RING_ENTRIES; ace_init() 1006 pdev = ap->pdev; ace_init() 1026 ap->pci_latency); ace_init() 1039 if (ap->version >= 2) { ace_init() 1046 if (ap->pci_command & PCI_COMMAND_INVALIDATE) { ace_init() 1047 ap->pci_command &= ~PCI_COMMAND_INVALIDATE; ace_init() 1049 ap->pci_command); ace_init() 1053 } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) { ace_init() 1074 ap->pci_command &= ~PCI_COMMAND_INVALIDATE; ace_init() 1076 ap->pci_command); ace_init() 1121 if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) { ace_init() 1123 ap->pci_command |= PCI_COMMAND_FAST_BACK; ace_init() 1124 pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command); ace_init() 1132 ap->pci_using_dac = 1; ace_init() 1134 ap->pci_using_dac = 0; ace_init() 1145 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), ace_init() 1146 &ap->info_dma))) { ace_init() 1150 ap->info = info; ace_init() 1155 if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { ace_init() 1170 spin_lock_init(&ap->debug_lock); ace_init() 1171 ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1; ace_init() 1172 ap->last_std_rx = 0; ace_init() 1173 ap->last_mini_rx = 0; ace_init() 1176 memset(ap->info, 0, sizeof(struct ace_info)); ace_init() 1177 memset(ap->skb, 0, sizeof(struct ace_skb)); ace_init() 1183 ap->fw_running = 0; ace_init() 1185 tmp_ptr = ap->info_dma; ace_init() 1189 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event)); ace_init() 1191 set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma); ace_init() 1194 *(ap->evt_prd) = 0; ace_init() 1196 set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma); ace_init() 1209 tmp_ptr = ap->info_dma; ace_init() 1213 set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma); ace_init() 1218 memset(ap->rx_std_ring, 0, ace_init() 1222 ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM; ace_init() 1224 ap->rx_std_skbprd = 0; ace_init() 1225 atomic_set(&ap->cur_rx_bufs, 0); ace_init() 1228 (ap->rx_ring_base_dma + ace_init() 1234 memset(ap->rx_jumbo_ring, 0, ace_init() 1238 ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO; ace_init() 1240 ap->rx_jumbo_skbprd = 0; ace_init() 1241 atomic_set(&ap->cur_jumbo_bufs, 0); ace_init() 1243 memset(ap->rx_mini_ring, 0, ace_init() 1246 if (ap->version >= 2) { ace_init() 1248 (ap->rx_ring_base_dma + ace_init() 1257 ap->rx_mini_ring[i].flags = ace_init() 1265 ap->rx_mini_skbprd = 0; ace_init() 1266 atomic_set(&ap->cur_mini_bufs, 0); ace_init() 1269 (ap->rx_ring_base_dma + ace_init() 1277 memset(ap->rx_return_ring, 0, ace_init() 1280 set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma); ace_init() 1281 *(ap->rx_ret_prd) = 0; ace_init() 1285 if (ACE_IS_TIGON_I(ap)) { ace_init() 1286 ap->tx_ring = (__force struct tx_desc *) regs->Window; ace_init() 1289 writel(0, (__force void __iomem *)ap->tx_ring + i * 4); ace_init() 1293 memset(ap->tx_ring, 0, ace_init() 1296 set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma); ace_init() 1299 info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap); ace_init() 1305 if (!ACE_IS_TIGON_I(ap)) ace_init() 1312 set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma); ace_init() 1343 ap->name, ACE_MAX_MOD_PARMS); ace_init() 1369 if(ap->version >= 2) ace_init() 1382 ap->name); ace_init() 1395 "forcing auto negotiation\n", ap->name); ace_init() 1403 "negotiation\n", ap->name); ace_init() 1406 if ((option & 0x400) && (ap->version >= 2)) { ace_init() 1408 ap->name); ace_init() 1413 ap->link 
= tmp; ace_init() 1415 if (ap->version >= 2) ace_init() 1418 writel(ap->firmware_start, ®s->Pc); ace_init() 1428 ap->cur_rx = 0; ace_init() 1429 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0; ace_init() 1432 ace_set_txprd(regs, ap, 0); ace_init() 1453 while (time_before(jiffies, myjif) && !ap->fw_running) ace_init() 1456 if (!ap->fw_running) { ace_init() 1457 printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name); ace_init() 1459 ace_dump_trace(ap); ace_init() 1472 if (ap->version >= 2) ace_init() 1486 if (!test_and_set_bit(0, &ap->std_refill_busy)) ace_init() 1490 ap->name); ace_init() 1491 if (ap->version >= 2) { ace_init() 1492 if (!test_and_set_bit(0, &ap->mini_refill_busy)) ace_init() 1496 "the RX mini ring\n", ap->name); ace_init() 1508 struct ace_private *ap = netdev_priv(dev); ace_set_rxtx_parms() local 1509 struct ace_regs __iomem *regs = ap->regs; ace_set_rxtx_parms() 1510 int board_idx = ap->board_idx; ace_set_rxtx_parms() 1547 struct ace_private *ap = netdev_priv(dev); ace_watchdog() local 1548 struct ace_regs __iomem *regs = ap->regs; ace_watchdog() 1555 if (*ap->tx_csm != ap->tx_ret_csm) { ace_watchdog() 1572 struct ace_private *ap = netdev_priv(dev); ace_tasklet() local 1575 cur_size = atomic_read(&ap->cur_rx_bufs); ace_tasklet() 1577 !test_and_set_bit(0, &ap->std_refill_busy)) { ace_tasklet() 1584 if (ap->version >= 2) { ace_tasklet() 1585 cur_size = atomic_read(&ap->cur_mini_bufs); ace_tasklet() 1587 !test_and_set_bit(0, &ap->mini_refill_busy)) { ace_tasklet() 1596 cur_size = atomic_read(&ap->cur_jumbo_bufs); ace_tasklet() 1597 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && ace_tasklet() 1598 !test_and_set_bit(0, &ap->jumbo_refill_busy)) { ace_tasklet() 1604 ap->tasklet_pending = 0; ace_tasklet() 1611 static void ace_dump_trace(struct ace_private *ap) ace_dump_trace() argument 1614 if (!ap->trace_buf) ace_dump_trace() 1615 if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) ace_dump_trace() 1630 struct ace_private *ap = netdev_priv(dev); ace_load_std_rx_ring() local 1631 struct ace_regs __iomem *regs = ap->regs; ace_load_std_rx_ring() 1635 prefetchw(&ap->cur_rx_bufs); ace_load_std_rx_ring() 1637 idx = ap->rx_std_skbprd; ace_load_std_rx_ring() 1648 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), ace_load_std_rx_ring() 1652 ap->skb->rx_std_skbuff[idx].skb = skb; ace_load_std_rx_ring() 1653 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], ace_load_std_rx_ring() 1656 rd = &ap->rx_std_ring[idx]; ace_load_std_rx_ring() 1666 atomic_add(i, &ap->cur_rx_bufs); ace_load_std_rx_ring() 1667 ap->rx_std_skbprd = idx; ace_load_std_rx_ring() 1669 if (ACE_IS_TIGON_I(ap)) { ace_load_std_rx_ring() 1673 cmd.idx = ap->rx_std_skbprd; ace_load_std_rx_ring() 1681 clear_bit(0, &ap->std_refill_busy); ace_load_std_rx_ring() 1693 struct ace_private *ap = netdev_priv(dev); ace_load_mini_rx_ring() local 1694 struct ace_regs __iomem *regs = ap->regs; ace_load_mini_rx_ring() 1697 prefetchw(&ap->cur_mini_bufs); ace_load_mini_rx_ring() 1699 idx = ap->rx_mini_skbprd; ace_load_mini_rx_ring() 1709 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), ace_load_mini_rx_ring() 1713 ap->skb->rx_mini_skbuff[idx].skb = skb; ace_load_mini_rx_ring() 1714 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], ace_load_mini_rx_ring() 1717 rd = &ap->rx_mini_ring[idx]; ace_load_mini_rx_ring() 1727 atomic_add(i, &ap->cur_mini_bufs); ace_load_mini_rx_ring() 1729 ap->rx_mini_skbprd = idx; ace_load_mini_rx_ring() 1735 clear_bit(0, &ap->mini_refill_busy); ace_load_mini_rx_ring() 1750 struct 
ace_private *ap = netdev_priv(dev); ace_load_jumbo_rx_ring() local 1751 struct ace_regs __iomem *regs = ap->regs; ace_load_jumbo_rx_ring() 1754 idx = ap->rx_jumbo_skbprd; ace_load_jumbo_rx_ring() 1765 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), ace_load_jumbo_rx_ring() 1769 ap->skb->rx_jumbo_skbuff[idx].skb = skb; ace_load_jumbo_rx_ring() 1770 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], ace_load_jumbo_rx_ring() 1773 rd = &ap->rx_jumbo_ring[idx]; ace_load_jumbo_rx_ring() 1783 atomic_add(i, &ap->cur_jumbo_bufs); ace_load_jumbo_rx_ring() 1784 ap->rx_jumbo_skbprd = idx; ace_load_jumbo_rx_ring() 1786 if (ACE_IS_TIGON_I(ap)) { ace_load_jumbo_rx_ring() 1790 cmd.idx = ap->rx_jumbo_skbprd; ace_load_jumbo_rx_ring() 1798 clear_bit(0, &ap->jumbo_refill_busy); ace_load_jumbo_rx_ring() 1815 struct ace_private *ap; ace_handle_event() local 1817 ap = netdev_priv(dev); ace_handle_event() 1820 switch (ap->evt_ring[evtcsm].evt) { ace_handle_event() 1823 ap->name); ace_handle_event() 1824 ap->fw_running = 1; ace_handle_event() 1831 u16 code = ap->evt_ring[evtcsm].code; ace_handle_event() 1835 u32 state = readl(&ap->regs->GigLnkState); ace_handle_event() 1838 ap->name, ace_handle_event() 1846 ap->name); ace_handle_event() 1850 "UP\n", ap->name); ace_handle_event() 1854 "state %02x\n", ap->name, code); ace_handle_event() 1859 switch(ap->evt_ring[evtcsm].code) { ace_handle_event() 1862 ap->name); ace_handle_event() 1866 "error\n", ap->name); ace_handle_event() 1870 ap->name); ace_handle_event() 1874 ap->name, ap->evt_ring[evtcsm].code); ace_handle_event() 1881 if (ap->skb->rx_jumbo_skbuff[i].skb) { ace_handle_event() 1882 ap->rx_jumbo_ring[i].size = 0; ace_handle_event() 1883 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); ace_handle_event() 1884 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); ace_handle_event() 1885 ap->skb->rx_jumbo_skbuff[i].skb = NULL; ace_handle_event() 1889 if (ACE_IS_TIGON_I(ap)) { ace_handle_event() 1894 ace_issue_cmd(ap->regs, &cmd); ace_handle_event() 1896 writel(0, &((ap->regs)->RxJumboPrd)); ace_handle_event() 1900 ap->jumbo = 0; ace_handle_event() 1901 ap->rx_jumbo_skbprd = 0; ace_handle_event() 1903 ap->name); ace_handle_event() 1904 clear_bit(0, &ap->jumbo_refill_busy); ace_handle_event() 1909 ap->name, ap->evt_ring[evtcsm].evt); ace_handle_event() 1920 struct ace_private *ap = netdev_priv(dev); ace_rx_int() local 1926 prefetchw(&ap->cur_rx_bufs); ace_rx_int() 1927 prefetchw(&ap->cur_mini_bufs); ace_rx_int() 1942 retdesc = &ap->rx_return_ring[idx]; ace_rx_int() 1956 rip = &ap->skb->rx_std_skbuff[skbidx]; ace_rx_int() 1958 rxdesc = &ap->rx_std_ring[skbidx]; ace_rx_int() 1962 rip = &ap->skb->rx_jumbo_skbuff[skbidx]; ace_rx_int() 1964 rxdesc = &ap->rx_jumbo_ring[skbidx]; ace_rx_int() 1965 atomic_dec(&ap->cur_jumbo_bufs); ace_rx_int() 1968 rip = &ap->skb->rx_mini_skbuff[skbidx]; ace_rx_int() 1970 rxdesc = &ap->rx_mini_ring[skbidx]; ace_rx_int() 1982 pci_unmap_page(ap->pdev, ace_rx_int() 2017 atomic_sub(std_count, &ap->cur_rx_bufs); ace_rx_int() 2018 if (!ACE_IS_TIGON_I(ap)) ace_rx_int() 2019 atomic_sub(mini_count, &ap->cur_mini_bufs); ace_rx_int() 2026 if (ACE_IS_TIGON_I(ap)) { ace_rx_int() 2027 writel(idx, &ap->regs->RxRetCsm); ace_rx_int() 2029 ap->cur_rx = idx; ace_rx_int() 2041 struct ace_private *ap = netdev_priv(dev); ace_tx_int() local 2047 info = ap->skb->tx_skbuff + idx; ace_tx_int() 2051 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), ace_tx_int() 2064 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); ace_tx_int() 2071 ap->tx_ret_csm = txcsm; ace_tx_int() 
2106 struct ace_private *ap = netdev_priv(dev); ace_interrupt() local 2107 struct ace_regs __iomem *regs = ap->regs; ace_interrupt() 2122 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before ace_interrupt() 2138 rxretprd = *ap->rx_ret_prd; ace_interrupt() 2139 rxretcsm = ap->cur_rx; ace_interrupt() 2144 txcsm = *ap->tx_csm; ace_interrupt() 2145 idx = ap->tx_ret_csm; ace_interrupt() 2155 if (!tx_ring_full(ap, txcsm, ap->tx_prd)) ace_interrupt() 2160 evtprd = *ap->evt_prd; ace_interrupt() 2175 cur_size = atomic_read(&ap->cur_rx_bufs); ace_interrupt() 2178 !test_and_set_bit(0, &ap->std_refill_busy)) { ace_interrupt() 2188 if (!ACE_IS_TIGON_I(ap)) { ace_interrupt() 2189 cur_size = atomic_read(&ap->cur_mini_bufs); ace_interrupt() 2193 &ap->mini_refill_busy)) { ace_interrupt() 2205 if (ap->jumbo) { ace_interrupt() 2206 cur_size = atomic_read(&ap->cur_jumbo_bufs); ace_interrupt() 2210 &ap->jumbo_refill_busy)){ ace_interrupt() 2221 if (run_tasklet && !ap->tasklet_pending) { ace_interrupt() 2222 ap->tasklet_pending = 1; ace_interrupt() 2223 tasklet_schedule(&ap->ace_tasklet); ace_interrupt() 2232 struct ace_private *ap = netdev_priv(dev); ace_open() local 2233 struct ace_regs __iomem *regs = ap->regs; ace_open() 2236 if (!(ap->fw_running)) { ace_open() 2253 if (ap->jumbo && ace_open() 2254 !test_and_set_bit(0, &ap->jumbo_refill_busy)) ace_open() 2263 ap->promisc = 1; ace_open() 2265 ap->promisc = 0; ace_open() 2266 ap->mcast_all = 0; ace_open() 2280 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); ace_open() 2287 struct ace_private *ap = netdev_priv(dev); ace_close() local 2288 struct ace_regs __iomem *regs = ap->regs; ace_close() 2301 if (ap->promisc) { ace_close() 2306 ap->promisc = 0; ace_close() 2314 tasklet_kill(&ap->ace_tasklet); ace_close() 2324 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { ace_close() 2328 info = ap->skb->tx_skbuff + i; ace_close() 2332 if (ACE_IS_TIGON_I(ap)) { ace_close() 2335 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; ace_close() 2340 memset(ap->tx_ring + i, 0, ace_close() 2342 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), ace_close() 2353 if (ap->jumbo) { ace_close() 2368 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, ace_map_tx_skb() argument 2374 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), ace_map_tx_skb() 2378 info = ap->skb->tx_skbuff + idx; ace_map_tx_skb() 2387 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, ace_load_tx_bd() argument 2394 if (ACE_IS_TIGON_I(ap)) { ace_load_tx_bd() 2412 struct ace_private *ap = netdev_priv(dev); ace_start_xmit() local 2413 struct ace_regs __iomem *regs = ap->regs; ace_start_xmit() 2419 idx = ap->tx_prd; ace_start_xmit() 2421 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) ace_start_xmit() 2428 mapping = ace_map_tx_skb(ap, skb, skb, idx); ace_start_xmit() 2436 desc = ap->tx_ring + idx; ace_start_xmit() 2437 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); ace_start_xmit() 2440 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) ace_start_xmit() 2443 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); ace_start_xmit() 2449 mapping = ace_map_tx_skb(ap, skb, NULL, idx); ace_start_xmit() 2458 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); ace_start_xmit() 2460 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); ace_start_xmit() 2467 info = ap->skb->tx_skbuff + idx; ace_start_xmit() 2468 desc = ap->tx_ring + idx; ace_start_xmit() 2470 mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, ace_start_xmit() 2477 idx = (idx + 1) % 
ACE_TX_RING_ENTRIES(ap); ace_start_xmit() 2481 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) ace_start_xmit() 2494 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); ace_start_xmit() 2499 ap->tx_prd = idx; ace_start_xmit() 2500 ace_set_txprd(regs, ap, idx); ace_start_xmit() 2511 if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) ace_start_xmit() 2548 struct ace_private *ap = netdev_priv(dev); ace_change_mtu() local 2549 struct ace_regs __iomem *regs = ap->regs; ace_change_mtu() 2558 if (!(ap->jumbo)) { ace_change_mtu() 2561 ap->jumbo = 1; ace_change_mtu() 2562 if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) ace_change_mtu() 2567 while (test_and_set_bit(0, &ap->jumbo_refill_busy)); ace_change_mtu() 2570 if (ap->jumbo) { ace_change_mtu() 2585 struct ace_private *ap = netdev_priv(dev); ace_get_settings() local 2586 struct ace_regs __iomem *regs = ap->regs; ace_get_settings() 2638 struct ace_private *ap = netdev_priv(dev); ace_set_settings() local 2639 struct ace_regs __iomem *regs = ap->regs; ace_set_settings() 2657 if (!ACE_IS_TIGON_I(ap)) ace_set_settings() 2679 if (link != ap->link) { ace_set_settings() 2684 ap->link = link; ace_set_settings() 2686 if (!ACE_IS_TIGON_I(ap)) ace_set_settings() 2701 struct ace_private *ap = netdev_priv(dev); ace_get_drvinfo() local 2705 ap->firmware_major, ap->firmware_minor, ace_get_drvinfo() 2706 ap->firmware_fix); ace_get_drvinfo() 2708 if (ap->pdev) ace_get_drvinfo() 2709 strlcpy(info->bus_info, pci_name(ap->pdev), ace_get_drvinfo() 2719 struct ace_private *ap = netdev_priv(dev); ace_set_mac_addr() local 2720 struct ace_regs __iomem *regs = ap->regs; ace_set_mac_addr() 2747 struct ace_private *ap = netdev_priv(dev); ace_set_multicast_list() local 2748 struct ace_regs __iomem *regs = ap->regs; ace_set_multicast_list() 2751 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { ace_set_multicast_list() 2756 ap->mcast_all = 1; ace_set_multicast_list() 2757 } else if (ap->mcast_all) { ace_set_multicast_list() 2762 ap->mcast_all = 0; ace_set_multicast_list() 2765 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { ace_set_multicast_list() 2770 ap->promisc = 1; ace_set_multicast_list() 2771 }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { ace_set_multicast_list() 2776 ap->promisc = 0; ace_set_multicast_list() 2785 if (!netdev_mc_empty(dev) && !ap->mcast_all) { ace_set_multicast_list() 2790 }else if (!ap->mcast_all) { ace_set_multicast_list() 2801 struct ace_private *ap = netdev_priv(dev); ace_get_stats() local 2803 (struct ace_mac_stats __iomem *)ap->regs->Stats; ace_get_stats() 2875 struct ace_private *ap = netdev_priv(dev); ace_load_firmware() local 2876 struct ace_regs __iomem *regs = ap->regs; ace_load_firmware() 2883 "CPU is running!\n", ap->name); ace_load_firmware() 2887 if (ACE_IS_TIGON_I(ap)) ace_load_firmware() 2890 ret = request_firmware(&fw, fw_name, &ap->pdev->dev); ace_load_firmware() 2893 ap->name, fw_name); ace_load_firmware() 2904 ap->firmware_major = fw->data[0]; ace_load_firmware() 2905 ap->firmware_minor = fw->data[1]; ace_load_firmware() 2906 ap->firmware_fix = fw->data[2]; ace_load_firmware() 2908 ap->firmware_start = be32_to_cpu(fw_data[1]); ace_load_firmware() 2909 if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) { ace_load_firmware() 2911 ap->name, ap->firmware_start, fw_name); ace_load_firmware() 2919 ap->name, load_addr, fw_name); ace_load_firmware() 3082 struct ace_private *ap = netdev_priv(dev); read_eeprom_byte() local 3083 struct ace_regs __iomem *regs = ap->regs; read_eeprom_byte() 3100 printk(KERN_ERR "%s: 
Unable to sync eeprom\n", ap->name); read_eeprom_byte() 3109 ap->name); read_eeprom_byte() 3118 ap->name); read_eeprom_byte() 3128 ap->name); read_eeprom_byte() 3185 ap->name, offset); read_eeprom_byte()
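The ace_start_xmit() fragments above show the usual lockless transmit tail: the descriptor is loaded, tx_prd is advanced, and only then is tx_ring_full() consulted; if the ring looks full the queue is stopped and the test repeated, since the completion path may have freed slots in the meantime. A minimal sketch of that tail, assuming it sits inside acenic.c where struct ace_private and the tx_ring_full() macro are visible (netif_stop_queue()/netif_wake_queue() are the standard netdev queue helpers):

    static void ace_tx_stop_recheck_sketch(struct net_device *dev)
    {
            struct ace_private *ap = netdev_priv(dev);
            u32 idx = ap->tx_prd;   /* producer index, already advanced past the new descriptor */

            if (tx_ring_full(ap, ap->tx_ret_csm, idx)) {
                    netif_stop_queue(dev);
                    /* Re-check: the consumer index may have moved after the first
                     * test, in which case the queue can be woken straight away. */
                    if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
                            netif_wake_queue(dev);
            }
    }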
|
H A D | acenic.h | 703 static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd) tx_space() argument 705 return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1); tx_space() 708 #define tx_free(ap) tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap) 709 #define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED) 721 struct ace_private *ap, u32 value) ace_set_txprd() 725 spin_lock_irqsave(&ap->debug_lock, flags); ace_set_txprd() 727 if (value == ap->last_tx) ace_set_txprd() 730 ap->last_tx = value; ace_set_txprd() 731 spin_unlock_irqrestore(&ap->debug_lock, flags); ace_set_txprd() 741 struct ace_private *ap = netdev_priv(dev); ace_mask_irq() local 742 struct ace_regs __iomem *regs = ap->regs; ace_mask_irq() 744 if (ACE_IS_TIGON_I(ap)) ace_mask_irq() 755 struct ace_private *ap = netdev_priv(dev); ace_unmask_irq() local 756 struct ace_regs __iomem *regs = ap->regs; ace_unmask_irq() 758 if (ACE_IS_TIGON_I(ap)) ace_unmask_irq() 779 static void ace_dump_trace(struct ace_private *ap); 720 ace_set_txprd(struct ace_regs __iomem *regs, struct ace_private *ap, u32 value) ace_set_txprd() argument
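tx_space() works because ACE_TX_RING_ENTRIES(ap) is a power of two, so (csm - prd - 1) masked with (entries - 1) yields the free-slot count even when the free-running u32 indices wrap. Note that the tx_free() macro as listed passes (csm, prd, ap) while the tx_space() prototype above it takes (ap, csm, prd). The stand-alone program below checks the arithmetic with the intended (csm, prd) ordering; the ring size and reserve value are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_ENTRIES 256u   /* assumed power-of-two ring size */
    #define TX_RESERVED  4u     /* assumed headroom, standing in for the header's value */

    /* Free descriptors between consumer (csm) and producer (prd). */
    static unsigned int tx_space(uint32_t csm, uint32_t prd)
    {
            return (csm - prd - 1) & (RING_ENTRIES - 1);
    }

    static int tx_ring_full(uint32_t csm, uint32_t prd)
    {
            return tx_space(csm, prd) <= TX_RESERVED;
    }

    int main(void)
    {
            /* Empty ring: one slot is always kept unused. */
            printf("empty:   space=%u full=%d\n", tx_space(0, 0), tx_ring_full(0, 0));       /* 255, 0 */
            /* 252 descriptors outstanding: inside the reserved headroom. */
            printf("nearly:  space=%u full=%d\n", tx_space(0, 252), tx_ring_full(0, 252));   /* 3, 1 */
            /* Producer has wrapped past 2^32; the masked subtraction still works. */
            printf("wrapped: space=%u\n", tx_space(UINT32_MAX - 2, 3));                      /* 249 */
            return 0;
    }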
|
/linux-4.1.27/drivers/ata/ |
H A D | libata-sff.c | 75 * @ap: port where the device is 84 u8 ata_sff_check_status(struct ata_port *ap) ata_sff_check_status() argument 86 return ioread8(ap->ioaddr.status_addr); ata_sff_check_status() 92 * @ap: port where the device is 103 static u8 ata_sff_altstatus(struct ata_port *ap) ata_sff_altstatus() argument 105 if (ap->ops->sff_check_altstatus) ata_sff_altstatus() 106 return ap->ops->sff_check_altstatus(ap); ata_sff_altstatus() 108 return ioread8(ap->ioaddr.altstatus_addr); ata_sff_altstatus() 113 * @ap: port where the device is 123 static u8 ata_sff_irq_status(struct ata_port *ap) ata_sff_irq_status() argument 127 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { ata_sff_irq_status() 128 status = ata_sff_altstatus(ap); ata_sff_irq_status() 134 status = ap->ops->sff_check_status(ap); ata_sff_irq_status() 140 * @ap: Port to wait for. 150 static void ata_sff_sync(struct ata_port *ap) ata_sff_sync() argument 152 if (ap->ops->sff_check_altstatus) ata_sff_sync() 153 ap->ops->sff_check_altstatus(ap); ata_sff_sync() 154 else if (ap->ioaddr.altstatus_addr) ata_sff_sync() 155 ioread8(ap->ioaddr.altstatus_addr); ata_sff_sync() 160 * @ap: Port to pause for. 170 void ata_sff_pause(struct ata_port *ap) ata_sff_pause() argument 172 ata_sff_sync(ap); ata_sff_pause() 179 * @ap: Port to pause for. 185 void ata_sff_dma_pause(struct ata_port *ap) ata_sff_dma_pause() argument 187 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { ata_sff_dma_pause() 190 ata_sff_altstatus(ap); ata_sff_dma_pause() 202 * @ap: port containing status register to be polled 215 int ata_sff_busy_sleep(struct ata_port *ap, ata_sff_busy_sleep() argument 221 status = ata_sff_busy_wait(ap, ATA_BUSY, 300); ata_sff_busy_sleep() 226 ata_msleep(ap, 50); ata_sff_busy_sleep() 227 status = ata_sff_busy_wait(ap, ATA_BUSY, 3); ata_sff_busy_sleep() 231 ata_port_warn(ap, ata_sff_busy_sleep() 238 ata_msleep(ap, 50); ata_sff_busy_sleep() 239 status = ap->ops->sff_check_status(ap); ata_sff_busy_sleep() 246 ata_port_err(ap, ata_sff_busy_sleep() 258 u8 status = link->ap->ops->sff_check_status(link->ap); ata_sff_check_ready() 285 * @ap: port where the device is 296 static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) ata_sff_set_devctl() argument 298 if (ap->ops->sff_set_devctl) ata_sff_set_devctl() 299 ap->ops->sff_set_devctl(ap, ctl); ata_sff_set_devctl() 301 iowrite8(ctl, ap->ioaddr.ctl_addr); ata_sff_set_devctl() 306 * @ap: ATA channel to manipulate 318 void ata_sff_dev_select(struct ata_port *ap, unsigned int device) ata_sff_dev_select() argument 327 iowrite8(tmp, ap->ioaddr.device_addr); ata_sff_dev_select() 328 ata_sff_pause(ap); /* needed; also flushes, for mmio */ ata_sff_dev_select() 334 * @ap: ATA channel to manipulate 350 static void ata_dev_select(struct ata_port *ap, unsigned int device, ata_dev_select() argument 353 if (ata_msg_probe(ap)) ata_dev_select() 354 ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n", ata_dev_select() 358 ata_wait_idle(ap); ata_dev_select() 360 ap->ops->sff_dev_select(ap, device); ata_dev_select() 363 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) ata_dev_select() 364 ata_msleep(ap, 150); ata_dev_select() 365 ata_wait_idle(ap); ata_dev_select() 371 * @ap: Port on which interrupts are enabled. 
382 void ata_sff_irq_on(struct ata_port *ap) ata_sff_irq_on() argument 384 struct ata_ioports *ioaddr = &ap->ioaddr; ata_sff_irq_on() 386 if (ap->ops->sff_irq_on) { ata_sff_irq_on() 387 ap->ops->sff_irq_on(ap); ata_sff_irq_on() 391 ap->ctl &= ~ATA_NIEN; ata_sff_irq_on() 392 ap->last_ctl = ap->ctl; ata_sff_irq_on() 394 if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) ata_sff_irq_on() 395 ata_sff_set_devctl(ap, ap->ctl); ata_sff_irq_on() 396 ata_wait_idle(ap); ata_sff_irq_on() 398 if (ap->ops->sff_irq_clear) ata_sff_irq_on() 399 ap->ops->sff_irq_clear(ap); ata_sff_irq_on() 405 * @ap: Port to which output is sent 413 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) ata_sff_tf_load() argument 415 struct ata_ioports *ioaddr = &ap->ioaddr; ata_sff_tf_load() 418 if (tf->ctl != ap->last_ctl) { ata_sff_tf_load() 421 ap->last_ctl = tf->ctl; ata_sff_tf_load() 422 ata_wait_idle(ap); ata_sff_tf_load() 459 ata_wait_idle(ap); ata_sff_tf_load() 465 * @ap: Port from which input is read 476 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) ata_sff_tf_read() argument 478 struct ata_ioports *ioaddr = &ap->ioaddr; ata_sff_tf_read() 480 tf->command = ata_sff_check_status(ap); ata_sff_tf_read() 497 ap->last_ctl = tf->ctl; ata_sff_tf_read() 506 * @ap: port to which command is being issued 515 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) ata_sff_exec_command() argument 517 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); ata_sff_exec_command() 519 iowrite8(tf->command, ap->ioaddr.command_addr); ata_sff_exec_command() 520 ata_sff_pause(ap); ata_sff_exec_command() 526 * @ap: port to which command is being issued 536 static inline void ata_tf_to_host(struct ata_port *ap, ata_tf_to_host() argument 539 ap->ops->sff_tf_load(ap, tf); ata_tf_to_host() 540 ap->ops->sff_exec_command(ap, tf); ata_tf_to_host() 561 struct ata_port *ap = dev->link->ap; ata_sff_data_xfer() local 562 void __iomem *data_addr = ap->ioaddr.data_addr; ata_sff_data_xfer() 616 struct ata_port *ap = dev->link->ap; ata_sff_data_xfer32() local 617 void __iomem *data_addr = ap->ioaddr.data_addr; ata_sff_data_xfer32() 621 if (!(ap->pflags & ATA_PFLAG_PIO32)) ata_sff_data_xfer32() 701 struct ata_port *ap = qc->ap; ata_pio_sector() local 707 ap->hsm_task_state = HSM_ST_LAST; ata_pio_sector() 726 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, ata_pio_sector() 733 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, ata_pio_sector() 774 ata_sff_sync(qc->ap); /* flush */ ata_pio_sectors() 779 * @ap: Port to which ATAPI device is attached. 
788 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) atapi_send_cdb() argument 794 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); atapi_send_cdb() 795 ata_sff_sync(ap); atapi_send_cdb() 800 ap->hsm_task_state = HSM_ST; atapi_send_cdb() 803 ap->hsm_task_state = HSM_ST_LAST; atapi_send_cdb() 807 ap->hsm_task_state = HSM_ST_LAST; atapi_send_cdb() 809 ap->ops->bmdma_start(qc); atapi_send_cdb() 831 struct ata_port *ap = qc->ap; __atapi_pio_bytes() local 871 consumed = ap->ops->sff_data_xfer(dev, buf + offset, __atapi_pio_bytes() 878 consumed = ap->ops->sff_data_xfer(dev, buf + offset, __atapi_pio_bytes() 913 struct ata_port *ap = qc->ap; atapi_pio_bytes() local 925 ap->ops->sff_tf_read(ap, &qc->result_tf); atapi_pio_bytes() 943 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); atapi_pio_bytes() 947 ata_sff_sync(ap); /* flush */ atapi_pio_bytes() 956 ap->hsm_task_state = HSM_ST_ERR; atapi_pio_bytes() 961 * @ap: the target ata_port 967 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, ata_hsm_ok_in_wq() argument 973 if (ap->hsm_task_state == HSM_ST_FIRST) { ata_hsm_ok_in_wq() 999 struct ata_port *ap = qc->ap; ata_hsm_qc_complete() local 1001 if (ap->ops->error_handler) { ata_hsm_qc_complete() 1006 qc = ata_qc_from_tag(ap, qc->tag); ata_hsm_qc_complete() 1009 ata_sff_irq_on(ap); ata_hsm_qc_complete() 1012 ata_port_freeze(ap); ata_hsm_qc_complete() 1018 ata_port_freeze(ap); ata_hsm_qc_complete() 1022 ata_sff_irq_on(ap); ata_hsm_qc_complete() 1031 * @ap: the target ata_port 1039 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, ata_sff_hsm_move() argument 1046 lockdep_assert_held(ap->lock); ata_sff_hsm_move() 1054 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); ata_sff_hsm_move() 1058 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); ata_sff_hsm_move() 1060 switch (ap->hsm_task_state) { ata_sff_hsm_move() 1083 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1104 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1118 ap->hsm_task_state = HSM_ST; ata_sff_hsm_move() 1122 atapi_send_cdb(ap, qc); ata_sff_hsm_move() 1137 ap->hsm_task_state = HSM_ST_LAST; ata_sff_hsm_move() 1152 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1158 if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) ata_sff_hsm_move() 1190 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1210 status = ata_wait_idle(ap); ata_sff_hsm_move() 1234 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1240 if (ap->hsm_task_state == HSM_ST_LAST && ata_sff_hsm_move() 1243 status = ata_wait_idle(ap); ata_sff_hsm_move() 1254 ap->hsm_task_state = HSM_ST_ERR; ata_sff_hsm_move() 1260 ap->print_id, qc->dev->devno, status); ata_sff_hsm_move() 1264 ap->hsm_task_state = HSM_ST_IDLE; ata_sff_hsm_move() 1273 ap->hsm_task_state = HSM_ST_IDLE; ata_sff_hsm_move() 1303 struct ata_port *ap = link->ap; ata_sff_queue_pio_task() local 1305 WARN_ON((ap->sff_pio_task_link != NULL) && ata_sff_queue_pio_task() 1306 (ap->sff_pio_task_link != link)); ata_sff_queue_pio_task() 1307 ap->sff_pio_task_link = link; ata_sff_queue_pio_task() 1310 ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay)); ata_sff_queue_pio_task() 1314 void ata_sff_flush_pio_task(struct ata_port *ap) ata_sff_flush_pio_task() argument 1318 cancel_delayed_work_sync(&ap->sff_pio_task); ata_sff_flush_pio_task() 1328 spin_lock_irq(ap->lock); ata_sff_flush_pio_task() 1329 ap->hsm_task_state = HSM_ST_IDLE; ata_sff_flush_pio_task() 1330 spin_unlock_irq(ap->lock); ata_sff_flush_pio_task() 1332 
ap->sff_pio_task_link = NULL; ata_sff_flush_pio_task() 1334 if (ata_msg_ctl(ap)) ata_sff_flush_pio_task() 1335 ata_port_dbg(ap, "%s: EXIT\n", __func__); ata_sff_flush_pio_task() 1340 struct ata_port *ap = ata_sff_pio_task() local 1342 struct ata_link *link = ap->sff_pio_task_link; ata_sff_pio_task() 1347 spin_lock_irq(ap->lock); ata_sff_pio_task() 1349 BUG_ON(ap->sff_pio_task_link == NULL); ata_sff_pio_task() 1351 qc = ata_qc_from_tag(ap, link->active_tag); ata_sff_pio_task() 1353 ap->sff_pio_task_link = NULL; ata_sff_pio_task() 1358 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); ata_sff_pio_task() 1367 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); ata_sff_pio_task() 1369 spin_unlock_irq(ap->lock); ata_sff_pio_task() 1370 ata_msleep(ap, 2); ata_sff_pio_task() 1371 spin_lock_irq(ap->lock); ata_sff_pio_task() 1373 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); ata_sff_pio_task() 1384 ap->sff_pio_task_link = NULL; ata_sff_pio_task() 1386 poll_next = ata_sff_hsm_move(ap, qc, status, 1); ata_sff_pio_task() 1394 spin_unlock_irq(ap->lock); ata_sff_pio_task() 1412 struct ata_port *ap = qc->ap; ata_sff_qc_issue() local 1418 if (ap->flags & ATA_FLAG_PIO_POLLING) ata_sff_qc_issue() 1422 ata_dev_select(ap, qc->dev->devno, 1, 0); ata_sff_qc_issue() 1430 ata_tf_to_host(ap, &qc->tf); ata_sff_qc_issue() 1431 ap->hsm_task_state = HSM_ST_LAST; ata_sff_qc_issue() 1442 ata_tf_to_host(ap, &qc->tf); ata_sff_qc_issue() 1446 ap->hsm_task_state = HSM_ST_FIRST; ata_sff_qc_issue() 1454 ap->hsm_task_state = HSM_ST; ata_sff_qc_issue() 1472 ata_tf_to_host(ap, &qc->tf); ata_sff_qc_issue() 1474 ap->hsm_task_state = HSM_ST_FIRST; ata_sff_qc_issue() 1506 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); ata_sff_qc_fill_rtf() 1511 static unsigned int ata_sff_idle_irq(struct ata_port *ap) ata_sff_idle_irq() argument 1513 ap->stats.idle_irq++; ata_sff_idle_irq() 1516 if ((ap->stats.idle_irq % 1000) == 0) { ata_sff_idle_irq() 1517 ap->ops->sff_check_status(ap); ata_sff_idle_irq() 1518 if (ap->ops->sff_irq_clear) ata_sff_idle_irq() 1519 ap->ops->sff_irq_clear(ap); ata_sff_idle_irq() 1520 ata_port_warn(ap, "irq trap\n"); ata_sff_idle_irq() 1527 static unsigned int __ata_sff_port_intr(struct ata_port *ap, __ata_sff_port_intr() argument 1534 ap->print_id, qc->tf.protocol, ap->hsm_task_state); __ata_sff_port_intr() 1537 switch (ap->hsm_task_state) { __ata_sff_port_intr() 1548 return ata_sff_idle_irq(ap); __ata_sff_port_intr() 1551 return ata_sff_idle_irq(ap); __ata_sff_port_intr() 1557 status = ata_sff_irq_status(ap); __ata_sff_port_intr() 1562 ap->hsm_task_state = HSM_ST_ERR; __ata_sff_port_intr() 1564 return ata_sff_idle_irq(ap); __ata_sff_port_intr() 1568 if (ap->ops->sff_irq_clear) __ata_sff_port_intr() 1569 ap->ops->sff_irq_clear(ap); __ata_sff_port_intr() 1571 ata_sff_hsm_move(ap, qc, status, 0); __ata_sff_port_intr() 1578 * @ap: Port on which interrupt arrived (possibly...) 
1589 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) ata_sff_port_intr() argument 1591 return __ata_sff_port_intr(ap, qc, false); ata_sff_port_intr() 1610 struct ata_port *ap = host->ports[i]; __ata_sff_interrupt() local 1613 qc = ata_qc_from_tag(ap, ap->link.active_tag); __ata_sff_interrupt() 1616 handled |= port_intr(ap, qc); __ata_sff_interrupt() 1632 struct ata_port *ap = host->ports[i]; __ata_sff_interrupt() local 1637 if (!ap->ops->sff_irq_check || __ata_sff_interrupt() 1638 !ap->ops->sff_irq_check(ap)) __ata_sff_interrupt() 1642 ap->ops->sff_check_status(ap); __ata_sff_interrupt() 1643 if (ap->ops->sff_irq_clear) __ata_sff_interrupt() 1644 ap->ops->sff_irq_clear(ap); __ata_sff_interrupt() 1647 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) __ata_sff_interrupt() 1689 * @ap: port that appears to have timed out 1700 void ata_sff_lost_interrupt(struct ata_port *ap) ata_sff_lost_interrupt() argument 1706 qc = ata_qc_from_tag(ap, ap->link.active_tag); ata_sff_lost_interrupt() 1712 status = ata_sff_altstatus(ap); ata_sff_lost_interrupt() 1718 ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", ata_sff_lost_interrupt() 1722 ata_sff_port_intr(ap, qc); ata_sff_lost_interrupt() 1728 * @ap: port to freeze 1735 void ata_sff_freeze(struct ata_port *ap) ata_sff_freeze() argument 1737 ap->ctl |= ATA_NIEN; ata_sff_freeze() 1738 ap->last_ctl = ap->ctl; ata_sff_freeze() 1740 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) ata_sff_freeze() 1741 ata_sff_set_devctl(ap, ap->ctl); ata_sff_freeze() 1747 ap->ops->sff_check_status(ap); ata_sff_freeze() 1749 if (ap->ops->sff_irq_clear) ata_sff_freeze() 1750 ap->ops->sff_irq_clear(ap); ata_sff_freeze() 1756 * @ap: port to thaw 1763 void ata_sff_thaw(struct ata_port *ap) ata_sff_thaw() argument 1766 ap->ops->sff_check_status(ap); ata_sff_thaw() 1767 if (ap->ops->sff_irq_clear) ata_sff_thaw() 1768 ap->ops->sff_irq_clear(ap); ata_sff_thaw() 1769 ata_sff_irq_on(ap); ata_sff_thaw() 1818 * @ap: ATA channel to examine 1833 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) ata_devchk() argument 1835 struct ata_ioports *ioaddr = &ap->ioaddr; ata_devchk() 1838 ap->ops->sff_dev_select(ap, device); ata_devchk() 1882 struct ata_port *ap = dev->link->ap; ata_sff_dev_classify() local 1887 ap->ops->sff_dev_select(ap, dev->devno); ata_sff_dev_classify() 1891 ap->ops->sff_tf_read(ap, &tf); ata_sff_dev_classify() 1922 (ap->ops->sff_check_status(ap) == 0)) ata_sff_dev_classify() 1949 struct ata_port *ap = link->ap; ata_sff_wait_after_reset() local 1950 struct ata_ioports *ioaddr = &ap->ioaddr; ata_sff_wait_after_reset() 1955 ata_msleep(ap, ATA_WAIT_AFTER_RESET); ata_sff_wait_after_reset() 1971 ap->ops->sff_dev_select(ap, 1); ata_sff_wait_after_reset() 1984 ata_msleep(ap, 50); /* give drive a breather */ ata_sff_wait_after_reset() 1996 ap->ops->sff_dev_select(ap, 0); ata_sff_wait_after_reset() 1998 ap->ops->sff_dev_select(ap, 1); ata_sff_wait_after_reset() 2000 ap->ops->sff_dev_select(ap, 0); ata_sff_wait_after_reset() 2006 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, ata_bus_softreset() argument 2009 struct ata_ioports *ioaddr = &ap->ioaddr; ata_bus_softreset() 2011 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); ata_bus_softreset() 2013 if (ap->ioaddr.ctl_addr) { ata_bus_softreset() 2015 iowrite8(ap->ctl, ioaddr->ctl_addr); ata_bus_softreset() 2017 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); ata_bus_softreset() 2019 iowrite8(ap->ctl, ioaddr->ctl_addr); ata_bus_softreset() 2020 
ap->last_ctl = ap->ctl; ata_bus_softreset() 2024 return ata_sff_wait_after_reset(&ap->link, devmask, deadline); ata_bus_softreset() 2044 struct ata_port *ap = link->ap; ata_sff_softreset() local 2045 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; ata_sff_softreset() 2053 if (ata_devchk(ap, 0)) ata_sff_softreset() 2055 if (slave_possible && ata_devchk(ap, 1)) ata_sff_softreset() 2059 ap->ops->sff_dev_select(ap, 0); ata_sff_softreset() 2063 rc = ata_bus_softreset(ap, devmask, deadline); ata_sff_softreset() 2129 struct ata_port *ap = link->ap; ata_sff_postreset() local 2135 ap->ops->sff_dev_select(ap, 1); ata_sff_postreset() 2137 ap->ops->sff_dev_select(ap, 0); ata_sff_postreset() 2146 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { ata_sff_postreset() 2147 ata_sff_set_devctl(ap, ap->ctl); ata_sff_postreset() 2148 ap->last_ctl = ap->ctl; ata_sff_postreset() 2166 struct ata_port *ap; ata_sff_drain_fifo() local 2172 ap = qc->ap; ata_sff_drain_fifo() 2174 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) ata_sff_drain_fifo() 2176 ioread16(ap->ioaddr.data_addr); ata_sff_drain_fifo() 2180 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); ata_sff_drain_fifo() 2187 * @ap: port to handle error for 2197 void ata_sff_error_handler(struct ata_port *ap) ata_sff_error_handler() argument 2199 ata_reset_fn_t softreset = ap->ops->softreset; ata_sff_error_handler() 2200 ata_reset_fn_t hardreset = ap->ops->hardreset; ata_sff_error_handler() 2204 qc = __ata_qc_from_tag(ap, ap->link.active_tag); ata_sff_error_handler() 2208 spin_lock_irqsave(ap->lock, flags); ata_sff_error_handler() 2217 if (ap->ops->sff_drain_fifo) ata_sff_error_handler() 2218 ap->ops->sff_drain_fifo(qc); ata_sff_error_handler() 2220 spin_unlock_irqrestore(ap->lock, flags); ata_sff_error_handler() 2224 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) ata_sff_error_handler() 2227 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, ata_sff_error_handler() 2228 ap->ops->postreset); ata_sff_error_handler() 2302 struct ata_port *ap = host->ports[i]; ata_pci_sff_init_host() local 2306 if (ata_port_is_dummy(ap)) ata_pci_sff_init_host() 2314 ap->ops = &ata_dummy_port_ops; ata_pci_sff_init_host() 2326 ap->ops = &ata_dummy_port_ops; ata_pci_sff_init_host() 2331 ap->ioaddr.cmd_addr = iomap[base]; ata_pci_sff_init_host() 2332 ap->ioaddr.altstatus_addr = ata_pci_sff_init_host() 2333 ap->ioaddr.ctl_addr = (void __iomem *) ata_pci_sff_init_host() 2335 ata_sff_std_ports(&ap->ioaddr); ata_pci_sff_init_host() 2337 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", ata_pci_sff_init_host() 2634 struct ata_port *ap = qc->ap; ata_bmdma_fill_sg() local 2635 struct ata_bmdma_prd *prd = ap->bmdma_prd; ata_bmdma_fill_sg() 2685 struct ata_port *ap = qc->ap; ata_bmdma_fill_sg_dumb() local 2686 struct ata_bmdma_prd *prd = ap->bmdma_prd; ata_bmdma_fill_sg_dumb() 2782 struct ata_port *ap = qc->ap; ata_bmdma_qc_issue() local 2790 ata_dev_select(ap, qc->dev->devno, 1, 0); ata_bmdma_qc_issue() 2797 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ata_bmdma_qc_issue() 2798 ap->ops->bmdma_setup(qc); /* set up bmdma */ ata_bmdma_qc_issue() 2799 ap->ops->bmdma_start(qc); /* initiate bmdma */ ata_bmdma_qc_issue() 2800 ap->hsm_task_state = HSM_ST_LAST; ata_bmdma_qc_issue() 2806 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ata_bmdma_qc_issue() 2807 ap->ops->bmdma_setup(qc); /* set up bmdma */ ata_bmdma_qc_issue() 2808 ap->hsm_task_state = HSM_ST_FIRST; ata_bmdma_qc_issue() 2826 * @ap: Port on which interrupt 
arrived (possibly...) 2837 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) ata_bmdma_port_intr() argument 2839 struct ata_eh_info *ehi = &ap->link.eh_info; ata_bmdma_port_intr() 2844 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { ata_bmdma_port_intr() 2846 host_stat = ap->ops->bmdma_status(ap); ata_bmdma_port_intr() 2847 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); ata_bmdma_port_intr() 2851 return ata_sff_idle_irq(ap); ata_bmdma_port_intr() 2854 ap->ops->bmdma_stop(qc); ata_bmdma_port_intr() 2860 ap->hsm_task_state = HSM_ST_ERR; ata_bmdma_port_intr() 2864 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); ata_bmdma_port_intr() 2895 * @ap: port to handle error for 2905 void ata_bmdma_error_handler(struct ata_port *ap) ata_bmdma_error_handler() argument 2911 qc = __ata_qc_from_tag(ap, ap->link.active_tag); ata_bmdma_error_handler() 2916 spin_lock_irqsave(ap->lock, flags); ata_bmdma_error_handler() 2921 host_stat = ap->ops->bmdma_status(ap); ata_bmdma_error_handler() 2933 ap->ops->bmdma_stop(qc); ata_bmdma_error_handler() 2937 ap->ops->sff_check_status(ap); ata_bmdma_error_handler() 2938 if (ap->ops->sff_irq_clear) ata_bmdma_error_handler() 2939 ap->ops->sff_irq_clear(ap); ata_bmdma_error_handler() 2943 spin_unlock_irqrestore(ap->lock, flags); ata_bmdma_error_handler() 2946 ata_eh_thaw_port(ap); ata_bmdma_error_handler() 2948 ata_sff_error_handler(ap); ata_bmdma_error_handler() 2961 struct ata_port *ap = qc->ap; ata_bmdma_post_internal_cmd() local 2965 spin_lock_irqsave(ap->lock, flags); ata_bmdma_post_internal_cmd() 2966 ap->ops->bmdma_stop(qc); ata_bmdma_post_internal_cmd() 2967 spin_unlock_irqrestore(ap->lock, flags); ata_bmdma_post_internal_cmd() 2974 * @ap: Port associated with this ATA transaction. 2983 void ata_bmdma_irq_clear(struct ata_port *ap) ata_bmdma_irq_clear() argument 2985 void __iomem *mmio = ap->ioaddr.bmdma_addr; ata_bmdma_irq_clear() 3003 struct ata_port *ap = qc->ap; ata_bmdma_setup() local 3009 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); ata_bmdma_setup() 3012 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ata_bmdma_setup() 3016 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ata_bmdma_setup() 3019 ap->ops->sff_exec_command(ap, &qc->tf); ata_bmdma_setup() 3032 struct ata_port *ap = qc->ap; ata_bmdma_start() local 3036 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ata_bmdma_start() 3037 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ata_bmdma_start() 3069 struct ata_port *ap = qc->ap; ata_bmdma_stop() local 3070 void __iomem *mmio = ap->ioaddr.bmdma_addr; ata_bmdma_stop() 3077 ata_sff_dma_pause(ap); ata_bmdma_stop() 3083 * @ap: Port associated with this ATA transaction. 
3092 u8 ata_bmdma_status(struct ata_port *ap) ata_bmdma_status() argument 3094 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); ata_bmdma_status() 3101 * @ap: Port to initialize 3111 int ata_bmdma_port_start(struct ata_port *ap) ata_bmdma_port_start() argument 3113 if (ap->mwdma_mask || ap->udma_mask) { ata_bmdma_port_start() 3114 ap->bmdma_prd = ata_bmdma_port_start() 3115 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, ata_bmdma_port_start() 3116 &ap->bmdma_prd_dma, GFP_KERNEL); ata_bmdma_port_start() 3117 if (!ap->bmdma_prd) ata_bmdma_port_start() 3127 * @ap: Port to initialize 3139 int ata_bmdma_port_start32(struct ata_port *ap) ata_bmdma_port_start32() argument 3141 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; ata_bmdma_port_start32() 3142 return ata_bmdma_port_start(ap); ata_bmdma_port_start32() 3232 struct ata_port *ap = host->ports[i]; ata_pci_bmdma_init() local 3235 if (ata_port_is_dummy(ap)) ata_pci_bmdma_init() 3238 ap->ioaddr.bmdma_addr = bmdma; ata_pci_bmdma_init() 3239 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && ata_pci_bmdma_init() 3243 ata_port_desc(ap, "bmdma 0x%llx", ata_pci_bmdma_init() 3310 * @ap: Port to initialize 3318 void ata_sff_port_init(struct ata_port *ap) ata_sff_port_init() argument 3320 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); ata_sff_port_init() 3321 ap->ctl = ATA_DEVCTL_OBS; ata_sff_port_init() 3322 ap->last_ctl = 0xFF; ata_sff_port_init()
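Nearly every helper in the libata-sff.c block above boils down to "read the Status (or AltStatus) register, wait for ATA_BUSY to clear, then move the host state machine forward". A minimal polling sketch in the spirit of ata_sff_busy_sleep(), using only calls that appear in this listing and assuming it sits in the same file so struct ata_port and the ops table are in scope:

    static int wait_not_busy_sketch(struct ata_port *ap, unsigned int tries)
    {
            u8 status;

            while (tries--) {
                    status = ap->ops->sff_check_status(ap); /* reads the Status register */
                    if (!(status & ATA_BUSY))
                            return 0;                       /* ready for the next HSM step */
                    ata_msleep(ap, 10);                     /* polite wait, like the real helpers */
            }
            return -EBUSY;
    }

The real ata_sff_busy_sleep() above adds a longer initial wait, a 50 ms retry cadence and a final diagnostic message, but the shape is the same.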
|
H A D | sata_dwc_460ex.c | 173 #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\ 174 (ap)->host->private_data) 175 #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\ 176 (ap)->private_data) 178 (qc)->ap->host->private_data) 212 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 214 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); 215 static void sata_dwc_port_stop(struct ata_port *ap); 254 static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) sata_dwc_tf_dump() argument 256 dev_vdbg(ap->dev, sata_dwc_tf_dump() 260 dev_vdbg(ap->dev, sata_dwc_tf_dump() 263 dev_vdbg(ap->dev, sata_dwc_tf_dump() 274 struct ata_port *ap; dma_dwc_xfer_done() local 280 ap = host->ports[port]; dma_dwc_xfer_done() 281 hsdevp = HSDEVP_FROM_AP(ap); dma_dwc_xfer_done() 282 tag = ap->link.active_tag; dma_dwc_xfer_done() 293 dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n", dma_dwc_xfer_done() 298 sata_dwc_dma_xfer_complete(ap, 1); dma_dwc_xfer_done() 305 struct ata_port *ap = qc->ap; dma_dwc_xfer_setup() local 306 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); dma_dwc_xfer_setup() 307 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); dma_dwc_xfer_setup() 348 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", sata_dwc_scr_read() 353 *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4)); sata_dwc_scr_read() 354 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", sata_dwc_scr_read() 355 __func__, link->ap->print_id, scr, *val); sata_dwc_scr_read() 362 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", sata_dwc_scr_write() 363 __func__, link->ap->print_id, scr, val); sata_dwc_scr_write() 365 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", sata_dwc_scr_write() 369 out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val); sata_dwc_scr_write() 403 static void sata_dwc_error_intr(struct ata_port *ap, sata_dwc_error_intr() argument 406 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_error_intr() 407 struct ata_eh_info *ehi = &ap->link.eh_info; sata_dwc_error_intr() 416 status = ap->ops->sff_check_status(ap); sata_dwc_error_intr() 418 tag = ap->link.active_tag; sata_dwc_error_intr() 420 dev_err(ap->dev, sata_dwc_error_intr() 438 qc = ata_qc_from_tag(ap, tag); sata_dwc_error_intr() 444 ata_port_abort(ap); sata_dwc_error_intr() 458 struct ata_port *ap; sata_dwc_isr() local 472 ap = host->ports[port]; sata_dwc_isr() 473 hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_isr() 475 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, sata_dwc_isr() 476 ap->link.active_tag); sata_dwc_isr() 480 sata_dwc_error_intr(ap, hsdev, intpr); sata_dwc_isr() 490 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); sata_dwc_isr() 492 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); sata_dwc_isr() 496 qc = ata_qc_from_tag(ap, tag); sata_dwc_isr() 502 qc->ap->link.active_tag = tag; sata_dwc_isr() 513 if (ap->link.active_tag == ATA_TAG_POISON) sata_dwc_isr() 516 tag = ap->link.active_tag; sata_dwc_isr() 517 qc = ata_qc_from_tag(ap, tag); sata_dwc_isr() 521 dev_err(ap->dev, sata_dwc_isr() 524 ap->ops->sff_check_status(ap); sata_dwc_isr() 528 status = ap->ops->sff_check_status(ap); sata_dwc_isr() 530 qc->ap->link.active_tag = tag; sata_dwc_isr() 534 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); sata_dwc_isr() 535 sata_dwc_qc_complete(ap, qc, 1); sata_dwc_isr() 540 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", sata_dwc_isr() 553 dev_err(ap->dev, 
sata_dwc_isr() 560 sata_dwc_dma_xfer_complete(ap, 1); sata_dwc_isr() 562 ata_sff_hsm_move(ap, qc, status, 0); sata_dwc_isr() 566 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) sata_dwc_isr() 587 dev_dbg(ap->dev, sata_dwc_isr() 595 dev_warn(ap->dev, sata_dwc_isr() 601 status = ap->ops->sff_check_status(ap); sata_dwc_isr() 602 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); sata_dwc_isr() 614 qc = ata_qc_from_tag(ap, tag); sata_dwc_isr() 617 qc->ap->link.active_tag = tag; sata_dwc_isr() 622 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, sata_dwc_isr() 624 sata_dwc_qc_complete(ap, qc, 1); sata_dwc_isr() 630 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, sata_dwc_isr() 636 dev_warn(ap->dev, "%s: DMA not pending?\n", sata_dwc_isr() 639 sata_dwc_dma_xfer_complete(ap, 1); sata_dwc_isr() 641 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) sata_dwc_isr() 647 ap->stats.idle_irq++; sata_dwc_isr() 648 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", sata_dwc_isr() 649 ap->print_id); sata_dwc_isr() 661 dev_dbg(ap->dev, sata_dwc_isr() 698 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) sata_dwc_dma_xfer_complete() argument 701 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_dma_xfer_complete() 702 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); sata_dwc_dma_xfer_complete() 705 tag = ap->link.active_tag; sata_dwc_dma_xfer_complete() 706 qc = ata_qc_from_tag(ap, tag); sata_dwc_dma_xfer_complete() 708 dev_err(ap->dev, "failed to get qc"); sata_dwc_dma_xfer_complete() 714 dev_info(ap->dev, sata_dwc_dma_xfer_complete() 725 dev_err(ap->dev, sata_dwc_dma_xfer_complete() 732 sata_dwc_qc_complete(ap, qc, check_status); sata_dwc_dma_xfer_complete() 733 ap->link.active_tag = ATA_TAG_POISON; sata_dwc_dma_xfer_complete() 735 sata_dwc_qc_complete(ap, qc, check_status); sata_dwc_dma_xfer_complete() 739 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, sata_dwc_qc_complete() argument 745 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_qc_complete() 747 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); sata_dwc_qc_complete() 750 dev_err(ap->dev, "TX DMA PENDING\n"); sata_dwc_qc_complete() 752 dev_err(ap->dev, "RX DMA PENDING\n"); sata_dwc_qc_complete() 753 dev_dbg(ap->dev, sata_dwc_qc_complete() 755 qc->tf.command, status, ap->print_id, qc->tf.protocol); sata_dwc_qc_complete() 826 static int sata_dwc_port_start(struct ata_port *ap) sata_dwc_port_start() argument 835 hsdev = HSDEV_FROM_AP(ap); sata_dwc_port_start() 837 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); sata_dwc_port_start() 839 hsdev->host = ap->host; sata_dwc_port_start() 840 pdev = ap->host->dev; sata_dwc_port_start() 842 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); sata_dwc_port_start() 850 dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__); sata_dwc_port_start() 874 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ sata_dwc_port_start() 875 ap->bmdma_prd_dma = 0; sata_dwc_port_start() 877 if (ap->port_no == 0) { sata_dwc_port_start() 878 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", sata_dwc_port_start() 883 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", sata_dwc_port_start() 892 ap->private_data = hsdevp; sata_dwc_port_start() 893 dev_dbg(ap->dev, "%s: done\n", __func__); sata_dwc_port_start() 899 dev_dbg(ap->dev, "%s: fail. 
ap->id = %d\n", __func__, ap->print_id); sata_dwc_port_start() 903 static void sata_dwc_port_stop(struct ata_port *ap) sata_dwc_port_stop() argument 905 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_port_stop() 907 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); sata_dwc_port_stop() 913 ap->private_data = NULL; sata_dwc_port_stop() 918 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued 923 static void sata_dwc_exec_command_by_tag(struct ata_port *ap, sata_dwc_exec_command_by_tag() argument 928 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_exec_command_by_tag() 930 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, sata_dwc_exec_command_by_tag() 933 spin_lock_irqsave(&ap->host->lock, flags); sata_dwc_exec_command_by_tag() 935 spin_unlock_irqrestore(&ap->host->lock, flags); sata_dwc_exec_command_by_tag() 943 ata_sff_exec_command(ap, tf); sata_dwc_exec_command_by_tag() 948 sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, sata_dwc_bmdma_setup_by_tag() 957 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", sata_dwc_bmdma_setup() 958 __func__, qc->ap->link.sactive, tag); sata_dwc_bmdma_setup() 970 struct ata_port *ap = qc->ap; sata_dwc_bmdma_start_by_tag() local 971 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_bmdma_start_by_tag() 982 dev_err(ap->dev, sata_dwc_bmdma_start_by_tag() 988 dev_dbg(ap->dev, sata_dwc_bmdma_start_by_tag() 992 sata_dwc_tf_dump(ap, &qc->tf); sata_dwc_bmdma_start_by_tag() 997 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", sata_dwc_bmdma_start_by_tag() 1019 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", sata_dwc_bmdma_start() 1020 __func__, qc->ap->link.sactive, tag); sata_dwc_bmdma_start() 1024 dev_dbg(qc->ap->dev, "%s\n", __func__); sata_dwc_bmdma_start() 1037 struct ata_port *ap = qc->ap; sata_dwc_qc_prep_by_tag() local 1038 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); sata_dwc_qc_prep_by_tag() 1040 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", sata_dwc_qc_prep_by_tag() 1041 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), sata_dwc_qc_prep_by_tag() 1046 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n", sata_dwc_qc_prep_by_tag() 1057 struct ata_port *ap = qc->ap; sata_dwc_qc_issue() local 1060 if (qc->tag > 0 || ap->link.sactive > 1) sata_dwc_qc_issue() 1061 dev_info(ap->dev, sata_dwc_qc_issue() 1062 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", sata_dwc_qc_issue() 1063 __func__, ap->print_id, qc->tf.command, sata_dwc_qc_issue() 1066 ap->link.active_tag, ap->link.sactive); sata_dwc_qc_issue() 1078 dev_dbg(qc->ap->dev, sata_dwc_qc_issue() 1079 "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", sata_dwc_qc_issue() 1080 __func__, tag, qc->ap->link.sactive, sactive); sata_dwc_qc_issue() 1082 ap->ops->sff_tf_load(ap, &qc->tf); sata_dwc_qc_issue() 1083 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, sata_dwc_qc_issue() 1105 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", sata_dwc_qc_prep() 1106 __func__, qc->tag, qc->ap->link.active_tag); sata_dwc_qc_prep() 1112 static void sata_dwc_error_handler(struct ata_port *ap) sata_dwc_error_handler() argument 1114 ata_sff_error_handler(ap); sata_dwc_error_handler() 1120 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); sata_dwc_hardreset()
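The HSDEV_FROM_AP()/HSDEVP_FROM_AP() macros at the top of this block are just typed views of ap->host->private_data (one controller-wide structure) and ap->private_data (one per-port structure, allocated and assigned in sata_dwc_port_start() above). A reduced sketch of that wiring, with the structure bodies elided:

    /* Field lists omitted; the names mirror the macros in sata_dwc_460ex.c. */
    struct sata_dwc_device;         /* per controller, reached via ap->host */
    struct sata_dwc_device_port;    /* per port, reached via ap */

    static inline struct sata_dwc_device *hsdev_from_ap(struct ata_port *ap)
    {
            return ap->host->private_data;  /* what HSDEV_FROM_AP() expands to */
    }

    static inline struct sata_dwc_device_port *hsdevp_from_ap(struct ata_port *ap)
    {
            return ap->private_data;        /* what HSDEVP_FROM_AP() expands to */
    }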
|
H A D | libahci.c | 61 static ssize_t ahci_led_show(struct ata_port *ap, char *buf); 62 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, 64 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, 72 static int ahci_port_start(struct ata_port *ap); 73 static void ahci_port_stop(struct ata_port *ap); 76 static void ahci_freeze(struct ata_port *ap); 77 static void ahci_thaw(struct ata_port *ap); 78 static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep); 79 static void ahci_enable_fbs(struct ata_port *ap); 80 static void ahci_disable_fbs(struct ata_port *ap); 81 static void ahci_pmp_attach(struct ata_port *ap); 82 static void ahci_pmp_detach(struct ata_port *ap); 93 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 230 struct ata_port *ap = ata_shost_to_port(shost); ahci_show_host_caps() local 231 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_show_host_caps() 240 struct ata_port *ap = ata_shost_to_port(shost); ahci_show_host_cap2() local 241 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_show_host_cap2() 250 struct ata_port *ap = ata_shost_to_port(shost); ahci_show_host_version() local 251 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_show_host_version() 261 struct ata_port *ap = ata_shost_to_port(shost); ahci_show_port_cmd() local 262 void __iomem *port_mmio = ahci_port_base(ap); ahci_show_port_cmd() 271 struct ata_port *ap = ata_shost_to_port(shost); ahci_read_em_buffer() local 272 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_read_em_buffer() 280 spin_lock_irqsave(ap->lock, flags); ahci_read_em_buffer() 283 if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT || ahci_read_em_buffer() 285 spin_unlock_irqrestore(ap->lock, flags); ahci_read_em_buffer() 290 spin_unlock_irqrestore(ap->lock, flags); ahci_read_em_buffer() 302 ata_port_warn(ap, ahci_read_em_buffer() 317 spin_unlock_irqrestore(ap->lock, flags); ahci_read_em_buffer() 327 struct ata_port *ap = ata_shost_to_port(shost); ahci_store_em_buffer() local 328 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_store_em_buffer() 337 if (!(ap->flags & ATA_FLAG_EM) || ahci_store_em_buffer() 342 spin_lock_irqsave(ap->lock, flags); ahci_store_em_buffer() 346 spin_unlock_irqrestore(ap->lock, flags); ahci_store_em_buffer() 358 spin_unlock_irqrestore(ap->lock, flags); ahci_store_em_buffer() 367 struct ata_port *ap = ata_shost_to_port(shost); ahci_show_em_supported() local 368 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_show_em_supported() 539 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) ahci_scr_offset() argument 548 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_scr_offset() 558 void __iomem *port_mmio = ahci_port_base(link->ap); ahci_scr_read() 559 int offset = ahci_scr_offset(link->ap, sc_reg); ahci_scr_read() 570 void __iomem *port_mmio = ahci_port_base(link->ap); ahci_scr_write() 571 int offset = ahci_scr_offset(link->ap, sc_reg); ahci_scr_write() 580 void ahci_start_engine(struct ata_port *ap) ahci_start_engine() argument 582 void __iomem *port_mmio = ahci_port_base(ap); ahci_start_engine() 593 int ahci_stop_engine(struct ata_port *ap) ahci_stop_engine() argument 595 void __iomem *port_mmio = ahci_port_base(ap); ahci_stop_engine() 609 tmp = ata_wait_register(ap, port_mmio + PORT_CMD, ahci_stop_engine() 618 void ahci_start_fis_rx(struct ata_port *ap) ahci_start_fis_rx() argument 620 void __iomem *port_mmio = ahci_port_base(ap); ahci_start_fis_rx() 621 
struct ahci_host_priv *hpriv = ap->host->private_data; ahci_start_fis_rx() 622 struct ahci_port_priv *pp = ap->private_data; ahci_start_fis_rx() 646 static int ahci_stop_fis_rx(struct ata_port *ap) ahci_stop_fis_rx() argument 648 void __iomem *port_mmio = ahci_port_base(ap); ahci_stop_fis_rx() 657 tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON, ahci_stop_fis_rx() 665 static void ahci_power_up(struct ata_port *ap) ahci_power_up() argument 667 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_power_up() 668 void __iomem *port_mmio = ahci_port_base(ap); ahci_power_up() 686 struct ata_port *ap = link->ap; ahci_set_lpm() local 687 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_set_lpm() 688 struct ahci_port_priv *pp = ap->private_data; ahci_set_lpm() 689 void __iomem *port_mmio = ahci_port_base(ap); ahci_set_lpm() 714 ata_msleep(ap, 10); ahci_set_lpm() 730 ahci_set_aggressive_devslp(ap, true); ahci_set_lpm() 732 ahci_set_aggressive_devslp(ap, false); ahci_set_lpm() 747 static void ahci_power_down(struct ata_port *ap) ahci_power_down() argument 749 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_power_down() 750 void __iomem *port_mmio = ahci_port_base(ap); ahci_power_down() 768 static void ahci_start_port(struct ata_port *ap) ahci_start_port() argument 770 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_start_port() 771 struct ahci_port_priv *pp = ap->private_data; ahci_start_port() 778 ahci_start_fis_rx(ap); ahci_start_port() 782 hpriv->start_engine(ap); ahci_start_port() 785 if (ap->flags & ATA_FLAG_EM) { ata_for_each_link() 786 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 791 rc = ap->ops->transmit_led_message(ap, ata_for_each_link() 810 if (ap->flags & ATA_FLAG_SW_ACTIVITY) 811 ata_for_each_link(link, ap, EDGE) 816 static int ahci_deinit_port(struct ata_port *ap, const char **emsg) ahci_deinit_port() argument 821 rc = ahci_stop_engine(ap); ahci_deinit_port() 828 rc = ahci_stop_fis_rx(ap); ahci_deinit_port() 887 struct ata_port *ap = link->ap; ahci_sw_activity() local 888 struct ahci_port_priv *pp = ap->private_data; ahci_sw_activity() 902 struct ata_port *ap = link->ap; ahci_sw_activity_blink() local 903 struct ahci_port_priv *pp = ap->private_data; ahci_sw_activity_blink() 910 led_message |= ap->port_no | (link->pmp << 8); ahci_sw_activity_blink() 916 spin_lock_irqsave(ap->lock, flags); ahci_sw_activity_blink() 939 spin_unlock_irqrestore(ap->lock, flags); ahci_sw_activity_blink() 940 ap->ops->transmit_led_message(ap, led_message, 4); ahci_sw_activity_blink() 945 struct ata_port *ap = link->ap; ahci_init_sw_activity() local 946 struct ahci_port_priv *pp = ap->private_data; ahci_init_sw_activity() 973 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, ahci_transmit_led_message() argument 976 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_transmit_led_message() 977 struct ahci_port_priv *pp = ap->private_data; ahci_transmit_led_message() 992 spin_lock_irqsave(ap->lock, flags); ahci_transmit_led_message() 1000 spin_unlock_irqrestore(ap->lock, flags); ahci_transmit_led_message() 1012 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); ahci_transmit_led_message() 1027 spin_unlock_irqrestore(ap->lock, flags); ahci_transmit_led_message() 1031 static ssize_t ahci_led_show(struct ata_port *ap, char *buf) ahci_led_show() argument 1033 struct ahci_port_priv *pp = ap->private_data; ahci_led_show() 1038 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 1045 static ssize_t 
ahci_led_store(struct ata_port *ap, const char *buf, ahci_led_store() argument 1050 struct ahci_port_priv *pp = ap->private_data; ahci_led_store() 1070 return ap->ops->transmit_led_message(ap, state, size); ahci_led_store() 1076 struct ata_port *ap = link->ap; ahci_activity_store() local 1077 struct ahci_port_priv *pp = ap->private_data; ahci_activity_store() 1088 port_led_state |= (ap->port_no | (link->pmp << 8)); ahci_activity_store() 1089 ap->ops->transmit_led_message(ap, port_led_state, 4); ahci_activity_store() 1095 port_led_state |= (ap->port_no | (link->pmp << 8)); ahci_activity_store() 1097 ap->ops->transmit_led_message(ap, port_led_state, 4); ahci_activity_store() 1107 struct ata_port *ap = link->ap; ahci_activity_show() local 1108 struct ahci_port_priv *pp = ap->private_data; ahci_activity_show() 1117 static void ahci_port_init(struct device *dev, struct ata_port *ap, ahci_port_init() argument 1126 rc = ahci_deinit_port(ap, &emsg); ahci_port_init() 1153 struct ata_port *ap = host->ports[i]; ahci_init_controller() local 1155 port_mmio = ahci_port_base(ap); ahci_init_controller() 1156 if (ata_port_is_dummy(ap)) ahci_init_controller() 1159 ahci_port_init(host->dev, ap, i, mmio, port_mmio); ahci_init_controller() 1172 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; ahci_dev_config() 1181 unsigned int ahci_dev_classify(struct ata_port *ap) ahci_dev_classify() argument 1183 void __iomem *port_mmio = ahci_port_base(ap); ahci_dev_classify() 1211 int ahci_kick_engine(struct ata_port *ap) ahci_kick_engine() argument 1213 void __iomem *port_mmio = ahci_port_base(ap); ahci_kick_engine() 1214 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_kick_engine() 1220 rc = ahci_stop_engine(ap); ahci_kick_engine() 1228 if (!busy && !sata_pmp_attached(ap)) { ahci_kick_engine() 1244 tmp = ata_wait_register(ap, port_mmio + PORT_CMD, ahci_kick_engine() 1251 hpriv->start_engine(ap); ahci_kick_engine() 1256 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, ahci_exec_polled_cmd() argument 1261 struct ahci_port_priv *pp = ap->private_data; ahci_exec_polled_cmd() 1262 void __iomem *port_mmio = ahci_port_base(ap); ahci_exec_polled_cmd() 1274 tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE, ahci_exec_polled_cmd() 1277 ahci_kick_engine(ap); ahci_exec_polled_cmd() 1290 struct ata_port *ap = link->ap; ahci_do_softreset() local 1291 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_do_softreset() 1292 struct ahci_port_priv *pp = ap->private_data; ahci_do_softreset() 1302 rc = ahci_kick_engine(ap); ahci_do_softreset() 1312 ahci_disable_fbs(ap); ahci_do_softreset() 1325 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, ahci_do_softreset() 1333 ata_msleep(ap, 1); ahci_do_softreset() 1337 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); ahci_do_softreset() 1354 *class = ahci_dev_classify(ap); ahci_do_softreset() 1358 ahci_enable_fbs(ap); ahci_do_softreset() 1370 void __iomem *port_mmio = ahci_port_base(link->ap); ahci_check_ready() 1390 void __iomem *port_mmio = ahci_port_base(link->ap); ahci_bad_pmp_check_ready() 1407 struct ata_port *ap = link->ap; ahci_pmp_retry_softreset() local 1408 void __iomem *port_mmio = ahci_port_base(ap); ahci_pmp_retry_softreset() 1441 struct ata_port *ap = link->ap; ahci_hardreset() local 1442 struct ahci_port_priv *pp = ap->private_data; ahci_hardreset() 1443 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_hardreset() 1451 ahci_stop_engine(ap); ahci_hardreset() 1461 hpriv->start_engine(ap); ahci_hardreset() 1464 *class = 
ahci_dev_classify(ap); ahci_hardreset() 1472 struct ata_port *ap = link->ap; ahci_postreset() local 1473 void __iomem *port_mmio = ahci_port_base(ap); ahci_postreset() 1515 struct ata_port *ap = qc->ap; ahci_pmp_qc_defer() local 1516 struct ahci_port_priv *pp = ap->private_data; ahci_pmp_qc_defer() 1518 if (!sata_pmp_attached(ap) || pp->fbs_enabled) ahci_pmp_qc_defer() 1526 struct ata_port *ap = qc->ap; ahci_qc_prep() local 1527 struct ahci_port_priv *pp = ap->private_data; ahci_qc_prep() 1562 static void ahci_fbs_dec_intr(struct ata_port *ap) ahci_fbs_dec_intr() argument 1564 struct ahci_port_priv *pp = ap->private_data; ahci_fbs_dec_intr() 1565 void __iomem *port_mmio = ahci_port_base(ap); ahci_fbs_dec_intr() 1583 dev_err(ap->host->dev, "failed to clear device error\n"); ahci_fbs_dec_intr() 1586 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) ahci_error_intr() argument 1588 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_error_intr() 1589 struct ahci_port_priv *pp = ap->private_data; ahci_error_intr() 1590 struct ata_eh_info *host_ehi = &ap->link.eh_info; ahci_error_intr() 1599 void __iomem *port_mmio = ahci_port_base(ap); ahci_error_intr() 1603 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) { ahci_error_intr() 1604 link = &ap->pmp_link[pmp]; ahci_error_intr() 1609 ata_for_each_link(link, ap, EDGE) ahci_error_intr() 1614 link = &ap->link; ahci_error_intr() 1616 active_qc = ata_qc_from_tag(ap, link->active_tag); ahci_error_intr() 1624 ahci_scr_read(&ap->link, SCR_ERROR, &serror); ahci_error_intr() 1625 ahci_scr_write(&ap->link, SCR_ERROR, serror); ahci_error_intr() 1656 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { ahci_error_intr() 1689 ata_port_freeze(ap); ahci_error_intr() 1692 ahci_fbs_dec_intr(ap); ahci_error_intr() 1694 ata_port_abort(ap); ahci_error_intr() 1697 static void ahci_handle_port_interrupt(struct ata_port *ap, ahci_handle_port_interrupt() argument 1700 struct ata_eh_info *ehi = &ap->link.eh_info; ahci_handle_port_interrupt() 1701 struct ahci_port_priv *pp = ap->private_data; ahci_handle_port_interrupt() 1702 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_handle_port_interrupt() 1703 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); ahci_handle_port_interrupt() 1711 if (sata_lpm_ignore_phy_events(&ap->link)) { ahci_handle_port_interrupt() 1713 ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); ahci_handle_port_interrupt() 1717 ahci_error_intr(ap, status); ahci_handle_port_interrupt() 1732 sata_async_notification(ap); ahci_handle_port_interrupt() 1748 sata_async_notification(ap); ahci_handle_port_interrupt() 1758 if (ap->qc_active) { ahci_handle_port_interrupt() 1764 if (ap->qc_active && pp->active_link->sactive) ahci_handle_port_interrupt() 1771 rc = ata_qc_complete_multiple(ap, qc_active); ahci_handle_port_interrupt() 1777 ata_port_freeze(ap); ahci_handle_port_interrupt() 1781 static void ahci_port_intr(struct ata_port *ap) ahci_port_intr() argument 1783 void __iomem *port_mmio = ahci_port_base(ap); ahci_port_intr() 1789 ahci_handle_port_interrupt(ap, port_mmio, status); ahci_port_intr() 1794 struct ata_port *ap = dev_instance; ahci_port_thread_fn() local 1795 struct ahci_port_priv *pp = ap->private_data; ahci_port_thread_fn() 1796 void __iomem *port_mmio = ahci_port_base(ap); ahci_port_thread_fn() 1803 spin_lock_bh(ap->lock); ahci_port_thread_fn() 1804 ahci_handle_port_interrupt(ap, port_mmio, status); ahci_port_thread_fn() 1805 spin_unlock_bh(ap->lock); ahci_port_thread_fn() 1812 struct ata_port *ap 
= dev_instance; ahci_multi_irqs_intr() local 1813 void __iomem *port_mmio = ahci_port_base(ap); ahci_multi_irqs_intr() 1814 struct ahci_port_priv *pp = ap->private_data; ahci_multi_irqs_intr() 1852 struct ata_port *ap; ahci_single_irq_intr() local 1857 ap = host->ports[i]; ahci_single_irq_intr() 1858 if (ap) { ahci_single_irq_intr() 1859 ahci_port_intr(ap); ahci_single_irq_intr() 1891 struct ata_port *ap = qc->ap; ahci_qc_issue() local 1892 void __iomem *port_mmio = ahci_port_base(ap); ahci_qc_issue() 1893 struct ahci_port_priv *pp = ap->private_data; ahci_qc_issue() 1922 struct ahci_port_priv *pp = qc->ap->private_data; ahci_qc_fill_rtf() 1944 static void ahci_freeze(struct ata_port *ap) ahci_freeze() argument 1946 void __iomem *port_mmio = ahci_port_base(ap); ahci_freeze() 1952 static void ahci_thaw(struct ata_port *ap) ahci_thaw() argument 1954 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_thaw() 1956 void __iomem *port_mmio = ahci_port_base(ap); ahci_thaw() 1958 struct ahci_port_priv *pp = ap->private_data; ahci_thaw() 1963 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); ahci_thaw() 1969 void ahci_error_handler(struct ata_port *ap) ahci_error_handler() argument 1971 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_error_handler() 1973 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { ahci_error_handler() 1975 ahci_stop_engine(ap); ahci_error_handler() 1976 hpriv->start_engine(ap); ahci_error_handler() 1979 sata_pmp_error_handler(ap); ahci_error_handler() 1981 if (!ata_dev_enabled(ap->link.device)) ahci_error_handler() 1982 ahci_stop_engine(ap); ahci_error_handler() 1988 struct ata_port *ap = qc->ap; ahci_post_internal_cmd() local 1992 ahci_kick_engine(ap); ahci_post_internal_cmd() 1995 static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) ahci_set_aggressive_devslp() argument 1997 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_set_aggressive_devslp() 1998 void __iomem *port_mmio = ahci_port_base(ap); ahci_set_aggressive_devslp() 1999 struct ata_device *dev = ap->link.device; ahci_set_aggressive_devslp() 2006 dev_info(ap->host->dev, "port does not support device sleep\n"); ahci_set_aggressive_devslp() 2029 rc = ahci_stop_engine(ap); ahci_set_aggressive_devslp() 2061 hpriv->start_engine(ap); ahci_set_aggressive_devslp() 2071 static void ahci_enable_fbs(struct ata_port *ap) ahci_enable_fbs() argument 2073 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_enable_fbs() 2074 struct ahci_port_priv *pp = ap->private_data; ahci_enable_fbs() 2075 void __iomem *port_mmio = ahci_port_base(ap); ahci_enable_fbs() 2089 rc = ahci_stop_engine(ap); ahci_enable_fbs() 2096 dev_info(ap->host->dev, "FBS is enabled\n"); ahci_enable_fbs() 2100 dev_err(ap->host->dev, "Failed to enable FBS\n"); ahci_enable_fbs() 2102 hpriv->start_engine(ap); ahci_enable_fbs() 2105 static void ahci_disable_fbs(struct ata_port *ap) ahci_disable_fbs() argument 2107 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_disable_fbs() 2108 struct ahci_port_priv *pp = ap->private_data; ahci_disable_fbs() 2109 void __iomem *port_mmio = ahci_port_base(ap); ahci_disable_fbs() 2122 rc = ahci_stop_engine(ap); ahci_disable_fbs() 2129 dev_err(ap->host->dev, "Failed to disable FBS\n"); ahci_disable_fbs() 2131 dev_info(ap->host->dev, "FBS is disabled\n"); ahci_disable_fbs() 2135 hpriv->start_engine(ap); ahci_disable_fbs() 2138 static void ahci_pmp_attach(struct ata_port *ap) ahci_pmp_attach() argument 2140 void __iomem *port_mmio = ahci_port_base(ap); ahci_pmp_attach() 2141 struct 
ahci_port_priv *pp = ap->private_data; ahci_pmp_attach() 2148 ahci_enable_fbs(ap); ahci_pmp_attach() 2160 if (!(ap->pflags & ATA_PFLAG_FROZEN)) ahci_pmp_attach() 2164 static void ahci_pmp_detach(struct ata_port *ap) ahci_pmp_detach() argument 2166 void __iomem *port_mmio = ahci_port_base(ap); ahci_pmp_detach() 2167 struct ahci_port_priv *pp = ap->private_data; ahci_pmp_detach() 2170 ahci_disable_fbs(ap); ahci_pmp_detach() 2179 if (!(ap->pflags & ATA_PFLAG_FROZEN)) ahci_pmp_detach() 2183 int ahci_port_resume(struct ata_port *ap) ahci_port_resume() argument 2185 ahci_power_up(ap); ahci_port_resume() 2186 ahci_start_port(ap); ahci_port_resume() 2188 if (sata_pmp_attached(ap)) ahci_port_resume() 2189 ahci_pmp_attach(ap); ahci_port_resume() 2191 ahci_pmp_detach(ap); ahci_port_resume() 2198 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) ahci_port_suspend() argument 2203 rc = ahci_deinit_port(ap, &emsg); ahci_port_suspend() 2205 ahci_power_down(ap); ahci_port_suspend() 2207 ata_port_err(ap, "%s (%d)\n", emsg, rc); ahci_port_suspend() 2208 ata_port_freeze(ap); ahci_port_suspend() 2215 static int ahci_port_start(struct ata_port *ap) ahci_port_start() argument 2217 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_port_start() 2218 struct device *dev = ap->host->dev; ahci_port_start() 2228 if (ap->host->n_ports > 1) { ahci_port_start() 2235 "%s%d", dev_driver_string(dev), ap->port_no); ahci_port_start() 2239 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { ahci_port_start() 2240 void __iomem *port_mmio = ahci_port_base(ap); ahci_port_start() 2246 ap->port_no); ahci_port_start() 2250 ap->port_no); ahci_port_start() 2303 ap->lock = &pp->lock; ahci_port_start() 2306 ap->private_data = pp; ahci_port_start() 2309 return ahci_port_resume(ap); ahci_port_start() 2312 static void ahci_port_stop(struct ata_port *ap) ahci_port_stop() argument 2318 rc = ahci_deinit_port(ap, &emsg); ahci_port_stop() 2320 ata_port_warn(ap, "%s (%d)\n", emsg, rc); ahci_port_stop()
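A pattern that recurs through the libahci.c lines above is "stop the port's command engine, reprogram the PORT_* registers, restart the engine" -- ahci_kick_engine(), ahci_enable_fbs(), ahci_disable_fbs() and ahci_set_aggressive_devslp() all follow it. A skeletal version of that bracket, using only calls shown in the listing and assuming the same file context:

    static int ahci_with_engine_stopped_sketch(struct ata_port *ap)
    {
            struct ahci_host_priv *hpriv = ap->host->private_data;
            int rc;

            rc = ahci_stop_engine(ap);      /* clears PORT_CMD_START and waits for it to take */
            if (rc)
                    return rc;              /* engine refused to stop; leave things alone */

            /* ... touch the port registers here (FBS, DEVSLP, LED state, ...) ... */

            hpriv->start_engine(ap);        /* normally ahci_start_engine() */
            return 0;
    }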
|
H A D | pata_samsung_cf.c | 105 static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev) pata_s3c_set_piomode() argument 107 struct s3c_ide_info *info = ap->host->private_data; pata_s3c_set_piomode() 177 static void pata_s3c_tf_load(struct ata_port *ap, pata_s3c_tf_load() argument 180 struct ata_ioports *ioaddr = &ap->ioaddr; pata_s3c_tf_load() 183 if (tf->ctl != ap->last_ctl) { pata_s3c_tf_load() 184 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr); pata_s3c_tf_load() 185 ap->last_ctl = tf->ctl; pata_s3c_tf_load() 186 ata_wait_idle(ap); pata_s3c_tf_load() 190 ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr); pata_s3c_tf_load() 191 ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr); pata_s3c_tf_load() 192 ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr); pata_s3c_tf_load() 193 ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr); pata_s3c_tf_load() 194 ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr); pata_s3c_tf_load() 198 ata_outb(ap->host, tf->feature, ioaddr->feature_addr); pata_s3c_tf_load() 199 ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr); pata_s3c_tf_load() 200 ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr); pata_s3c_tf_load() 201 ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr); pata_s3c_tf_load() 202 ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr); pata_s3c_tf_load() 206 ata_outb(ap->host, tf->device, ioaddr->device_addr); pata_s3c_tf_load() 208 ata_wait_idle(ap); pata_s3c_tf_load() 214 static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf) pata_s3c_tf_read() argument 216 struct ata_ioports *ioaddr = &ap->ioaddr; pata_s3c_tf_read() 218 tf->feature = ata_inb(ap->host, ioaddr->error_addr); pata_s3c_tf_read() 219 tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_tf_read() 220 tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_tf_read() 221 tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); pata_s3c_tf_read() 222 tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr); pata_s3c_tf_read() 223 tf->device = ata_inb(ap->host, ioaddr->device_addr); pata_s3c_tf_read() 226 ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr); pata_s3c_tf_read() 227 tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr); pata_s3c_tf_read() 228 tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_tf_read() 229 tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_tf_read() 230 tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr); pata_s3c_tf_read() 231 tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr); pata_s3c_tf_read() 232 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr); pata_s3c_tf_read() 233 ap->last_ctl = tf->ctl; pata_s3c_tf_read() 240 static void pata_s3c_exec_command(struct ata_port *ap, pata_s3c_exec_command() argument 243 ata_outb(ap->host, tf->command, ap->ioaddr.command_addr); pata_s3c_exec_command() 244 ata_sff_pause(ap); pata_s3c_exec_command() 250 static u8 pata_s3c_check_status(struct ata_port *ap) pata_s3c_check_status() argument 252 return ata_inb(ap->host, ap->ioaddr.status_addr); pata_s3c_check_status() 258 static u8 pata_s3c_check_altstatus(struct ata_port *ap) pata_s3c_check_altstatus() argument 260 return ata_inb(ap->host, ap->ioaddr.altstatus_addr); pata_s3c_check_altstatus() 269 struct ata_port *ap = dev->link->ap; pata_s3c_data_xfer() local 270 struct s3c_ide_info *info = ap->host->private_data; pata_s3c_data_xfer() 271 void __iomem *data_addr = ap->ioaddr.data_addr; pata_s3c_data_xfer() 291 dev_err(ap->dev, "unexpected trailing data\n"); pata_s3c_data_xfer() 299 static void 
pata_s3c_dev_select(struct ata_port *ap, unsigned int device) pata_s3c_dev_select() argument 306 ata_outb(ap->host, tmp, ap->ioaddr.device_addr); pata_s3c_dev_select() 307 ata_sff_pause(ap); pata_s3c_dev_select() 313 static unsigned int pata_s3c_devchk(struct ata_port *ap, pata_s3c_devchk() argument 316 struct ata_ioports *ioaddr = &ap->ioaddr; pata_s3c_devchk() 319 pata_s3c_dev_select(ap, device); pata_s3c_devchk() 321 ata_outb(ap->host, 0x55, ioaddr->nsect_addr); pata_s3c_devchk() 322 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); pata_s3c_devchk() 324 ata_outb(ap->host, 0xaa, ioaddr->nsect_addr); pata_s3c_devchk() 325 ata_outb(ap->host, 0x55, ioaddr->lbal_addr); pata_s3c_devchk() 327 ata_outb(ap->host, 0x55, ioaddr->nsect_addr); pata_s3c_devchk() 328 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); pata_s3c_devchk() 330 nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_devchk() 331 lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_devchk() 347 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); pata_s3c_wait_after_reset() 363 static int pata_s3c_bus_softreset(struct ata_port *ap, pata_s3c_bus_softreset() argument 366 struct ata_ioports *ioaddr = &ap->ioaddr; pata_s3c_bus_softreset() 369 ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr); pata_s3c_bus_softreset() 371 ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr); pata_s3c_bus_softreset() 373 ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr); pata_s3c_bus_softreset() 374 ap->last_ctl = ap->ctl; pata_s3c_bus_softreset() 376 return pata_s3c_wait_after_reset(&ap->link, deadline); pata_s3c_bus_softreset() 385 struct ata_port *ap = link->ap; pata_s3c_softreset() local 391 if (pata_s3c_devchk(ap, 0)) pata_s3c_softreset() 395 pata_s3c_dev_select(ap, 0); pata_s3c_softreset() 398 rc = pata_s3c_bus_softreset(ap, deadline); pata_s3c_softreset() 406 classes[0] = ata_sff_dev_classify(&ap->link.device[0], pata_s3c_softreset() 415 static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl) pata_s3c_set_devctl() argument 417 ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr); pata_s3c_set_devctl() 500 struct ata_port *ap; pata_s3c_probe() local 539 ap = host->ports[0]; pata_s3c_probe() 540 ap->pio_mask = ATA_PIO4; pata_s3c_probe() 543 ap->ops = &pata_s3c_port_ops; pata_s3c_probe() 548 ap->ops = &pata_s5p_port_ops; pata_s3c_probe() 555 ap->flags |= ATA_FLAG_PIO_POLLING; pata_s3c_probe() 557 ata_port_desc(ap, "no IRQ, using PIO polling\n"); pata_s3c_probe() 560 ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD; pata_s3c_probe() 561 ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR; pata_s3c_probe() 562 ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED; pata_s3c_probe() 563 ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED; pata_s3c_probe() 564 ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR; pata_s3c_probe() 565 ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR; pata_s3c_probe() 566 ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR; pata_s3c_probe() 567 ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR; pata_s3c_probe() 568 ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR; pata_s3c_probe() 569 ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD; pata_s3c_probe() 570 ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD; pata_s3c_probe() 571 ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD; pata_s3c_probe() 572 ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD; pata_s3c_probe() 574 ata_port_desc(ap, "mmio cmd 0x%llx ", pata_s3c_probe()
|
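pata_s3c_devchk() above uses the classic write-scratch-and-read-back probe: 0x55/0xaa are bounced through the sector count and LBA low registers, and a device is assumed present only if the last pair reads back intact. A sketch of just that check, using generic iowrite8()/ioread8() instead of the driver's ata_outb()/ata_inb() bus helpers; tf_registers_respond() is a hypothetical helper name.

/* Presence probe in the spirit of pata_s3c_devchk() above. */
#include <linux/io.h>
#include <linux/libata.h>

static bool tf_registers_respond(struct ata_ioports *ioaddr)
{
        u8 nsect, lbal;

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        /* only a real device latches the scratch values */
        return nsect == 0x55 && lbal == 0xaa;
}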
H A D | pata_acpi.c | 30 * @ap: Port 38 struct ata_port *ap = link->ap; pacpi_pre_reset() local 39 struct pata_acpi *acpi = ap->private_data; pacpi_pre_reset() 40 if (ACPI_HANDLE(&ap->tdev) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0) pacpi_pre_reset() 48 * @ap: port to detect 53 static int pacpi_cable_detect(struct ata_port *ap) pacpi_cable_detect() argument 55 struct pata_acpi *acpi = ap->private_data; pacpi_cable_detect() 72 static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev) pacpi_discover_modes() argument 74 struct pata_acpi *acpi = ap->private_data; pacpi_discover_modes() 80 ata_acpi_gtm(ap, &probe); pacpi_discover_modes() 85 ap->cbl = ATA_CBL_PATA80; pacpi_discover_modes() 101 struct pata_acpi *acpi = adev->link->ap->private_data; pacpi_mode_filter() 107 * @ap: ATA interface 111 static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev) pacpi_set_piomode() argument 114 struct pata_acpi *acpi = ap->private_data; pacpi_set_piomode() 123 ata_acpi_stm(ap, &acpi->gtm); pacpi_set_piomode() 125 ata_acpi_gtm(ap, &acpi->gtm); pacpi_set_piomode() 130 * @ap: ATA interface 134 static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) pacpi_set_dmamode() argument 137 struct pata_acpi *acpi = ap->private_data; pacpi_set_dmamode() 152 ata_acpi_stm(ap, &acpi->gtm); pacpi_set_dmamode() 154 ata_acpi_gtm(ap, &acpi->gtm); pacpi_set_dmamode() 168 struct ata_port *ap = qc->ap; pacpi_qc_issue() local 170 struct pata_acpi *acpi = ap->private_data; pacpi_qc_issue() 176 pacpi_set_piomode(ap, adev); pacpi_qc_issue() 178 pacpi_set_dmamode(ap, adev); pacpi_qc_issue() 186 * @ap: ATA port being set up 191 static int pacpi_port_start(struct ata_port *ap) pacpi_port_start() argument 193 struct pci_dev *pdev = to_pci_dev(ap->host->dev); pacpi_port_start() 196 if (ACPI_HANDLE(&ap->tdev) == NULL) pacpi_port_start() 199 acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL); pacpi_port_start() 200 if (ap->private_data == NULL) pacpi_port_start() 202 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]); pacpi_port_start() 203 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]); pacpi_port_start() 204 return ata_bmdma_port_start(ap); pacpi_port_start()
|
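pacpi_qc_issue() above avoids reprogramming ACPI timings on every command by remembering the last device it tuned for and retuning only when qc->dev changes. A hedged sketch of that caching idea: my_timing_priv and my_qc_issue are stand-ins, and the retune goes through the standard ->set_piomode/->set_dmamode hooks rather than the driver's own helpers.

/* Retune-on-device-change sketch after pacpi_qc_issue() above. */
#include <linux/libata.h>

struct my_timing_priv {
        struct ata_device *last;        /* device the timings are set for */
};

static unsigned int my_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *adev = qc->dev;
        struct my_timing_priv *priv = ap->private_data;

        if (adev != priv->last) {
                ap->ops->set_piomode(ap, adev);
                if (ata_dma_enabled(adev))
                        ap->ops->set_dmamode(ap, adev);
                priv->last = adev;
        }
        return ata_bmdma_qc_issue(qc);
}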
H A D | sata_nv.c | 310 static void nv_nf2_freeze(struct ata_port *ap); 311 static void nv_nf2_thaw(struct ata_port *ap); 312 static void nv_ck804_freeze(struct ata_port *ap); 313 static void nv_ck804_thaw(struct ata_port *ap); 319 static void nv_adma_irq_clear(struct ata_port *ap); 320 static int nv_adma_port_start(struct ata_port *ap); 321 static void nv_adma_port_stop(struct ata_port *ap); 323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg); 324 static int nv_adma_port_resume(struct ata_port *ap); 326 static void nv_adma_freeze(struct ata_port *ap); 327 static void nv_adma_thaw(struct ata_port *ap); 328 static void nv_adma_error_handler(struct ata_port *ap); 331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 333 static void nv_mcp55_thaw(struct ata_port *ap); 334 static void nv_mcp55_freeze(struct ata_port *ap); 335 static void nv_swncq_error_handler(struct ata_port *ap); 337 static int nv_swncq_port_start(struct ata_port *ap); 341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); 344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg); 345 static int nv_swncq_port_resume(struct ata_port *ap); 605 static void nv_adma_register_mode(struct ata_port *ap) nv_adma_register_mode() argument 607 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_register_mode() 622 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n", nv_adma_register_mode() 636 ata_port_warn(ap, nv_adma_register_mode() 643 static void nv_adma_mode(struct ata_port *ap) nv_adma_mode() argument 645 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_mode() 666 ata_port_warn(ap, nv_adma_mode() 675 struct ata_port *ap = ata_shost_to_port(sdev->host); nv_adma_slave_config() local 676 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_slave_config() 679 struct pci_dev *pdev = to_pci_dev(ap->host->dev); nv_adma_slave_config() 692 spin_lock_irqsave(ap->lock, flags); nv_adma_slave_config() 694 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { nv_adma_slave_config() 710 nv_adma_register_mode(ap); nv_adma_slave_config() 719 if (ap->port_no == 1) nv_adma_slave_config() 737 port0 = ap->host->ports[0]->private_data; nv_adma_slave_config() 738 port1 = ap->host->ports[1]->private_data; nv_adma_slave_config() 739 sdev0 = ap->host->ports[0]->link.device[0].sdev; nv_adma_slave_config() 740 sdev1 = ap->host->ports[1]->link.device[0].sdev; nv_adma_slave_config() 773 ata_port_info(ap, nv_adma_slave_config() 775 (unsigned long long)*ap->host->dev->dma_mask, nv_adma_slave_config() 778 spin_unlock_irqrestore(ap->lock, flags); nv_adma_slave_config() 785 struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_check_atapi_dma() 789 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) nv_adma_tf_read() argument 798 nv_adma_register_mode(ap); nv_adma_tf_read() 800 ata_sff_tf_read(ap, tf); nv_adma_tf_read() 835 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) nv_adma_check_cpb() argument 837 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_check_cpb() 846 struct ata_eh_info *ehi = &ap->link.eh_info; nv_adma_check_cpb() 869 ata_port_freeze(ap); nv_adma_check_cpb() 871 ata_port_abort(ap); nv_adma_check_cpb() 880 static int nv_host_intr(struct ata_port *ap, u8 irq_stat) nv_host_intr() argument 882 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); nv_host_intr() 886 ata_port_freeze(ap); nv_host_intr() 896 ata_sff_check_status(ap); nv_host_intr() 901 return 
ata_bmdma_port_intr(ap, qc); nv_host_intr() 913 struct ata_port *ap = host->ports[i]; nv_adma_interrupt() local 914 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_interrupt() 926 handled += nv_host_intr(ap, irq_stat); nv_adma_interrupt() 934 if (ata_tag_valid(ap->link.active_tag)) nv_adma_interrupt() 940 handled += nv_host_intr(ap, irq_stat); nv_adma_interrupt() 949 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && nv_adma_interrupt() 973 struct ata_eh_info *ehi = &ap->link.eh_info; nv_adma_interrupt() 991 ata_port_freeze(ap); nv_adma_interrupt() 1004 if (ata_tag_valid(ap->link.active_tag)) nv_adma_interrupt() 1006 ap->link.active_tag; nv_adma_interrupt() 1008 check_commands = ap->link.sactive; nv_adma_interrupt() 1014 rc = nv_adma_check_cpb(ap, pos, nv_adma_interrupt() 1022 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); nv_adma_interrupt() 1040 static void nv_adma_freeze(struct ata_port *ap) nv_adma_freeze() argument 1042 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_freeze() 1046 nv_ck804_freeze(ap); nv_adma_freeze() 1052 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), nv_adma_freeze() 1053 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); nv_adma_freeze() 1062 static void nv_adma_thaw(struct ata_port *ap) nv_adma_thaw() argument 1064 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_thaw() 1068 nv_ck804_thaw(ap); nv_adma_thaw() 1080 static void nv_adma_irq_clear(struct ata_port *ap) nv_adma_irq_clear() argument 1082 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_irq_clear() 1087 ata_bmdma_irq_clear(ap); nv_adma_irq_clear() 1092 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), nv_adma_irq_clear() 1093 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); nv_adma_irq_clear() 1100 if (ap->port_no == 0) { nv_adma_irq_clear() 1107 pp = ap->host->ports[0]->private_data; nv_adma_irq_clear() 1109 pp = ap->host->ports[1]->private_data; nv_adma_irq_clear() 1115 struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_post_internal_cmd() 1121 static int nv_adma_port_start(struct ata_port *ap) nv_adma_port_start() argument 1123 struct device *dev = ap->host->dev; nv_adma_port_start() 1144 rc = ata_bmdma_port_start(ap); nv_adma_port_start() 1152 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + nv_adma_port_start() 1153 ap->port_no * NV_ADMA_PORT_SIZE; nv_adma_port_start() 1155 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; nv_adma_port_start() 1157 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); nv_adma_port_start() 1194 ap->private_data = pp; nv_adma_port_start() 1220 static void nv_adma_port_stop(struct ata_port *ap) nv_adma_port_stop() argument 1222 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_port_stop() 1230 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) nv_adma_port_suspend() argument 1232 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_port_suspend() 1236 nv_adma_register_mode(ap); nv_adma_port_suspend() 1247 static int nv_adma_port_resume(struct ata_port *ap) nv_adma_port_resume() argument 1249 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_port_resume() 1282 static void nv_adma_setup_port(struct ata_port *ap) nv_adma_setup_port() argument 1284 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; nv_adma_setup_port() 1285 struct ata_ioports *ioport = &ap->ioaddr; nv_adma_setup_port() 1289 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; nv_adma_setup_port() 1350 struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_fill_sg() 1370 
struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_use_reg_mode() 1387 struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_qc_prep() 1395 nv_adma_register_mode(qc->ap); nv_adma_qc_prep() 1433 struct nv_adma_port_priv *pp = qc->ap->private_data; nv_adma_qc_issue() 1453 nv_adma_register_mode(qc->ap); nv_adma_qc_issue() 1456 nv_adma_mode(qc->ap); nv_adma_qc_issue() 1486 struct ata_port *ap = host->ports[i]; nv_generic_interrupt() local 1489 qc = ata_qc_from_tag(ap, ap->link.active_tag); nv_generic_interrupt() 1491 handled += ata_bmdma_port_intr(ap, qc); nv_generic_interrupt() 1497 ap->ops->sff_check_status(ap); nv_generic_interrupt() 1551 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); nv_scr_read() 1560 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); nv_scr_write() 1572 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && nv_hardreset() 1596 static void nv_nf2_freeze(struct ata_port *ap) nv_nf2_freeze() argument 1598 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; nv_nf2_freeze() 1599 int shift = ap->port_no * NV_INT_PORT_SHIFT; nv_nf2_freeze() 1607 static void nv_nf2_thaw(struct ata_port *ap) nv_nf2_thaw() argument 1609 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; nv_nf2_thaw() 1610 int shift = ap->port_no * NV_INT_PORT_SHIFT; nv_nf2_thaw() 1620 static void nv_ck804_freeze(struct ata_port *ap) nv_ck804_freeze() argument 1622 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; nv_ck804_freeze() 1623 int shift = ap->port_no * NV_INT_PORT_SHIFT; nv_ck804_freeze() 1631 static void nv_ck804_thaw(struct ata_port *ap) nv_ck804_thaw() argument 1633 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; nv_ck804_thaw() 1634 int shift = ap->port_no * NV_INT_PORT_SHIFT; nv_ck804_thaw() 1644 static void nv_mcp55_freeze(struct ata_port *ap) nv_mcp55_freeze() argument 1646 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; nv_mcp55_freeze() 1647 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; nv_mcp55_freeze() 1657 static void nv_mcp55_thaw(struct ata_port *ap) nv_mcp55_thaw() argument 1659 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; nv_mcp55_thaw() 1660 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; nv_mcp55_thaw() 1670 static void nv_adma_error_handler(struct ata_port *ap) nv_adma_error_handler() argument 1672 struct nv_adma_port_priv *pp = ap->private_data; nv_adma_error_handler() 1678 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { nv_adma_error_handler() 1686 ata_port_err(ap, nv_adma_error_handler() 1695 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || nv_adma_error_handler() 1696 ap->link.sactive & (1 << i)) nv_adma_error_handler() 1697 ata_port_err(ap, nv_adma_error_handler() 1704 nv_adma_register_mode(ap); nv_adma_error_handler() 1723 ata_bmdma_error_handler(ap); nv_adma_error_handler() 1726 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) nv_swncq_qc_to_dq() argument 1728 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_qc_to_dq() 1737 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) nv_swncq_qc_from_dq() argument 1739 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_qc_from_dq() 1751 return ata_qc_from_tag(ap, tag); nv_swncq_qc_from_dq() 1754 static void nv_swncq_fis_reinit(struct ata_port *ap) nv_swncq_fis_reinit() argument 1756 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_fis_reinit() 1764 static void nv_swncq_pp_reinit(struct ata_port *ap) nv_swncq_pp_reinit() argument 1766 
struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_pp_reinit() 1774 nv_swncq_fis_reinit(ap); nv_swncq_pp_reinit() 1777 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis) nv_swncq_irq_clear() argument 1779 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_irq_clear() 1784 static void __ata_bmdma_stop(struct ata_port *ap) __ata_bmdma_stop() argument 1788 qc.ap = ap; __ata_bmdma_stop() 1792 static void nv_swncq_ncq_stop(struct ata_port *ap) nv_swncq_ncq_stop() argument 1794 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_ncq_stop() 1799 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", nv_swncq_ncq_stop() 1800 ap->qc_active, ap->link.sactive); nv_swncq_ncq_stop() 1801 ata_port_err(ap, nv_swncq_ncq_stop() 1807 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", nv_swncq_ncq_stop() 1808 ap->ops->sff_check_status(ap), nv_swncq_ncq_stop() 1809 ioread8(ap->ioaddr.error_addr)); nv_swncq_ncq_stop() 1814 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); nv_swncq_ncq_stop() 1824 ata_port_err(ap, nv_swncq_ncq_stop() 1833 nv_swncq_pp_reinit(ap); nv_swncq_ncq_stop() 1834 ap->ops->sff_irq_clear(ap); nv_swncq_ncq_stop() 1835 __ata_bmdma_stop(ap); nv_swncq_ncq_stop() 1836 nv_swncq_irq_clear(ap, 0xffff); nv_swncq_ncq_stop() 1839 static void nv_swncq_error_handler(struct ata_port *ap) nv_swncq_error_handler() argument 1841 struct ata_eh_context *ehc = &ap->link.eh_context; nv_swncq_error_handler() 1843 if (ap->link.sactive) { nv_swncq_error_handler() 1844 nv_swncq_ncq_stop(ap); nv_swncq_error_handler() 1848 ata_bmdma_error_handler(ap); nv_swncq_error_handler() 1852 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg) nv_swncq_port_suspend() argument 1854 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; nv_swncq_port_suspend() 1871 static int nv_swncq_port_resume(struct ata_port *ap) nv_swncq_port_resume() argument 1873 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; nv_swncq_port_resume() 1918 struct ata_port *ap = ata_shost_to_port(sdev->host); nv_swncq_slave_config() local 1919 struct pci_dev *pdev = to_pci_dev(ap->host->dev); nv_swncq_slave_config() 1931 dev = &ap->link.device[sdev->id]; nv_swncq_slave_config() 1932 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) nv_swncq_slave_config() 1962 static int nv_swncq_port_start(struct ata_port *ap) nv_swncq_port_start() argument 1964 struct device *dev = ap->host->dev; nv_swncq_port_start() 1965 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; nv_swncq_port_start() 1970 rc = ata_bmdma_port_start(ap); nv_swncq_port_start() 1984 ap->private_data = pp; nv_swncq_port_start() 1985 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; nv_swncq_port_start() 1986 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; nv_swncq_port_start() 1987 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; nv_swncq_port_start() 2007 struct ata_port *ap = qc->ap; nv_swncq_fill_sg() local 2009 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_fill_sg() 2041 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, nv_swncq_issue_atacmd() argument 2044 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_issue_atacmd() 2057 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ nv_swncq_issue_atacmd() 2058 ap->ops->sff_exec_command(ap, &qc->tf); nv_swncq_issue_atacmd() 2067 struct ata_port *ap = qc->ap; nv_swncq_qc_issue() local 2068 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_qc_issue() 2076 nv_swncq_issue_atacmd(ap, qc); 
nv_swncq_qc_issue() 2078 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ nv_swncq_qc_issue() 2083 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis) nv_swncq_hotplug() argument 2086 struct ata_eh_info *ehi = &ap->link.eh_info; nv_swncq_hotplug() 2091 sata_scr_read(&ap->link, SCR_ERROR, &serror); nv_swncq_hotplug() 2092 sata_scr_write(&ap->link, SCR_ERROR, serror); nv_swncq_hotplug() 2105 ata_port_freeze(ap); nv_swncq_hotplug() 2108 static int nv_swncq_sdbfis(struct ata_port *ap) nv_swncq_sdbfis() argument 2111 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_sdbfis() 2112 struct ata_eh_info *ehi = &ap->link.eh_info; nv_swncq_sdbfis() 2118 host_stat = ap->ops->bmdma_status(ap); nv_swncq_sdbfis() 2128 ap->ops->sff_irq_clear(ap); nv_swncq_sdbfis() 2129 __ata_bmdma_stop(ap); nv_swncq_sdbfis() 2138 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); nv_swncq_sdbfis() 2140 if (!ap->qc_active) { nv_swncq_sdbfis() 2142 nv_swncq_pp_reinit(ap); nv_swncq_sdbfis() 2159 ap->print_id, ap->qc_active, pp->qc_active, nv_swncq_sdbfis() 2163 nv_swncq_fis_reinit(ap); nv_swncq_sdbfis() 2166 qc = ata_qc_from_tag(ap, pp->last_issue_tag); nv_swncq_sdbfis() 2167 nv_swncq_issue_atacmd(ap, qc); nv_swncq_sdbfis() 2173 qc = nv_swncq_qc_from_dq(ap); nv_swncq_sdbfis() 2175 nv_swncq_issue_atacmd(ap, qc); nv_swncq_sdbfis() 2181 static inline u32 nv_swncq_tag(struct ata_port *ap) nv_swncq_tag() argument 2183 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_tag() 2190 static void nv_swncq_dmafis(struct ata_port *ap) nv_swncq_dmafis() argument 2196 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_dmafis() 2198 __ata_bmdma_stop(ap); nv_swncq_dmafis() 2199 tag = nv_swncq_tag(ap); nv_swncq_dmafis() 2202 qc = ata_qc_from_tag(ap, tag); nv_swncq_dmafis() 2211 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); nv_swncq_dmafis() 2214 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); nv_swncq_dmafis() 2219 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); nv_swncq_dmafis() 2222 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) nv_swncq_host_interrupt() argument 2224 struct nv_swncq_port_priv *pp = ap->private_data; nv_swncq_host_interrupt() 2226 struct ata_eh_info *ehi = &ap->link.eh_info; nv_swncq_host_interrupt() 2230 ata_stat = ap->ops->sff_check_status(ap); nv_swncq_host_interrupt() 2231 nv_swncq_irq_clear(ap, fis); nv_swncq_host_interrupt() 2235 if (ap->pflags & ATA_PFLAG_FROZEN) nv_swncq_host_interrupt() 2239 nv_swncq_hotplug(ap, fis); nv_swncq_host_interrupt() 2246 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) nv_swncq_host_interrupt() 2248 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); nv_swncq_host_interrupt() 2256 ata_port_freeze(ap); nv_swncq_host_interrupt() 2271 ap->print_id, pp->qc_active, pp->dhfis_bits, nv_swncq_host_interrupt() 2273 if (nv_swncq_sdbfis(ap) < 0) nv_swncq_host_interrupt() 2292 ata_stat = ap->ops->sff_check_status(ap); nv_swncq_host_interrupt() 2298 qc = nv_swncq_qc_from_dq(ap); nv_swncq_host_interrupt() 2299 nv_swncq_issue_atacmd(ap, qc); nv_swncq_host_interrupt() 2308 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); nv_swncq_host_interrupt() 2310 nv_swncq_dmafis(ap); nv_swncq_host_interrupt() 2317 ata_port_freeze(ap); nv_swncq_host_interrupt() 2334 struct ata_port *ap = host->ports[i]; nv_swncq_interrupt() local 2336 if (ap->link.sactive) { nv_swncq_interrupt() 2337 nv_swncq_host_interrupt(ap, (u16)irq_stat); nv_swncq_interrupt() 2341 nv_swncq_irq_clear(ap, 0xfff0); nv_swncq_interrupt() 2343 handled += 
nv_host_intr(ap, (u8)irq_stat); nv_swncq_interrupt()
|
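Both the ADMA interrupt path and nv_swncq_sdbfis() above finish NCQ commands the same way: derive which tags the controller still considers active and hand that mask to ata_qc_complete_multiple(), which completes every tag that dropped out of ap->qc_active. A minimal sketch of that step; complete_finished_tags and the hw_still_active argument are illustrative, and how the still-active mask is read back is hardware specific.

/* NCQ completion step as in nv_adma_interrupt()/nv_swncq_sdbfis() above. */
#include <linux/libata.h>

static int complete_finished_tags(struct ata_port *ap, u32 hw_still_active)
{
        /* every tag set in ap->qc_active but clear in hw_still_active is
         * done; ata_qc_complete_multiple() completes exactly those tags
         * and returns how many it finished (or a negative error) */
        return ata_qc_complete_multiple(ap, hw_still_active);
}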
H A D | pata_atp867x.c | 44 * Note that all runtime hot priv ports are cached in ap private_data 83 #define ATP867X_IOBASE(ap) ((ap)->host->iomap[0]) 84 #define ATP867X_SYS_INFO(ap) (0x3F + ATP867X_IOBASE(ap)) 86 #define ATP867X_IO_PORTBASE(ap, port) (0x00 + ATP867X_IOBASE(ap) + \ 88 #define ATP867X_IO_DMABASE(ap, port) (0x40 + \ 89 ATP867X_IO_PORTBASE((ap), (port))) 91 #define ATP867X_IO_STATUS(ap, port) (0x07 + \ 92 ATP867X_IO_PORTBASE((ap), (port))) 93 #define ATP867X_IO_ALTSTATUS(ap, port) (0x0E + \ 94 ATP867X_IO_PORTBASE((ap), (port))) 99 #define ATP867X_IO_MSTRPIOSPD(ap, port) (0x08 + \ 100 ATP867X_IO_DMABASE((ap), (port))) 101 #define ATP867X_IO_SLAVPIOSPD(ap, port) (0x09 + \ 102 ATP867X_IO_DMABASE((ap), (port))) 103 #define ATP867X_IO_8BPIOSPD(ap, port) (0x0A + \ 104 ATP867X_IO_DMABASE((ap), (port))) 105 #define ATP867X_IO_DMAMODE(ap, port) (0x0B + \ 106 ATP867X_IO_DMABASE((ap), (port))) 108 #define ATP867X_IO_PORTSPD(ap, port) (0x4A + \ 109 ATP867X_IO_PORTBASE((ap), (port))) 110 #define ATP867X_IO_PREREAD(ap, port) (0x4C + \ 111 ATP867X_IO_PORTBASE((ap), (port))) 121 static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev) atp867x_set_dmamode() argument 123 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atp867x_set_dmamode() 124 struct atp867x_priv *dp = ap->private_data; atp867x_set_dmamode() 152 static int atp867x_get_active_clocks_shifted(struct ata_port *ap, atp867x_get_active_clocks_shifted() argument 155 struct atp867x_priv *dp = ap->private_data; atp867x_get_active_clocks_shifted() 214 static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev) atp867x_set_piomode() argument 217 struct atp867x_priv *dp = ap->private_data; atp867x_set_piomode() 239 b = atp867x_get_active_clocks_shifted(ap, t.active) | atp867x_set_piomode() 247 b = atp867x_get_active_clocks_shifted(ap, t.act8b) | atp867x_set_piomode() 263 static int atp867x_cable_detect(struct ata_port *ap) atp867x_cable_detect() argument 265 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atp867x_cable_detect() 300 static void atp867x_check_ports(struct ata_port *ap, int port) atp867x_check_ports() argument 302 struct ata_ioports *ioaddr = &ap->ioaddr; atp867x_check_ports() 303 struct atp867x_priv *dp = ap->private_data; atp867x_check_ports() 326 (unsigned long long)ATP867X_IO_PORTBASE(ap, port), atp867x_check_ports() 328 (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port), atp867x_check_ports() 330 (unsigned long long)ATP867X_IO_DMABASE(ap, port), atp867x_check_ports() 349 static int atp867x_set_priv(struct ata_port *ap) atp867x_set_priv() argument 351 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atp867x_set_priv() 353 int port = ap->port_no; atp867x_set_priv() 355 dp = ap->private_data = atp867x_set_priv() 360 dp->dma_mode = ATP867X_IO_DMAMODE(ap, port); atp867x_set_priv() 361 dp->mstr_piospd = ATP867X_IO_MSTRPIOSPD(ap, port); atp867x_set_priv() 362 dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port); atp867x_set_priv() 363 dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port); atp867x_set_priv() 366 ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ; atp867x_set_priv() 374 struct ata_port *ap = host->ports[0]; atp867x_fixup() local 394 iowrite16(ATP867X_IO_PORTSPD_VAL, ATP867X_IO_PORTSPD(ap, i)); atp867x_fixup() 400 iowrite16(ATP867X_PREREAD_VAL, ATP867X_IO_PREREAD(ap, i)); atp867x_fixup() 402 v = ioread8(ATP867X_IOBASE(ap) + 0x28); atp867x_fixup() 405 iowrite8(v, ATP867X_IOBASE(ap) + 0x28); atp867x_fixup() 410 v = ioread8(ATP867X_SYS_INFO(ap)); atp867x_fixup() 414 
iowrite8(v, ATP867X_SYS_INFO(ap)); atp867x_fixup() 446 struct ata_port *ap = host->ports[i]; atp867x_ata_pci_sff_init_host() local 447 struct ata_ioports *ioaddr = &ap->ioaddr; atp867x_ata_pci_sff_init_host() 449 ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i); atp867x_ata_pci_sff_init_host() 451 = ATP867X_IO_ALTSTATUS(ap, i); atp867x_ata_pci_sff_init_host() 452 ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i); atp867x_ata_pci_sff_init_host() 455 rc = atp867x_set_priv(ap); atp867x_ata_pci_sff_init_host() 460 atp867x_check_ports(ap, i); atp867x_ata_pci_sff_init_host() 462 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", atp867x_ata_pci_sff_init_host() 465 ata_port_desc(ap, "bmdma 0x%lx", atp867x_ata_pci_sff_init_host()
|
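atp867x_set_priv() above computes each port's timing-register addresses once from the shared iomap and caches them in ap->private_data, so the ->set_piomode/->set_dmamode paths never redo the address arithmetic. A sketch of that caching pattern with invented offsets and a hypothetical my_reg_priv layout, not the real ATP867X register map.

/* Cache computed register addresses in ap->private_data, after
 * atp867x_set_priv() above.  The 0x40 stride and 0x08/0x0b offsets
 * are invented for the sketch. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/libata.h>
#include <linux/slab.h>

struct my_reg_priv {
        void __iomem *pio_spd;
        void __iomem *dma_mode;
};

static int my_set_priv(struct ata_port *ap, void __iomem *iobase)
{
        struct my_reg_priv *dp;

        dp = devm_kzalloc(ap->host->dev, sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return -ENOMEM;

        dp->pio_spd  = iobase + 0x40 * ap->port_no + 0x08;
        dp->dma_mode = iobase + 0x40 * ap->port_no + 0x0b;

        ap->private_data = dp;
        return 0;
}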
H A D | pdc_adma.c | 56 #define ADMA_PORT_REGS(ap) \ 57 ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no) 133 static int adma_port_start(struct ata_port *ap); 134 static void adma_port_stop(struct ata_port *ap); 138 static void adma_freeze(struct ata_port *ap); 139 static void adma_thaw(struct ata_port *ap); 193 static void adma_reset_engine(struct ata_port *ap) adma_reset_engine() argument 195 void __iomem *chan = ADMA_PORT_REGS(ap); adma_reset_engine() 204 static void adma_reinit_engine(struct ata_port *ap) adma_reinit_engine() argument 206 struct adma_port_priv *pp = ap->private_data; adma_reinit_engine() 207 void __iomem *chan = ADMA_PORT_REGS(ap); adma_reinit_engine() 210 writeb(ATA_NIEN, ap->ioaddr.ctl_addr); adma_reinit_engine() 211 ata_sff_check_status(ap); adma_reinit_engine() 214 adma_reset_engine(ap); adma_reinit_engine() 232 static inline void adma_enter_reg_mode(struct ata_port *ap) adma_enter_reg_mode() argument 234 void __iomem *chan = ADMA_PORT_REGS(ap); adma_enter_reg_mode() 240 static void adma_freeze(struct ata_port *ap) adma_freeze() argument 242 void __iomem *chan = ADMA_PORT_REGS(ap); adma_freeze() 245 writeb(ATA_NIEN, ap->ioaddr.ctl_addr); adma_freeze() 246 ata_sff_check_status(ap); adma_freeze() 255 static void adma_thaw(struct ata_port *ap) adma_thaw() argument 257 adma_reinit_engine(ap); adma_thaw() 262 struct ata_port *ap = link->ap; adma_prereset() local 263 struct adma_port_priv *pp = ap->private_data; adma_prereset() 267 adma_reinit_engine(ap); adma_prereset() 275 struct ata_port *ap = qc->ap; adma_fill_sg() local 276 struct adma_port_priv *pp = ap->private_data; adma_fill_sg() 316 struct adma_port_priv *pp = qc->ap->private_data; adma_qc_prep() 323 adma_enter_reg_mode(qc->ap); adma_qc_prep() 394 struct ata_port *ap = qc->ap; adma_packet_start() local 395 void __iomem *chan = ADMA_PORT_REGS(ap); adma_packet_start() 397 VPRINTK("ENTER, ap %p\n", ap); adma_packet_start() 405 struct adma_port_priv *pp = qc->ap->private_data; adma_qc_issue() 430 struct ata_port *ap = host->ports[port_no]; adma_intr_pkt() local 433 void __iomem *chan = ADMA_PORT_REGS(ap); adma_intr_pkt() 439 adma_enter_reg_mode(ap); adma_intr_pkt() 440 pp = ap->private_data; adma_intr_pkt() 443 qc = ata_qc_from_tag(ap, ap->link.active_tag); adma_intr_pkt() 458 struct ata_eh_info *ehi = &ap->link.eh_info; adma_intr_pkt() 466 ata_port_abort(ap); adma_intr_pkt() 468 ata_port_freeze(ap); adma_intr_pkt() 480 struct ata_port *ap = host->ports[port_no]; adma_intr_mmio() local 481 struct adma_port_priv *pp = ap->private_data; adma_intr_mmio() 486 qc = ata_qc_from_tag(ap, ap->link.active_tag); adma_intr_mmio() 490 u8 status = ata_sff_check_status(ap); adma_intr_mmio() 494 ap->print_id, qc->tf.protocol, status); adma_intr_mmio() 502 struct ata_eh_info *ehi = &ap->link.eh_info; adma_intr_mmio() 507 ata_port_abort(ap); adma_intr_mmio() 509 ata_port_freeze(ap); adma_intr_mmio() 550 static int adma_port_start(struct ata_port *ap) adma_port_start() argument 552 struct device *dev = ap->host->dev; adma_port_start() 555 adma_enter_reg_mode(ap); adma_port_start() 570 ap->private_data = pp; adma_port_start() 571 adma_reinit_engine(ap); adma_port_start() 575 static void adma_port_stop(struct ata_port *ap) adma_port_stop() argument 577 adma_reset_engine(ap); adma_port_stop() 644 struct ata_port *ap = host->ports[port_no]; adma_ata_init_one() local 648 adma_ata_setup_port(&ap->ioaddr, port_base); adma_ata_init_one() 650 ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio"); adma_ata_init_one() 651 
ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port"); adma_ata_init_one()
|
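adma_freeze()/adma_thaw() above show the usual SFF-style freeze convention: ->freeze() masks the drive interrupt by writing nIEN into the device control register and reads status to flush anything pending, while ->thaw() clears the mask again. A generic sketch of that convention; my_freeze/my_thaw are placeholders, and the real driver additionally resets and reinitializes its ADMA engine on thaw.

/* Freeze/thaw convention after adma_freeze()/adma_thaw() above. */
#include <linux/io.h>
#include <linux/libata.h>

static void my_freeze(struct ata_port *ap)
{
        /* set nIEN so the device stops asserting INTRQ, then read
         * status once to clear anything already pending */
        iowrite8(ap->ctl | ATA_NIEN, ap->ioaddr.ctl_addr);
        ata_sff_check_status(ap);
}

static void my_thaw(struct ata_port *ap)
{
        ata_sff_check_status(ap);               /* swallow a stale interrupt */
        iowrite8(ap->ctl, ap->ioaddr.ctl_addr); /* nIEN cleared again */
}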
H A D | sata_promise.c | 65 /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */ 78 /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */ 138 /* ap->flags bits */ 156 static int pdc_common_port_start(struct ata_port *ap); 157 static int pdc_sata_port_start(struct ata_port *ap); 159 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 160 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 163 static void pdc_irq_clear(struct ata_port *ap); 165 static void pdc_freeze(struct ata_port *ap); 166 static void pdc_sata_freeze(struct ata_port *ap); 167 static void pdc_thaw(struct ata_port *ap); 168 static void pdc_sata_thaw(struct ata_port *ap); 173 static void pdc_error_handler(struct ata_port *ap); 175 static int pdc_pata_cable_detect(struct ata_port *ap); 176 static int pdc_sata_cable_detect(struct ata_port *ap); 331 static int pdc_common_port_start(struct ata_port *ap) pdc_common_port_start() argument 333 struct device *dev = ap->host->dev; pdc_common_port_start() 338 rc = ata_bmdma_port_start(ap); pdc_common_port_start() 350 ap->private_data = pp; pdc_common_port_start() 355 static int pdc_sata_port_start(struct ata_port *ap) pdc_sata_port_start() argument 359 rc = pdc_common_port_start(ap); pdc_sata_port_start() 364 if (ap->flags & PDC_FLAG_GEN_II) { pdc_sata_port_start() 365 void __iomem *sata_mmio = ap->ioaddr.scr_addr; pdc_sata_port_start() 376 static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap) pdc_fpdma_clear_interrupt_flag() argument 378 void __iomem *sata_mmio = ap->ioaddr.scr_addr; pdc_fpdma_clear_interrupt_flag() 391 static void pdc_fpdma_reset(struct ata_port *ap) pdc_fpdma_reset() argument 393 void __iomem *sata_mmio = ap->ioaddr.scr_addr; pdc_fpdma_reset() 406 pdc_fpdma_clear_interrupt_flag(ap); pdc_fpdma_reset() 409 static void pdc_not_at_command_packet_phase(struct ata_port *ap) pdc_not_at_command_packet_phase() argument 411 void __iomem *sata_mmio = ap->ioaddr.scr_addr; pdc_not_at_command_packet_phase() 425 static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap) pdc_clear_internal_debug_record_error_register() argument 427 void __iomem *sata_mmio = ap->ioaddr.scr_addr; pdc_clear_internal_debug_record_error_register() 433 static void pdc_reset_port(struct ata_port *ap) pdc_reset_port() argument 435 void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; pdc_reset_port() 439 if (ap->flags & PDC_FLAG_GEN_II) pdc_reset_port() 440 pdc_not_at_command_packet_phase(ap); pdc_reset_port() 461 if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) { pdc_reset_port() 462 pdc_fpdma_reset(ap); pdc_reset_port() 463 pdc_clear_internal_debug_record_error_register(ap); pdc_reset_port() 467 static int pdc_pata_cable_detect(struct ata_port *ap) pdc_pata_cable_detect() argument 470 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_pata_cable_detect() 478 static int pdc_sata_cable_detect(struct ata_port *ap) pdc_sata_cable_detect() argument 488 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); pdc_sata_scr_read() 497 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); pdc_sata_scr_write() 503 struct ata_port *ap = qc->ap; pdc_atapi_pkt() local 504 dma_addr_t sg_table = ap->bmdma_prd_dma; pdc_atapi_pkt() 507 struct pdc_port_priv *pp = ap->private_data; pdc_atapi_pkt() 533 if (sata_scr_valid(&ap->link)) pdc_atapi_pkt() 591 struct ata_port *ap = qc->ap; pdc_fill_sg() local 592 struct ata_bmdma_prd *prd = ap->bmdma_prd; pdc_fill_sg() 654 struct 
pdc_port_priv *pp = qc->ap->private_data; pdc_qc_prep() 664 i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma, pdc_qc_prep() 699 static unsigned int pdc_sata_nr_ports(const struct ata_port *ap) pdc_sata_nr_ports() argument 701 return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2; pdc_sata_nr_ports() 704 static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap) pdc_sata_ata_port_to_ata_no() argument 706 const struct ata_host *host = ap->host; pdc_sata_ata_port_to_ata_no() 707 unsigned int nr_ports = pdc_sata_nr_ports(ap); pdc_sata_ata_port_to_ata_no() 710 for (i = 0; i < nr_ports && host->ports[i] != ap; ++i) pdc_sata_ata_port_to_ata_no() 713 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); pdc_sata_ata_port_to_ata_no() 716 static void pdc_freeze(struct ata_port *ap) pdc_freeze() argument 718 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_freeze() 728 static void pdc_sata_freeze(struct ata_port *ap) pdc_sata_freeze() argument 730 struct ata_host *host = ap->host; pdc_sata_freeze() 733 unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); pdc_sata_freeze() 740 * 2) ap->lock == &ap->host->lock pdc_sata_freeze() 741 * 3) ->freeze() and ->thaw() are called with ap->lock held pdc_sata_freeze() 748 pdc_freeze(ap); pdc_sata_freeze() 751 static void pdc_thaw(struct ata_port *ap) pdc_thaw() argument 753 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_thaw() 766 static void pdc_sata_thaw(struct ata_port *ap) pdc_sata_thaw() argument 768 struct ata_host *host = ap->host; pdc_sata_thaw() 771 unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); pdc_sata_thaw() 774 pdc_thaw(ap); pdc_sata_thaw() 789 pdc_reset_port(link->ap); pdc_pata_softreset() 793 static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap) pdc_ata_port_to_ata_no() argument 795 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_ata_port_to_ata_no() 796 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc_ata_port_to_ata_no() 802 static void pdc_hard_reset_port(struct ata_port *ap) pdc_hard_reset_port() argument 804 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc_hard_reset_port() 806 unsigned int ata_no = pdc_ata_port_to_ata_no(ap); pdc_hard_reset_port() 807 struct pdc_host_priv *hpriv = ap->host->private_data; pdc_hard_reset_port() 827 if (link->ap->flags & PDC_FLAG_GEN_II) pdc_sata_hardreset() 828 pdc_not_at_command_packet_phase(link->ap); pdc_sata_hardreset() 830 pdc_hard_reset_port(link->ap); pdc_sata_hardreset() 831 pdc_reset_port(link->ap); pdc_sata_hardreset() 840 static void pdc_error_handler(struct ata_port *ap) pdc_error_handler() argument 842 if (!(ap->pflags & ATA_PFLAG_FROZEN)) pdc_error_handler() 843 pdc_reset_port(ap); pdc_error_handler() 845 ata_sff_error_handler(ap); pdc_error_handler() 850 struct ata_port *ap = qc->ap; pdc_post_internal_cmd() local 854 pdc_reset_port(ap); pdc_post_internal_cmd() 857 static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, pdc_error_intr() argument 860 struct ata_eh_info *ehi = &ap->link.eh_info; pdc_error_intr() 877 if (sata_scr_valid(&ap->link)) { pdc_error_intr() 880 pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror); pdc_error_intr() 886 pdc_reset_port(ap); pdc_error_intr() 888 ata_port_abort(ap); pdc_error_intr() 891 static unsigned int pdc_host_intr(struct ata_port *ap, pdc_host_intr() argument 895 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_host_intr() 899 if (ap->flags & PDC_FLAG_GEN_II) pdc_host_intr() 905 pdc_error_intr(ap, qc, port_status, err_mask); pdc_host_intr() 914 qc->err_mask |= 
ac_err_mask(ata_wait_idle(ap)); pdc_host_intr() 919 ap->stats.idle_irq++; pdc_host_intr() 926 static void pdc_irq_clear(struct ata_port *ap) pdc_irq_clear() argument 928 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_irq_clear() 936 struct ata_port *ap; pdc_interrupt() local 986 ap = host->ports[i]; pdc_interrupt() 992 struct ata_eh_info *ehi = &ap->link.eh_info; pdc_interrupt() 996 ata_port_freeze(ap); pdc_interrupt() 1006 qc = ata_qc_from_tag(ap, ap->link.active_tag); pdc_interrupt() 1008 handled += pdc_host_intr(ap, qc); pdc_interrupt() 1021 struct ata_port *ap = qc->ap; pdc_packet_start() local 1022 struct pdc_port_priv *pp = ap->private_data; pdc_packet_start() 1023 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc_packet_start() 1024 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; pdc_packet_start() 1025 unsigned int port_no = ap->port_no; pdc_packet_start() 1028 VPRINTK("ENTER, ap %p\n", ap); pdc_packet_start() 1060 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) pdc_tf_load_mmio() argument 1063 ata_sff_tf_load(ap, tf); pdc_tf_load_mmio() 1066 static void pdc_exec_command_mmio(struct ata_port *ap, pdc_exec_command_mmio() argument 1070 ata_sff_exec_command(ap, tf); pdc_exec_command_mmio() 1109 static void pdc_ata_setup_port(struct ata_port *ap, pdc_ata_setup_port() argument 1112 ap->ioaddr.cmd_addr = base; pdc_ata_setup_port() 1113 ap->ioaddr.data_addr = base; pdc_ata_setup_port() 1114 ap->ioaddr.feature_addr = pdc_ata_setup_port() 1115 ap->ioaddr.error_addr = base + 0x4; pdc_ata_setup_port() 1116 ap->ioaddr.nsect_addr = base + 0x8; pdc_ata_setup_port() 1117 ap->ioaddr.lbal_addr = base + 0xc; pdc_ata_setup_port() 1118 ap->ioaddr.lbam_addr = base + 0x10; pdc_ata_setup_port() 1119 ap->ioaddr.lbah_addr = base + 0x14; pdc_ata_setup_port() 1120 ap->ioaddr.device_addr = base + 0x18; pdc_ata_setup_port() 1121 ap->ioaddr.command_addr = pdc_ata_setup_port() 1122 ap->ioaddr.status_addr = base + 0x1c; pdc_ata_setup_port() 1123 ap->ioaddr.altstatus_addr = pdc_ata_setup_port() 1124 ap->ioaddr.ctl_addr = base + 0x38; pdc_ata_setup_port() 1125 ap->ioaddr.scr_addr = scr_addr; pdc_ata_setup_port() 1235 struct ata_port *ap = host->ports[i]; pdc_ata_init_one() local 1240 pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset); pdc_ata_init_one() 1242 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); pdc_ata_init_one() 1243 ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata"); pdc_ata_init_one()
|
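pdc_reset_port() above is a bounded register-poll reset: toggle the reset bit in the port control/status register, delay, and keep re-reading until the register reaches the expected state or the retry budget runs out. A generic sketch of that shape, using a hypothetical self-clearing reset bit rather than Promise's PDC_CTLSTAT layout.

/* Bounded register-poll reset in the spirit of pdc_reset_port() above.
 * MY_RESET_BIT and its self-clearing behaviour are assumptions. */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define MY_RESET_BIT    (1 << 11)

static int my_reset_engine(void __iomem *ctlstat)
{
        unsigned int tries;

        writel(readl(ctlstat) | MY_RESET_BIT, ctlstat);
        readl(ctlstat);                         /* flush the posted write */
        udelay(100);

        for (tries = 0; tries < 11; tries++) {
                if (!(readl(ctlstat) & MY_RESET_BIT))
                        return 0;               /* engine came back */
                udelay(100);
        }
        return -EBUSY;
}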
H A D | pata_amd.c | 30 * @ap: ATA port being set up 42 static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock) timing_setup() argument 48 struct pci_dev *pdev = to_pci_dev(ap->host->dev); timing_setup() 50 int dn = ap->port_no * 2 + adev->devno; timing_setup() 138 struct ata_port *ap = link->ap; amd_pre_reset() local 139 struct pci_dev *pdev = to_pci_dev(ap->host->dev); amd_pre_reset() 141 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) amd_pre_reset() 149 * @ap: port 154 static int amd_cable_detect(struct ata_port *ap) amd_cable_detect() argument 157 struct pci_dev *pdev = to_pci_dev(ap->host->dev); amd_cable_detect() 161 if (ata66 & bitmask[ap->port_no]) amd_cable_detect() 168 * @ap: ATA interface 177 static void amd_fifo_setup(struct ata_port *ap) amd_fifo_setup() argument 180 struct pci_dev *pdev = to_pci_dev(ap->host->dev); amd_fifo_setup() 182 u8 fifo = fifobit[ap->port_no]; amd_fifo_setup() 186 ata_for_each_dev(adev, &ap->link, ENABLED) { amd_fifo_setup() 195 r &= ~fifobit[ap->port_no]; amd_fifo_setup() 202 * @ap: ATA interface 208 static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev) amd33_set_piomode() argument 210 amd_fifo_setup(ap); amd33_set_piomode() 211 timing_setup(ap, adev, 0x40, adev->pio_mode, 1); amd33_set_piomode() 214 static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev) amd66_set_piomode() argument 216 amd_fifo_setup(ap); amd66_set_piomode() 217 timing_setup(ap, adev, 0x40, adev->pio_mode, 2); amd66_set_piomode() 220 static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev) amd100_set_piomode() argument 222 amd_fifo_setup(ap); amd100_set_piomode() 223 timing_setup(ap, adev, 0x40, adev->pio_mode, 3); amd100_set_piomode() 226 static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev) amd133_set_piomode() argument 228 amd_fifo_setup(ap); amd133_set_piomode() 229 timing_setup(ap, adev, 0x40, adev->pio_mode, 4); amd133_set_piomode() 234 * @ap: ATA interface 241 static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev) amd33_set_dmamode() argument 243 timing_setup(ap, adev, 0x40, adev->dma_mode, 1); amd33_set_dmamode() 246 static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev) amd66_set_dmamode() argument 248 timing_setup(ap, adev, 0x40, adev->dma_mode, 2); amd66_set_dmamode() 251 static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev) amd100_set_dmamode() argument 253 timing_setup(ap, adev, 0x40, adev->dma_mode, 3); amd100_set_dmamode() 256 static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev) amd133_set_dmamode() argument 258 timing_setup(ap, adev, 0x40, adev->dma_mode, 4); amd133_set_dmamode() 273 struct ata_port *ap = dev->link->ap; nv_mode_filter() local 280 udma = saved_udma = (unsigned long)ap->host->private_data; nv_mode_filter() 282 if (ap->port_no == 0) nv_mode_filter() 291 gtm = ata_acpi_init_gtm(ap); nv_mode_filter() 313 ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, " nv_mode_filter() 336 struct ata_port *ap = link->ap; nv_pre_reset() local 337 struct pci_dev *pdev = to_pci_dev(ap->host->dev); nv_pre_reset() 339 if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) nv_pre_reset() 347 * @ap: ATA interface 353 static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev) nv100_set_piomode() argument 355 timing_setup(ap, adev, 0x50, adev->pio_mode, 3); nv100_set_piomode() 358 static void 
nv133_set_piomode(struct ata_port *ap, struct ata_device *adev) nv133_set_piomode() argument 360 timing_setup(ap, adev, 0x50, adev->pio_mode, 4); nv133_set_piomode() 365 * @ap: ATA interface 372 static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev) nv100_set_dmamode() argument 374 timing_setup(ap, adev, 0x50, adev->dma_mode, 3); nv100_set_dmamode() 377 static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev) nv133_set_dmamode() argument 379 timing_setup(ap, adev, 0x50, adev->dma_mode, 4); nv133_set_dmamode()
|
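timing_setup() above addresses its PCI config timing bytes per drive: with two drives per port, the controller-wide drive number is ap->port_no * 2 + adev->devno, and that index selects which byte to rewrite. A sketch of that addressing; the 0x40 base and the "3 - dn" byte ordering are assumptions for the sketch, not the exact AMD/Nvidia layout.

/* Per-drive PCI config addressing after timing_setup() above. */
#include <linux/libata.h>
#include <linux/pci.h>

static void my_set_timing(struct ata_port *ap, struct ata_device *adev,
                          u8 timing)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        int dn = ap->port_no * 2 + adev->devno; /* drive number on the chip */

        pci_write_config_byte(pdev, 0x40 + (3 - dn), timing);
}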
H A D | pata_pxa.c | 62 struct pata_pxa_data *pd = qc->ap->private_data; pxa_load_dmac() 107 struct pata_pxa_data *pd = qc->ap->private_data; pxa_qc_prep() 138 qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); pxa_bmdma_setup() 146 struct pata_pxa_data *pd = qc->ap->private_data; pxa_bmdma_start() 156 struct pata_pxa_data *pd = qc->ap->private_data; pxa_bmdma_stop() 160 dev_err(qc->ap->dev, "Timeout waiting for DMA completion!"); pxa_bmdma_stop() 169 static unsigned char pxa_bmdma_status(struct ata_port *ap) pxa_bmdma_status() argument 171 struct pata_pxa_data *pd = ap->private_data; pxa_bmdma_status() 183 static void pxa_irq_clear(struct ata_port *ap) pxa_irq_clear() argument 221 struct ata_port *ap = port; pxa_ata_dma_irq() local 222 struct pata_pxa_data *pd = ap->private_data; pxa_ata_dma_irq() 234 struct ata_port *ap; pxa_ata_probe() local 290 ap = host->ports[0]; pxa_ata_probe() 291 ap->ops = &pxa_ata_port_ops; pxa_ata_probe() 292 ap->pio_mask = ATA_PIO4; pxa_ata_probe() 293 ap->mwdma_mask = ATA_MWDMA2; pxa_ata_probe() 295 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, pxa_ata_probe() 297 ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, pxa_ata_probe() 299 ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, pxa_ata_probe() 305 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; pxa_ata_probe() 306 ap->ioaddr.data_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 308 ap->ioaddr.error_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 310 ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 312 ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 314 ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 316 ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 318 ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 320 ap->ioaddr.device_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 322 ap->ioaddr.status_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 324 ap->ioaddr.command_addr = ap->ioaddr.cmd_addr + pxa_ata_probe() 335 ap->private_data = data; pxa_ata_probe() 351 pxa_ata_dma_irq, ap); pxa_ata_probe()
|
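pxa_ata_probe() above derives every shadow taskfile register from cmd_addr plus a fixed stride, the usual setup for memory-mapped SFF ports whose registers are not byte-adjacent. A sketch of that fill-in, assuming a caller-supplied stride and the field names of struct ata_ioports; the PXA offsets themselves come from platform data and are not reproduced here.

/* Fill struct ata_ioports from cmd_addr plus a stride, as
 * pxa_ata_probe() does above.  ctl_addr/altstatus_addr are assumed
 * to be set up separately by the caller. */
#include <linux/libata.h>

static void my_fill_ioaddr(struct ata_ioports *ioaddr, unsigned int stride)
{
        void __iomem *base = ioaddr->cmd_addr;

        ioaddr->data_addr      = base + 0 * stride;
        ioaddr->error_addr     = base + 1 * stride;
        ioaddr->feature_addr   = base + 1 * stride;     /* write side of error */
        ioaddr->nsect_addr     = base + 2 * stride;
        ioaddr->lbal_addr      = base + 3 * stride;
        ioaddr->lbam_addr      = base + 4 * stride;
        ioaddr->lbah_addr      = base + 5 * stride;
        ioaddr->device_addr    = base + 6 * stride;
        ioaddr->status_addr    = base + 7 * stride;
        ioaddr->command_addr   = base + 7 * stride;     /* write side of status */
        ioaddr->altstatus_addr = ioaddr->ctl_addr;
}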
H A D | pata_ninja32.c | 51 * @ap: ATA interface 58 static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev) ninja32_set_piomode() argument 64 ap->ioaddr.bmdma_addr + 0x1f); ninja32_set_piomode() 65 ap->private_data = adev; ninja32_set_piomode() 69 static void ninja32_dev_select(struct ata_port *ap, unsigned int device) ninja32_dev_select() argument 71 struct ata_device *adev = &ap->link.device[device]; ninja32_dev_select() 72 if (ap->private_data != adev) { ninja32_dev_select() 73 iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); ninja32_dev_select() 74 ata_sff_dev_select(ap, device); ninja32_dev_select() 75 ninja32_set_piomode(ap, adev); ninja32_dev_select() 105 struct ata_port *ap; ninja32_init_one() local 112 ap = host->ports[0]; ninja32_init_one() 138 ap->ops = &ninja32_port_ops; ninja32_init_one() 139 ap->pio_mask = ATA_PIO4; ninja32_init_one() 140 ap->flags |= ATA_FLAG_SLAVE_POSS; ninja32_init_one() 142 ap->ioaddr.cmd_addr = base + 0x10; ninja32_init_one() 143 ap->ioaddr.ctl_addr = base + 0x1E; ninja32_init_one() 144 ap->ioaddr.altstatus_addr = base + 0x1E; ninja32_init_one() 145 ap->ioaddr.bmdma_addr = base; ninja32_init_one() 146 ata_sff_std_ports(&ap->ioaddr); ninja32_init_one() 147 ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; ninja32_init_one()
|
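ninja32_dev_select() above caches the currently selected device in ap->private_data and only performs the select-plus-retune sequence when the selection actually changes. A hedged sketch of that idea built on the stock ata_sff_dev_select() helper; my_dev_select is a stand-in, and the real driver also pokes a controller-specific register before selecting.

/* Select-only-on-change sketch after ninja32_dev_select() above. */
#include <linux/libata.h>

static void my_dev_select(struct ata_port *ap, unsigned int device)
{
        struct ata_device *adev = &ap->link.device[device];

        if (ap->private_data == adev)
                return;                         /* already selected and tuned */

        ata_sff_dev_select(ap, device);         /* standard DEV bit select */
        ap->ops->set_piomode(ap, adev);         /* retune for this device */
        ap->private_data = adev;
}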
H A D | sata_sil24.c | 324 * ap->private_data 342 static void sil24_pmp_attach(struct ata_port *ap); 343 static void sil24_pmp_detach(struct ata_port *ap); 344 static void sil24_freeze(struct ata_port *ap); 345 static void sil24_thaw(struct ata_port *ap); 352 static void sil24_error_handler(struct ata_port *ap); 354 static int sil24_port_start(struct ata_port *ap); 360 static int sil24_port_resume(struct ata_port *ap); 469 static unsigned long sil24_port_offset(struct ata_port *ap) sil24_port_offset() argument 471 return ap->port_no * PORT_REGS_SIZE; sil24_port_offset() 474 static void __iomem *sil24_port_base(struct ata_port *ap) sil24_port_base() argument 476 return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap); sil24_port_base() 481 void __iomem *port = sil24_port_base(dev->link->ap); sil24_dev_config() 489 static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) sil24_read_tf() argument 491 void __iomem *port = sil24_port_base(ap); sil24_read_tf() 509 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; sil24_scr_read() 520 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; sil24_scr_write() 529 static void sil24_config_port(struct ata_port *ap) sil24_config_port() argument 531 void __iomem *port = sil24_port_base(ap); sil24_config_port() 534 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) sil24_config_port() 554 static void sil24_config_pmp(struct ata_port *ap, int attached) sil24_config_pmp() argument 556 void __iomem *port = sil24_port_base(ap); sil24_config_pmp() 564 static void sil24_clear_pmp(struct ata_port *ap) sil24_clear_pmp() argument 566 void __iomem *port = sil24_port_base(ap); sil24_clear_pmp() 579 static int sil24_init_port(struct ata_port *ap) sil24_init_port() argument 581 void __iomem *port = sil24_port_base(ap); sil24_init_port() 582 struct sil24_port_priv *pp = ap->private_data; sil24_init_port() 586 if (sata_pmp_attached(ap)) sil24_init_port() 587 sil24_clear_pmp(ap); sil24_init_port() 590 ata_wait_register(ap, port + PORT_CTRL_STAT, sil24_init_port() 592 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, sil24_init_port() 597 ap->link.eh_context.i.action |= ATA_EH_RESET; sil24_init_port() 604 static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, sil24_exec_polled_cmd() argument 609 void __iomem *port = sil24_port_base(ap); sil24_exec_polled_cmd() 610 struct sil24_port_priv *pp = ap->private_data; sil24_exec_polled_cmd() 632 irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0, sil24_exec_polled_cmd() 642 sil24_init_port(ap); sil24_exec_polled_cmd() 659 struct ata_port *ap = link->ap; sil24_softreset() local 669 if (sil24_init_port(ap)) { sil24_softreset() 679 rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, sil24_softreset() 689 sil24_read_tf(ap, 0, &tf); sil24_softreset() 703 struct ata_port *ap = link->ap; sil24_hardreset() local 704 void __iomem *port = sil24_port_base(ap); sil24_hardreset() 705 struct sil24_port_priv *pp = ap->private_data; sil24_hardreset() 716 ata_port_warn(ap, sil24_hardreset() 720 ata_msleep(ap, 10); sil24_hardreset() 722 ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, sil24_hardreset() 726 sil24_config_port(ap); sil24_hardreset() 727 sil24_config_pmp(ap, ap->nr_pmp_links); sil24_hardreset() 741 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, sil24_hardreset() 801 struct ata_port *ap = link->ap; sil24_qc_defer() local 826 if (unlikely(ap->excl_link)) { sil24_qc_defer() 827 if (link == ap->excl_link) { sil24_qc_defer() 828 if 
(ap->nr_active_links) sil24_qc_defer() 834 ap->excl_link = link; sil24_qc_defer() 835 if (ap->nr_active_links) sil24_qc_defer() 845 struct ata_port *ap = qc->ap; sil24_qc_prep() local 846 struct sil24_port_priv *pp = ap->private_data; sil24_qc_prep() 891 struct ata_port *ap = qc->ap; sil24_qc_issue() local 892 struct sil24_port_priv *pp = ap->private_data; sil24_qc_issue() 893 void __iomem *port = sil24_port_base(ap); sil24_qc_issue() 914 sil24_read_tf(qc->ap, qc->tag, &qc->result_tf); sil24_qc_fill_rtf() 918 static void sil24_pmp_attach(struct ata_port *ap) sil24_pmp_attach() argument 920 u32 *gscr = ap->link.device->gscr; sil24_pmp_attach() 922 sil24_config_pmp(ap, 1); sil24_pmp_attach() 923 sil24_init_port(ap); sil24_pmp_attach() 927 ata_port_info(ap, sil24_pmp_attach() 929 ap->flags &= ~ATA_FLAG_NCQ; sil24_pmp_attach() 933 static void sil24_pmp_detach(struct ata_port *ap) sil24_pmp_detach() argument 935 sil24_init_port(ap); sil24_pmp_detach() 936 sil24_config_pmp(ap, 0); sil24_pmp_detach() 938 ap->flags |= ATA_FLAG_NCQ; sil24_pmp_detach() 946 rc = sil24_init_port(link->ap); sil24_pmp_hardreset() 955 static void sil24_freeze(struct ata_port *ap) sil24_freeze() argument 957 void __iomem *port = sil24_port_base(ap); sil24_freeze() 965 static void sil24_thaw(struct ata_port *ap) sil24_thaw() argument 967 void __iomem *port = sil24_port_base(ap); sil24_thaw() 978 static void sil24_error_intr(struct ata_port *ap) sil24_error_intr() argument 980 void __iomem *port = sil24_port_base(ap); sil24_error_intr() 981 struct sil24_port_priv *pp = ap->private_data; sil24_error_intr() 993 link = &ap->link; sil24_error_intr() 1001 sata_async_notification(ap); sil24_error_intr() 1033 if (ap->nr_active_links >= 3) { sil24_error_intr() 1042 if (sata_pmp_attached(ap)) { sil24_error_intr() 1046 if (pmp < ap->nr_pmp_links) { sil24_error_intr() 1047 link = &ap->pmp_link[pmp]; sil24_error_intr() 1049 qc = ata_qc_from_tag(ap, link->active_tag); sil24_error_intr() 1060 qc = ata_qc_from_tag(ap, link->active_tag); sil24_error_intr() 1090 if (sata_pmp_attached(ap)) sil24_error_intr() 1096 ata_port_freeze(ap); sil24_error_intr() 1101 ata_port_abort(ap); sil24_error_intr() 1105 static inline void sil24_host_intr(struct ata_port *ap) sil24_host_intr() argument 1107 void __iomem *port = sil24_port_base(ap); sil24_host_intr() 1118 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) sil24_host_intr() 1124 sil24_error_intr(ap); sil24_host_intr() 1129 rc = ata_qc_complete_multiple(ap, qc_active); sil24_host_intr() 1133 struct ata_eh_info *ehi = &ap->link.eh_info; sil24_host_intr() 1136 ata_port_freeze(ap); sil24_host_intr() 1141 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) sil24_host_intr() 1142 ata_port_info(ap, sil24_host_intr() 1144 slot_stat, ap->link.active_tag, ap->link.sactive); sil24_host_intr() 1179 static void sil24_error_handler(struct ata_port *ap) sil24_error_handler() argument 1181 struct sil24_port_priv *pp = ap->private_data; sil24_error_handler() 1183 if (sil24_init_port(ap)) sil24_error_handler() 1184 ata_eh_freeze_port(ap); sil24_error_handler() 1186 sata_pmp_error_handler(ap); sil24_error_handler() 1193 struct ata_port *ap = qc->ap; sil24_post_internal_cmd() local 1196 if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap)) sil24_post_internal_cmd() 1197 ata_eh_freeze_port(ap); sil24_post_internal_cmd() 1200 static int sil24_port_start(struct ata_port *ap) sil24_port_start() argument 1202 struct device *dev = ap->host->dev; sil24_port_start() 1220 ap->private_data = pp; sil24_port_start() 1222 
ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); sil24_port_start() 1223 ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port"); sil24_port_start() 1242 struct ata_port *ap = host->ports[i]; sil24_init_controller() local 1243 void __iomem *port = sil24_port_base(ap); sil24_init_controller() 1262 sil24_config_port(ap); sil24_init_controller() 1379 static int sil24_port_resume(struct ata_port *ap) sil24_port_resume() argument 1381 sil24_config_pmp(ap, ap->nr_pmp_links); sil24_port_resume()
|
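sil24_port_base() above locates a port's register block as a fixed-size slice of one BAR, indexed by ap->port_no. A sketch of that addressing with illustrative BAR and block-size constants rather than the driver's own SIL24_PORT_BAR/PORT_REGS_SIZE values.

/* Per-port MMIO slice lookup after sil24_port_base() above. */
#include <linux/libata.h>

#define MY_PORT_BAR             2
#define MY_PORT_REGS_SIZE       0x2000

static void __iomem *my_port_base(struct ata_port *ap)
{
        return ap->host->iomap[MY_PORT_BAR] +
               ap->port_no * MY_PORT_REGS_SIZE;
}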
H A D | libata-transport.h | 10 int ata_tport_add(struct device *parent, struct ata_port *ap); 11 void ata_tport_delete(struct ata_port *ap);
|
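The two prototypes above are meant to be used as a pair. A small sketch, assuming it is built inside drivers/ata where this private header is visible: the port's transport node is added when the port is registered and deleted again on teardown.

/* Pairing sketch for the libata-transport prototypes above. */
#include <linux/libata.h>
#include "libata-transport.h"

static int my_register_port(struct device *parent, struct ata_port *ap)
{
        return ata_tport_add(parent, ap);       /* create the transport node */
}

static void my_unregister_port(struct ata_port *ap)
{
        ata_tport_delete(ap);                   /* tear it down again */
}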
H A D | pata_bf54x.c | 280 * @ap: Port whose timings we are configuring 289 static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev) bfin_set_piomode() argument 292 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_set_piomode() 304 dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk); bfin_set_piomode() 359 * @ap: Port whose timings we are configuring 368 static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev) bfin_set_dmamode() argument 371 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_set_dmamode() 380 dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode); bfin_set_dmamode() 429 dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode); bfin_set_dmamode() 663 * @ap: Port to which output is sent 669 static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) bfin_tf_load() argument 671 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_tf_load() 674 if (tf->ctl != ap->last_ctl) { bfin_tf_load() 676 ap->last_ctl = tf->ctl; bfin_tf_load() 677 ata_wait_idle(ap); bfin_tf_load() 689 dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X " bfin_tf_load() 703 dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", bfin_tf_load() 713 dev_dbg(ap->dev, "device 0x%X\n", tf->device); bfin_tf_load() 716 ata_wait_idle(ap); bfin_tf_load() 721 * @ap: port where the device is 726 static u8 bfin_check_status(struct ata_port *ap) bfin_check_status() argument 728 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_check_status() 734 * @ap: Port from which input is read 740 static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) bfin_tf_read() argument 742 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_tf_read() 744 tf->command = bfin_check_status(ap); bfin_tf_read() 764 * @ap: port to which command is being issued 770 static void bfin_exec_command(struct ata_port *ap, bfin_exec_command() argument 773 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_exec_command() 774 dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); bfin_exec_command() 777 ata_sff_pause(ap); bfin_exec_command() 782 * @ap: port where the device is 785 static u8 bfin_check_altstatus(struct ata_port *ap) bfin_check_altstatus() argument 787 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_check_altstatus() 793 * @ap: ATA channel to manipulate 799 static void bfin_dev_select(struct ata_port *ap, unsigned int device) bfin_dev_select() argument 801 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_dev_select() 810 ata_sff_pause(ap); bfin_dev_select() 815 * @ap: port where the device is 819 static void bfin_set_devctl(struct ata_port *ap, u8 ctl) bfin_set_devctl() argument 821 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_set_devctl() 834 struct ata_port *ap = qc->ap; bfin_bmdma_setup() local 835 struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd; bfin_bmdma_setup() 836 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_bmdma_setup() 844 dev_dbg(qc->ap->dev, "in atapi dma setup\n"); bfin_bmdma_setup() 855 dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir); bfin_bmdma_setup() 874 set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma); bfin_bmdma_setup() 882 bfin_exec_command(ap, &qc->tf); bfin_bmdma_setup() 913 struct ata_port *ap = qc->ap; bfin_bmdma_start() local 914 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_bmdma_start() 916 
dev_dbg(qc->ap->dev, "in atapi dma start\n"); bfin_bmdma_start() 918 if (!(ap->udma_mask || ap->mwdma_mask)) bfin_bmdma_start() 922 if (ap->udma_mask) bfin_bmdma_start() 937 struct ata_port *ap = qc->ap; bfin_bmdma_stop() local 940 dev_dbg(qc->ap->dev, "in atapi dma stop\n"); bfin_bmdma_stop() 942 if (!(ap->udma_mask || ap->mwdma_mask)) bfin_bmdma_stop() 954 dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir); bfin_bmdma_stop() 959 * @ap: ATA channel to examine 965 static unsigned int bfin_devchk(struct ata_port *ap, bfin_devchk() argument 968 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_devchk() 971 bfin_dev_select(ap, device); bfin_devchk() 997 static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) bfin_bus_post_reset() argument 999 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_bus_post_reset() 1008 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); bfin_bus_post_reset() 1017 bfin_dev_select(ap, 1); bfin_bus_post_reset() 1026 ata_msleep(ap, 50); /* give drive a breather */ bfin_bus_post_reset() 1029 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); bfin_bus_post_reset() 1032 bfin_dev_select(ap, 0); bfin_bus_post_reset() 1034 bfin_dev_select(ap, 1); bfin_bus_post_reset() 1036 bfin_dev_select(ap, 0); bfin_bus_post_reset() 1045 static unsigned int bfin_bus_softreset(struct ata_port *ap, bfin_bus_softreset() argument 1048 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_bus_softreset() 1051 write_atapi_register(base, ATA_REG_CTRL, ap->ctl); bfin_bus_softreset() 1053 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST); bfin_bus_softreset() 1055 write_atapi_register(base, ATA_REG_CTRL, ap->ctl); bfin_bus_softreset() 1067 ata_msleep(ap, 150); bfin_bus_softreset() 1073 if (bfin_check_status(ap) == 0xFF) bfin_bus_softreset() 1076 bfin_bus_post_reset(ap, devmask); bfin_bus_softreset() 1083 * @ap: port to reset 1092 struct ata_port *ap = link->ap; bfin_softreset() local 1093 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; bfin_softreset() 1098 if (bfin_devchk(ap, 0)) bfin_softreset() 1100 if (slave_possible && bfin_devchk(ap, 1)) bfin_softreset() 1104 bfin_dev_select(ap, 0); bfin_softreset() 1107 err_mask = bfin_bus_softreset(ap, devmask); bfin_softreset() 1109 ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", bfin_softreset() 1115 classes[0] = ata_sff_dev_classify(&ap->link.device[0], bfin_softreset() 1118 classes[1] = ata_sff_dev_classify(&ap->link.device[1], bfin_softreset() 1126 * @ap: Port associated with this ATA transaction. 1129 static unsigned char bfin_bmdma_status(struct ata_port *ap) bfin_bmdma_status() argument 1132 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_bmdma_status() 1139 dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat); bfin_bmdma_status() 1157 struct ata_port *ap = dev->link->ap; bfin_data_xfer() local 1158 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_data_xfer() 1188 * @ap: Port associated with this ATA transaction. 
1193 static void bfin_irq_clear(struct ata_port *ap) bfin_irq_clear() argument 1195 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_irq_clear() 1197 dev_dbg(ap->dev, "in atapi irq clear\n"); bfin_irq_clear() 1205 * @ap: port to thaw 1210 void bfin_thaw(struct ata_port *ap) bfin_thaw() argument 1212 dev_dbg(ap->dev, "in atapi dma thaw\n"); bfin_thaw() 1213 bfin_check_status(ap); bfin_thaw() 1214 ata_sff_irq_on(ap); bfin_thaw() 1219 * @ap: the target ata_port 1227 struct ata_port *ap = link->ap; bfin_postreset() local 1228 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; bfin_postreset() 1231 ata_sff_irq_on(ap); bfin_postreset() 1235 bfin_dev_select(ap, 1); bfin_postreset() 1237 bfin_dev_select(ap, 0); bfin_postreset() 1245 write_atapi_register(base, ATA_REG_CTRL, ap->ctl); bfin_postreset() 1248 static void bfin_port_stop(struct ata_port *ap) bfin_port_stop() argument 1250 dev_dbg(ap->dev, "in atapi port stop\n"); bfin_port_stop() 1251 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { bfin_port_stop() 1252 dma_free_coherent(ap->dev, bfin_port_stop() 1254 ap->bmdma_prd, bfin_port_stop() 1255 ap->bmdma_prd_dma); bfin_port_stop() 1262 static int bfin_port_start(struct ata_port *ap) bfin_port_start() argument 1264 dev_dbg(ap->dev, "in atapi port start\n"); bfin_port_start() 1265 if (!(ap->udma_mask || ap->mwdma_mask)) bfin_port_start() 1268 ap->bmdma_prd = dma_alloc_coherent(ap->dev, bfin_port_start() 1270 &ap->bmdma_prd_dma, bfin_port_start() 1273 if (ap->bmdma_prd == NULL) { bfin_port_start() 1274 dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n"); bfin_port_start() 1284 dma_free_coherent(ap->dev, bfin_port_start() 1286 ap->bmdma_prd, bfin_port_start() 1287 ap->bmdma_prd_dma); bfin_port_start() 1291 ap->udma_mask = 0; bfin_port_start() 1292 ap->mwdma_mask = 0; bfin_port_start() 1293 dev_err(ap->dev, "Unable to request ATAPI DMA!" bfin_port_start() 1299 static unsigned int bfin_ata_host_intr(struct ata_port *ap, bfin_ata_host_intr() argument 1302 struct ata_eh_info *ehi = &ap->link.eh_info; bfin_ata_host_intr() 1306 ap->print_id, qc->tf.protocol, ap->hsm_task_state); bfin_ata_host_intr() 1309 switch (ap->hsm_task_state) { bfin_ata_host_intr() 1326 host_stat = ap->ops->bmdma_status(ap); bfin_ata_host_intr() 1328 ap->print_id, host_stat); bfin_ata_host_intr() 1335 ap->ops->bmdma_stop(qc); bfin_ata_host_intr() 1340 ap->hsm_task_state = HSM_ST_ERR; bfin_ata_host_intr() 1351 status = ap->ops->sff_check_altstatus(ap); bfin_ata_host_intr() 1356 status = ap->ops->sff_check_status(ap); bfin_ata_host_intr() 1361 ap->ops->sff_irq_clear(ap); bfin_ata_host_intr() 1363 ata_sff_hsm_move(ap, qc, status, 0); bfin_ata_host_intr() 1373 ap->stats.idle_irq++; bfin_ata_host_intr() 1376 if ((ap->stats.idle_irq % 1000) == 0) { bfin_ata_host_intr() 1377 ap->ops->irq_ack(ap, 0); /* debug trap */ bfin_ata_host_intr() 1378 ata_port_warn(ap, "irq trap\n"); bfin_ata_host_intr() 1396 struct ata_port *ap = host->ports[i]; bfin_ata_interrupt() local 1399 qc = ata_qc_from_tag(ap, ap->link.active_tag); bfin_ata_interrupt() 1401 handled |= bfin_ata_host_intr(ap, qc); bfin_ata_interrupt()
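The pata_bf54x.c hits above illustrate an SFF driver that replaces the default taskfile, status and command accessors with MMIO-based ones and supplies its own timing hooks. A minimal sketch of how such callbacks are typically wired into ata_port_operations — not the Blackfin code itself; every example_* name is hypothetical and the declarations stand in for real implementations:

#include <linux/libata.h>

static void example_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void example_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static u8   example_check_status(struct ata_port *ap);
static void example_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void example_set_dmamode(struct ata_port *ap, struct ata_device *adev);

static struct ata_port_operations example_port_ops = {
	.inherits         = &ata_bmdma_port_ops,   /* keep the generic SFF/BMDMA behaviour */
	.set_piomode      = example_set_piomode,   /* program controller PIO timings */
	.set_dmamode      = example_set_dmamode,   /* program MWDMA/UDMA timings */
	.sff_tf_load      = example_tf_load,       /* taskfile registers sit behind MMIO */
	.sff_tf_read      = example_tf_read,
	.sff_check_status = example_check_status,
	.sff_exec_command = example_exec_command,
};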
|
H A D | pata_atiixp.c | 48 static int atiixp_cable_detect(struct ata_port *ap) atiixp_cable_detect() argument 50 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_cable_detect() 58 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); atiixp_cable_detect() 82 struct ata_port *ap = link->ap; atiixp_prereset() local 83 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_prereset() 85 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) atiixp_prereset() 93 * @ap: ATA interface 101 static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio) atiixp_set_pio_timing() argument 105 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_set_pio_timing() 106 int dn = 2 * ap->port_no + adev->devno; atiixp_set_pio_timing() 107 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); atiixp_set_pio_timing() 124 * @ap: ATA interface 131 static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev) atiixp_set_piomode() argument 135 atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0); atiixp_set_piomode() 141 * @ap: ATA interface 148 static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) atiixp_set_dmamode() argument 152 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_set_dmamode() 154 int dn = 2 * ap->port_no + adev->devno; atiixp_set_dmamode() 170 int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); atiixp_set_dmamode() 195 atiixp_set_pio_timing(ap, adev, wanted_pio); atiixp_set_dmamode() 212 struct ata_port *ap = qc->ap; atiixp_bmdma_start() local 215 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_bmdma_start() 216 int dn = (2 * ap->port_no) + adev->devno; atiixp_bmdma_start() 241 struct ata_port *ap = qc->ap; atiixp_bmdma_stop() local 242 struct pci_dev *pdev = to_pci_dev(ap->host->dev); atiixp_bmdma_stop() 243 int dn = (2 * ap->port_no) + qc->dev->devno; atiixp_bmdma_stop()
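The atiixp hits show the common PATA cable-detect idiom: read a per-channel byte from PCI config space and map it to 40- or 80-wire cabling. A hedged sketch of that pattern — the config offset, field width and threshold below are assumptions, not the real ATIIXP layout:

#define EXAMPLE_UDMA_MODE	0x56	/* hypothetical per-channel config byte */

static int example_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

	pci_read_config_byte(pdev, EXAMPLE_UDMA_MODE + ap->port_no, &udma);
	/* heuristic: a BIOS-programmed UDMA mode above 2 implies an 80-wire cable */
	if ((udma & 0x07) >= 0x04)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}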
|
H A D | pata_cmd64x.c | 72 static int cmd648_cable_detect(struct ata_port *ap) cmd648_cable_detect() argument 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd648_cable_detect() 79 if (r & (1 << ap->port_no)) cmd648_cable_detect() 86 * @ap: ATA interface 93 static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode) cmd64x_set_timing() argument 95 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd64x_set_timing() 112 int arttim = arttim_port[ap->port_no][adev->devno]; cmd64x_set_timing() 113 int drwtim = drwtim_port[ap->port_no][adev->devno]; cmd64x_set_timing() 121 if (ap->port_no) { cmd64x_set_timing() 170 * @ap: ATA interface 177 static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) cmd64x_set_piomode() argument 179 cmd64x_set_timing(ap, adev, adev->pio_mode); cmd64x_set_piomode() 184 * @ap: ATA interface 190 static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) cmd64x_set_dmamode() argument 196 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd64x_set_dmamode() 199 int pciU = UDIDETCR0 + 8 * ap->port_no; cmd64x_set_dmamode() 200 int pciD = BMIDESR0 + 8 * ap->port_no; cmd64x_set_dmamode() 222 cmd64x_set_timing(ap, adev, adev->dma_mode); cmd64x_set_dmamode() 233 * @ap: ATA interface 238 static bool cmd64x_sff_irq_check(struct ata_port *ap) cmd64x_sff_irq_check() argument 240 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd64x_sff_irq_check() 241 int irq_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0; cmd64x_sff_irq_check() 242 int irq_reg = ap->port_no ? ARTTIM23 : CFR; cmd64x_sff_irq_check() 253 * @ap: ATA interface 258 static void cmd64x_sff_irq_clear(struct ata_port *ap) cmd64x_sff_irq_clear() argument 260 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd64x_sff_irq_clear() 261 int irq_reg = ap->port_no ? ARTTIM23 : CFR; cmd64x_sff_irq_clear() 264 ata_bmdma_irq_clear(ap); cmd64x_sff_irq_clear() 272 * @ap: ATA interface 277 static bool cmd648_sff_irq_check(struct ata_port *ap) cmd648_sff_irq_check() argument 279 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd648_sff_irq_check() 281 int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; cmd648_sff_irq_check() 289 * @ap: ATA interface 294 static void cmd648_sff_irq_clear(struct ata_port *ap) cmd648_sff_irq_clear() argument 296 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd648_sff_irq_clear() 298 int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; cmd648_sff_irq_clear() 301 ata_bmdma_irq_clear(ap); cmd648_sff_irq_clear()
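cmd64x/cmd648 route their interrupt status through PCI config registers, so the driver overrides sff_irq_check/sff_irq_clear. A sketch of that pair under an assumed register layout (the EXAMPLE_* constants and the write-to-clear semantics are hypothetical); ata_bmdma_irq_clear() is still called to clear the BMDMA status bit:

#define EXAMPLE_IRQ_REG		0x50	/* hypothetical interrupt status register */
#define EXAMPLE_IRQ_CH0		0x04	/* hypothetical per-channel bits */
#define EXAMPLE_IRQ_CH1		0x10

static bool example_sff_irq_check(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 reg;

	pci_read_config_byte(pdev, EXAMPLE_IRQ_REG, &reg);
	return reg & (ap->port_no ? EXAMPLE_IRQ_CH1 : EXAMPLE_IRQ_CH0);
}

static void example_sff_irq_clear(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	ata_bmdma_irq_clear(ap);
	pci_write_config_byte(pdev, EXAMPLE_IRQ_REG,
			      ap->port_no ? EXAMPLE_IRQ_CH1 : EXAMPLE_IRQ_CH0);
}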
|
H A D | sata_sil.c | 126 static void sil_freeze(struct ata_port *ap); 127 static void sil_thaw(struct ata_port *ap); 269 struct ata_port *ap = qc->ap; sil_bmdma_stop() local 270 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; sil_bmdma_stop() 271 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2; sil_bmdma_stop() 277 ata_sff_dma_pause(ap); sil_bmdma_stop() 282 struct ata_port *ap = qc->ap; sil_bmdma_setup() local 283 void __iomem *bmdma = ap->ioaddr.bmdma_addr; sil_bmdma_setup() 286 iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS); sil_bmdma_setup() 289 ap->ops->sff_exec_command(ap, &qc->tf); sil_bmdma_setup() 295 struct ata_port *ap = qc->ap; sil_bmdma_start() local 296 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; sil_bmdma_start() 297 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2; sil_bmdma_start() 312 struct ata_port *ap = qc->ap; sil_fill_sg() local 316 prd = &ap->bmdma_prd[0]; sil_fill_sg() 362 struct ata_port *ap = link->ap; sil_set_mode() local 363 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; sil_set_mode() 364 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; sil_set_mode() 392 static inline void __iomem *sil_scr_addr(struct ata_port *ap, sil_scr_addr() argument 395 void __iomem *offset = ap->ioaddr.scr_addr; sil_scr_addr() 414 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg); sil_scr_read() 425 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg); sil_scr_write() 434 static void sil_host_intr(struct ata_port *ap, u32 bmdma2) sil_host_intr() argument 436 struct ata_eh_info *ehi = &ap->link.eh_info; sil_host_intr() 437 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); sil_host_intr() 447 sil_scr_read(&ap->link, SCR_ERROR, &serror); sil_host_intr() 448 sil_scr_write(&ap->link, SCR_ERROR, serror); sil_host_intr() 454 ap->link.eh_info.serror |= serror; sil_host_intr() 464 ap->ops->sff_check_status(ap); sil_host_intr() 469 switch (ap->hsm_task_state) { sil_host_intr() 485 ap->ops->bmdma_stop(qc); sil_host_intr() 489 ap->hsm_task_state = HSM_ST_ERR; sil_host_intr() 500 status = ap->ops->sff_check_status(ap); sil_host_intr() 505 ata_bmdma_irq_clear(ap); sil_host_intr() 508 ata_sff_hsm_move(ap, qc, status, 0); sil_host_intr() 518 ata_port_freeze(ap); sil_host_intr() 531 struct ata_port *ap = host->ports[i]; sil_interrupt() local 532 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); sil_interrupt() 535 if (ap->flags & SIL_FLAG_NO_SATA_IRQ) sil_interrupt() 542 sil_host_intr(ap, bmdma2); sil_interrupt() 551 static void sil_freeze(struct ata_port *ap) sil_freeze() argument 553 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; sil_freeze() 557 writel(0, mmio_base + sil_port[ap->port_no].sien); sil_freeze() 561 tmp |= SIL_MASK_IDE0_INT << ap->port_no; sil_freeze() 570 iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE, sil_freeze() 571 ap->ioaddr.bmdma_addr); sil_freeze() 576 ioread8(ap->ioaddr.bmdma_addr); sil_freeze() 579 static void sil_thaw(struct ata_port *ap) sil_thaw() argument 581 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; sil_thaw() 585 ap->ops->sff_check_status(ap); sil_thaw() 586 ata_bmdma_irq_clear(ap); sil_thaw() 589 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) sil_thaw() 590 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien); sil_thaw() 594 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no); sil_thaw() 628 struct ata_port *ap = dev->link->ap; sil_dev_config() local 629 int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO; 
sil_dev_config() 643 ((ap->flags & SIL_FLAG_MOD15WRITE) && sil_dev_config() 783 struct ata_port *ap = host->ports[i]; sil_init_one() local 784 struct ata_ioports *ioaddr = &ap->ioaddr; sil_init_one() 793 ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); sil_init_one() 794 ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); sil_init_one()
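sata_sil maps the SATA SCR block (SStatus/SError/SControl) into MMIO and exposes it through the scr_read/scr_write port operations. A minimal sketch of such accessors, assuming a 4-byte register stride and only the first three SCRs being mapped (controllers differ — the sata_qstor hits further down use a stride of 8):

static int example_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)		/* only STATUS/ERROR/CONTROL mapped here */
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + sc_reg * 4);
	return 0;
}

static int example_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + sc_reg * 4);
	return 0;
}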
|
H A D | pata_octeon_cf.c | 52 struct ata_port *ap; member in struct:octeon_cf_port 126 * @ap: ATA port information 129 static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev) octeon_cf_set_piomode() argument 131 struct octeon_cf_port *cf_port = ap->private_data; octeon_cf_set_piomode() 223 static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) octeon_cf_set_dmamode() argument 225 struct octeon_cf_port *cf_port = ap->private_data; octeon_cf_set_dmamode() 307 struct ata_port *ap = dev->link->ap; octeon_cf_data_xfer8() local 308 void __iomem *data_addr = ap->ioaddr.data_addr; octeon_cf_data_xfer8() 323 ioread8(ap->ioaddr.altstatus_addr); octeon_cf_data_xfer8() 346 struct ata_port *ap = dev->link->ap; octeon_cf_data_xfer16() local 347 void __iomem *data_addr = ap->ioaddr.data_addr; octeon_cf_data_xfer16() 362 ioread8(ap->ioaddr.altstatus_addr); octeon_cf_data_xfer16() 391 static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) octeon_cf_tf_read16() argument 395 void __iomem *base = ap->ioaddr.data_addr; octeon_cf_tf_read16() 413 if (likely(ap->ioaddr.ctl_addr)) { octeon_cf_tf_read16() 414 iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); octeon_cf_tf_read16() 427 iowrite8(tf->ctl, ap->ioaddr.ctl_addr); octeon_cf_tf_read16() 428 ap->last_ctl = tf->ctl; octeon_cf_tf_read16() 435 static u8 octeon_cf_check_status16(struct ata_port *ap) octeon_cf_check_status16() argument 438 void __iomem *base = ap->ioaddr.data_addr; octeon_cf_check_status16() 447 struct ata_port *ap = link->ap; octeon_cf_softreset16() local 448 void __iomem *base = ap->ioaddr.data_addr; octeon_cf_softreset16() 453 __raw_writew(ap->ctl, base + 0xe); octeon_cf_softreset16() 455 __raw_writew(ap->ctl | ATA_SRST, base + 0xe); octeon_cf_softreset16() 457 __raw_writew(ap->ctl, base + 0xe); octeon_cf_softreset16() 475 static void octeon_cf_tf_load16(struct ata_port *ap, octeon_cf_tf_load16() argument 480 void __iomem *base = ap->ioaddr.data_addr; octeon_cf_tf_load16() 482 if (tf->ctl != ap->last_ctl) { octeon_cf_tf_load16() 483 iowrite8(tf->ctl, ap->ioaddr.ctl_addr); octeon_cf_tf_load16() 484 ap->last_ctl = tf->ctl; octeon_cf_tf_load16() 485 ata_wait_idle(ap); octeon_cf_tf_load16() 509 ata_wait_idle(ap); octeon_cf_tf_load16() 513 static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device) octeon_cf_dev_select() argument 523 static void octeon_cf_exec_command16(struct ata_port *ap, octeon_cf_exec_command16() argument 527 void __iomem *base = ap->ioaddr.data_addr; octeon_cf_exec_command16() 537 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); octeon_cf_exec_command16() 542 ata_wait_idle(ap); octeon_cf_exec_command16() 545 static void octeon_cf_ata_port_noaction(struct ata_port *ap) octeon_cf_ata_port_noaction() argument 551 struct ata_port *ap = qc->ap; octeon_cf_dma_setup() local 554 cf_port = ap->private_data; octeon_cf_dma_setup() 559 ap->ops->sff_exec_command(ap, &qc->tf); octeon_cf_dma_setup() 570 struct octeon_cf_port *cf_port = qc->ap->private_data; octeon_cf_dma_start() 630 static unsigned int octeon_cf_dma_finished(struct ata_port *ap, octeon_cf_dma_finished() argument 633 struct ata_eh_info *ehi = &ap->link.eh_info; octeon_cf_dma_finished() 634 struct octeon_cf_port *cf_port = ap->private_data; octeon_cf_dma_finished() 640 ap->print_id, qc->tf.protocol, ap->hsm_task_state); octeon_cf_dma_finished() 643 if (ap->hsm_task_state != HSM_ST_LAST) octeon_cf_dma_finished() 650 ap->hsm_task_state = HSM_ST_ERR; octeon_cf_dma_finished() 666 status = 
ap->ops->sff_check_status(ap); octeon_cf_dma_finished() 668 ata_sff_hsm_move(ap, qc, status, 0); octeon_cf_dma_finished() 693 struct ata_port *ap; octeon_cf_interrupt() local 698 ap = host->ports[i]; octeon_cf_interrupt() 699 cf_port = ap->private_data; octeon_cf_interrupt() 704 qc = ata_qc_from_tag(ap, ap->link.active_tag); octeon_cf_interrupt() 721 status = ioread8(ap->ioaddr.altstatus_addr); octeon_cf_interrupt() 740 handled |= octeon_cf_dma_finished(ap, qc); octeon_cf_interrupt() 753 struct ata_port *ap = cf_port->ap; octeon_cf_delayed_finish() local 754 struct ata_host *host = ap->host; octeon_cf_delayed_finish() 767 if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished) octeon_cf_delayed_finish() 770 status = ioread8(ap->ioaddr.altstatus_addr); octeon_cf_delayed_finish() 778 qc = ata_qc_from_tag(ap, ap->link.active_tag); octeon_cf_delayed_finish() 780 octeon_cf_dma_finished(ap, qc); octeon_cf_delayed_finish() 806 struct ata_port *ap = qc->ap; octeon_cf_qc_issue() local 812 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ octeon_cf_qc_issue() 815 ap->hsm_task_state = HSM_ST_LAST; octeon_cf_qc_issue() 819 dev_err(ap->dev, "Error, ATAPI not supported\n"); octeon_cf_qc_issue() 856 struct ata_port *ap; octeon_cf_probe() local 949 ap = host->ports[0]; octeon_cf_probe() 950 ap->private_data = cf_port; octeon_cf_probe() 952 cf_port->ap = ap; octeon_cf_probe() 953 ap->ops = &octeon_cf_ops; octeon_cf_probe() 954 ap->pio_mask = ATA_PIO6; octeon_cf_probe() 955 ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING; octeon_cf_probe() 959 ap->ioaddr.cmd_addr = base; octeon_cf_probe() 960 ata_sff_std_ports(&ap->ioaddr); octeon_cf_probe() 962 ap->ioaddr.altstatus_addr = base + 0xe; octeon_cf_probe() 963 ap->ioaddr.ctl_addr = base + 0xe; octeon_cf_probe() 967 ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1; octeon_cf_probe() 968 ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1); octeon_cf_probe() 969 ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1; octeon_cf_probe() 970 ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1; octeon_cf_probe() 971 ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1; octeon_cf_probe() 972 ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1; octeon_cf_probe() 973 ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1; octeon_cf_probe() 974 ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1; octeon_cf_probe() 975 ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1; octeon_cf_probe() 976 ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1; octeon_cf_probe() 977 ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1; octeon_cf_probe() 978 ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1; octeon_cf_probe() 979 ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1; octeon_cf_probe() 982 ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0; octeon_cf_probe() 998 ap->ioaddr.data_addr = base + ATA_REG_DATA; octeon_cf_probe() 999 ap->ioaddr.nsect_addr = base + ATA_REG_NSECT; octeon_cf_probe() 1000 ap->ioaddr.lbal_addr = base + ATA_REG_LBAL; octeon_cf_probe() 1001 ap->ioaddr.ctl_addr = base + 0xe; octeon_cf_probe() 1002 ap->ioaddr.altstatus_addr = base + 0xe; octeon_cf_probe() 1004 cf_port->c0 = ap->ioaddr.ctl_addr; octeon_cf_probe() 1010 ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr); octeon_cf_probe()
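pata_octeon_cf drives a CompactFlash interface whose data register is 16 bits wide, so it supplies its own sff_data_xfer. A sketch of a 16-bit PIO transfer hook in the 4.1-era signature, ignoring the trailing odd byte that a real driver must also handle:

static unsigned int example_data_xfer16(struct ata_device *dev, unsigned char *buf,
					unsigned int buflen, int rw)
{
	void __iomem *data_addr = dev->link->ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* trailing odd byte handling omitted in this sketch */
	return words << 1;
}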
|
H A D | pata_palmld.c | 54 struct ata_port *ap; palmld_pata_probe() local 85 ap = host->ports[0]; palmld_pata_probe() 86 ap->ops = &palmld_port_ops; palmld_pata_probe() 87 ap->pio_mask = ATA_PIO4; palmld_pata_probe() 88 ap->flags |= ATA_FLAG_PIO_POLLING; palmld_pata_probe() 91 ap->ioaddr.cmd_addr = mem + 0x10; palmld_pata_probe() 92 ap->ioaddr.altstatus_addr = mem + 0xe; palmld_pata_probe() 93 ap->ioaddr.ctl_addr = mem + 0xe; palmld_pata_probe() 96 ata_sff_std_ports(&ap->ioaddr); palmld_pata_probe()
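pata_palmld is about the smallest possible PIO-only host: one port, memory-mapped registers, no interrupt line, so the port runs in polling mode. A sketch of that bring-up; the register offsets, the use of the stock ata_sff_port_ops and all example_* names are assumptions:

static struct scsi_host_template example_sht = {
	ATA_PIO_SHT("pata_example"),
};

static int example_probe_one(struct device *dev, void __iomem *mem)
{
	struct ata_host *host = ata_host_alloc(dev, 1);
	struct ata_port *ap;

	if (!host)
		return -ENOMEM;

	ap = host->ports[0];
	ap->ops = &ata_sff_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_PIO_POLLING;	/* no IRQ: poll for command completion */

	ap->ioaddr.cmd_addr = mem + 0x10;	/* hypothetical offsets */
	ap->ioaddr.altstatus_addr = mem + 0xe;
	ap->ioaddr.ctl_addr = mem + 0xe;
	ata_sff_std_ports(&ap->ioaddr);		/* derive the remaining taskfile addresses */

	/* irq == 0 and no handler: libata polls instead of waiting for interrupts */
	return ata_host_activate(host, 0, NULL, 0, &example_sht);
}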
|
H A D | pata_pdc202xx_old.c | 26 static int pdc2026x_cable_detect(struct ata_port *ap) pdc2026x_cable_detect() argument 28 struct pci_dev *pdev = to_pci_dev(ap->host->dev); pdc2026x_cable_detect() 32 if (cis & (1 << (10 + ap->port_no))) pdc2026x_cable_detect() 37 static void pdc202xx_exec_command(struct ata_port *ap, pdc202xx_exec_command() argument 40 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); pdc202xx_exec_command() 42 iowrite8(tf->command, ap->ioaddr.command_addr); pdc202xx_exec_command() 46 static bool pdc202xx_irq_check(struct ata_port *ap) pdc202xx_irq_check() argument 48 struct pci_dev *pdev = to_pci_dev(ap->host->dev); pdc202xx_irq_check() 52 if (ap->port_no) { pdc202xx_irq_check() 69 * @ap: ATA interface 78 static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) pdc202xx_configure_piomode() argument 80 struct pci_dev *pdev = to_pci_dev(ap->host->dev); pdc202xx_configure_piomode() 81 int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; pdc202xx_configure_piomode() 104 * @ap: ATA interface 111 static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev) pdc202xx_set_piomode() argument 113 pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); pdc202xx_set_piomode() 118 * @ap: ATA interface 125 static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) pdc202xx_set_dmamode() argument 127 struct pci_dev *pdev = to_pci_dev(ap->host->dev); pdc202xx_set_dmamode() 128 int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; pdc202xx_set_dmamode() 178 struct ata_port *ap = qc->ap; pdc2026x_bmdma_start() local 181 int sel66 = ap->port_no ? 0x08: 0x02; pdc2026x_bmdma_start() 183 void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr; pdc2026x_bmdma_start() 185 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no); pdc2026x_bmdma_start() 197 pdc202xx_set_dmamode(ap, qc->dev); pdc2026x_bmdma_start() 228 struct ata_port *ap = qc->ap; pdc2026x_bmdma_stop() local 232 int sel66 = ap->port_no ? 0x08: 0x02; pdc2026x_bmdma_stop() 234 void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr; pdc2026x_bmdma_stop() 236 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no); pdc2026x_bmdma_stop() 247 pdc202xx_set_piomode(ap, adev); pdc2026x_bmdma_stop() 264 static int pdc2026x_port_start(struct ata_port *ap) pdc2026x_port_start() argument 266 void __iomem *bmdma = ap->ioaddr.bmdma_addr; pdc2026x_port_start() 272 return ata_bmdma_port_start(ap); pdc2026x_port_start()
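pdc202xx_old programs per-device timing fields held in PCI config space, with one slot per (channel, device) pair. A heavily simplified sketch of that read-modify-write pattern; the 0x60-based layout, the field mask and the idea of storing the raw mode number are all assumptions, not the Promise register map:

static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* hypothetical: one 4-byte timing slot per device */
	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
	u8 reg;

	pci_read_config_byte(pdev, port, &reg);
	reg &= ~0x07;					/* clear the old mode field */
	reg |= (adev->pio_mode - XFER_PIO_0) & 0x07;	/* store the new PIO number */
	pci_write_config_byte(pdev, port, reg);
}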
|
H A D | libata-eh.c | 166 static void __ata_port_freeze(struct ata_port *ap); 168 static void ata_eh_handle_port_suspend(struct ata_port *ap); 169 static void ata_eh_handle_port_resume(struct ata_port *ap); 171 static void ata_eh_handle_port_suspend(struct ata_port *ap) ata_eh_handle_port_suspend() argument 174 static void ata_eh_handle_port_resume(struct ata_port *ap) ata_eh_handle_port_resume() argument 245 * @ap: target ATA port 256 void ata_port_desc(struct ata_port *ap, const char *fmt, ...) ata_port_desc() argument 260 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); ata_port_desc() 262 if (ap->link.eh_info.desc_len) ata_port_desc() 263 __ata_ehi_push_desc(&ap->link.eh_info, " "); ata_port_desc() 266 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); ata_port_desc() 274 * @ap: target ATA port 287 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, ata_port_pbar_desc() argument 290 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ata_port_pbar_desc() 303 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); ata_port_pbar_desc() 305 ata_port_desc(ap, "%s 0x%llx", name, ata_port_pbar_desc() 470 * @ap: ATA port to acquire EH ownership for 472 * Acquire EH ownership for @ap. This is the basic exclusion 479 void ata_eh_acquire(struct ata_port *ap) ata_eh_acquire() argument 481 mutex_lock(&ap->host->eh_mutex); ata_eh_acquire() 482 WARN_ON_ONCE(ap->host->eh_owner); ata_eh_acquire() 483 ap->host->eh_owner = current; ata_eh_acquire() 488 * @ap: ATA port to release EH ownership for 490 * Release EH ownership for @ap if the caller. The caller must 496 void ata_eh_release(struct ata_port *ap) ata_eh_release() argument 498 WARN_ON_ONCE(ap->host->eh_owner != current); ata_eh_release() 499 ap->host->eh_owner = NULL; ata_eh_release() 500 mutex_unlock(&ap->host->eh_mutex); ata_eh_release() 525 struct ata_port *ap = ata_shost_to_port(host); ata_scsi_timed_out() local 532 if (ap->ops->error_handler) { ata_scsi_timed_out() 538 spin_lock_irqsave(ap->lock, flags); ata_scsi_timed_out() 539 qc = ata_qc_from_tag(ap, ap->link.active_tag); ata_scsi_timed_out() 546 spin_unlock_irqrestore(ap->lock, flags); ata_scsi_timed_out() 553 static void ata_eh_unload(struct ata_port *ap) ata_eh_unload() argument 562 ata_for_each_link(link, ap, PMP_FIRST) { ata_for_each_link() 569 spin_lock_irqsave(ap->lock, flags); 571 ata_port_freeze(ap); /* won't be thawed */ 572 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 573 ap->pflags |= ATA_PFLAG_UNLOADED; 575 spin_unlock_irqrestore(ap->lock, flags); 592 struct ata_port *ap = ata_shost_to_port(host); ata_scsi_error() local 602 ata_scsi_cmd_error_handler(host, ap, &eh_work_q); ata_scsi_error() 606 ata_scsi_port_error_handler(host, ap); ata_scsi_error() 617 * @ap: ATA port within the host 621 * ap->eh_done_q. 
This function is the first part of the libata error 624 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, ata_scsi_cmd_error_handler() argument 631 ata_sff_flush_pio_task(ap); ata_scsi_cmd_error_handler() 647 if (ap->ops->error_handler) { ata_scsi_cmd_error_handler() 651 spin_lock_irqsave(ap->lock, flags); ata_scsi_cmd_error_handler() 653 /* This must occur under the ap->lock as we don't want ata_scsi_cmd_error_handler() 662 if (ap->ops->lost_interrupt) ata_scsi_cmd_error_handler() 663 ap->ops->lost_interrupt(ap); ata_scsi_cmd_error_handler() 669 qc = __ata_qc_from_tag(ap, i); list_for_each_entry_safe() 689 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); list_for_each_entry_safe() 700 __ata_port_freeze(ap); 702 spin_unlock_irqrestore(ap->lock, flags); 705 ap->eh_tries = ATA_EH_MAX_TRIES; 707 spin_unlock_wait(ap->lock); 715 * @ap: the ATA port 717 * Handle the recovery of the port @ap after all the commands 720 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) ata_scsi_port_error_handler() argument 725 if (ap->ops->error_handler) { ata_scsi_port_error_handler() 729 ata_eh_acquire(ap); ata_scsi_port_error_handler() 732 del_timer_sync(&ap->fastdrain_timer); ata_scsi_port_error_handler() 735 ata_eh_handle_port_resume(ap); ata_scsi_port_error_handler() 738 spin_lock_irqsave(ap->lock, flags); ata_scsi_port_error_handler() 740 ata_for_each_link(link, ap, HOST_FIRST) { ata_for_each_link() 757 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 758 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 759 ap->excl_link = NULL; /* don't maintain exclusion over EH */ 761 spin_unlock_irqrestore(ap->lock, flags); 764 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 765 ap->ops->error_handler(ap); 768 if ((ap->pflags & ATA_PFLAG_UNLOADING) && 769 !(ap->pflags & ATA_PFLAG_UNLOADED)) 770 ata_eh_unload(ap); 771 ata_eh_finish(ap); 775 ata_eh_handle_port_suspend(ap); 781 spin_lock_irqsave(ap->lock, flags); 783 if (ap->pflags & ATA_PFLAG_EH_PENDING) { 784 if (--ap->eh_tries) { 785 spin_unlock_irqrestore(ap->lock, flags); 788 ata_port_err(ap, 791 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 795 ata_for_each_link(link, ap, HOST_FIRST) 799 * ap->lock such that if exception occurs after this 803 ap->ops->end_eh(ap); 805 spin_unlock_irqrestore(ap->lock, flags); 806 ata_eh_release(ap); 808 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 809 ap->ops->eng_timeout(ap); 812 scsi_eh_flush_done_q(&ap->eh_done_q); 815 spin_lock_irqsave(ap->lock, flags); 817 if (ap->pflags & ATA_PFLAG_LOADING) 818 ap->pflags &= ~ATA_PFLAG_LOADING; 819 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 820 schedule_delayed_work(&ap->hotplug_task, 0); 822 if (ap->pflags & ATA_PFLAG_RECOVERED) 823 ata_port_info(ap, "EH complete\n"); 825 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 828 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 829 wake_up_all(&ap->eh_wait_q); 831 spin_unlock_irqrestore(ap->lock, flags); 837 * @ap: Port to wait EH for 844 void ata_port_wait_eh(struct ata_port *ap) ata_port_wait_eh() argument 850 spin_lock_irqsave(ap->lock, flags); ata_port_wait_eh() 852 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { ata_port_wait_eh() 853 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); ata_port_wait_eh() 854 spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh() 856 spin_lock_irqsave(ap->lock, flags); ata_port_wait_eh() 858 finish_wait(&ap->eh_wait_q, &wait); ata_port_wait_eh() 860 spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh() 863 if 
(scsi_host_in_recovery(ap->scsi_host)) { ata_port_wait_eh() 864 ata_msleep(ap, 10); ata_port_wait_eh() 870 static int ata_eh_nr_in_flight(struct ata_port *ap) ata_eh_nr_in_flight() argument 877 if (ata_qc_from_tag(ap, tag)) ata_eh_nr_in_flight() 885 struct ata_port *ap = (void *)arg; ata_eh_fastdrain_timerfn() local 889 spin_lock_irqsave(ap->lock, flags); ata_eh_fastdrain_timerfn() 891 cnt = ata_eh_nr_in_flight(ap); ata_eh_fastdrain_timerfn() 897 if (cnt == ap->fastdrain_cnt) { ata_eh_fastdrain_timerfn() 904 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); ata_eh_fastdrain_timerfn() 909 ata_port_freeze(ap); ata_eh_fastdrain_timerfn() 912 ap->fastdrain_cnt = cnt; ata_eh_fastdrain_timerfn() 913 ap->fastdrain_timer.expires = ata_eh_fastdrain_timerfn() 915 add_timer(&ap->fastdrain_timer); ata_eh_fastdrain_timerfn() 919 spin_unlock_irqrestore(ap->lock, flags); ata_eh_fastdrain_timerfn() 924 * @ap: target ATA port 934 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) ata_eh_set_pending() argument 939 if (ap->pflags & ATA_PFLAG_EH_PENDING) ata_eh_set_pending() 942 ap->pflags |= ATA_PFLAG_EH_PENDING; ata_eh_set_pending() 948 cnt = ata_eh_nr_in_flight(ap); ata_eh_set_pending() 953 ap->fastdrain_cnt = cnt; ata_eh_set_pending() 954 ap->fastdrain_timer.expires = ata_eh_set_pending() 956 add_timer(&ap->fastdrain_timer); ata_eh_set_pending() 971 struct ata_port *ap = qc->ap; ata_qc_schedule_eh() local 975 WARN_ON(!ap->ops->error_handler); ata_qc_schedule_eh() 978 ata_eh_set_pending(ap, 1); ata_qc_schedule_eh() 992 * @ap: ATA port to schedule EH for 997 void ata_std_sched_eh(struct ata_port *ap) ata_std_sched_eh() argument 999 WARN_ON(!ap->ops->error_handler); ata_std_sched_eh() 1001 if (ap->pflags & ATA_PFLAG_INITIALIZING) ata_std_sched_eh() 1004 ata_eh_set_pending(ap, 1); ata_std_sched_eh() 1005 scsi_schedule_eh(ap->scsi_host); ata_std_sched_eh() 1013 * @ap: ATA port to end EH for 1016 * shost, so host fields can be directly manipulated under ap->lock, in 1023 void ata_std_end_eh(struct ata_port *ap) ata_std_end_eh() argument 1025 struct Scsi_Host *host = ap->scsi_host; ata_std_end_eh() 1034 * @ap: ATA port to schedule EH for 1036 * Schedule error handling for @ap. EH will kick in as soon as 1042 void ata_port_schedule_eh(struct ata_port *ap) ata_port_schedule_eh() argument 1045 ap->ops->sched_eh(ap); ata_port_schedule_eh() 1048 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) ata_do_link_abort() argument 1052 WARN_ON(!ap->ops->error_handler); ata_do_link_abort() 1055 ata_eh_set_pending(ap, 0); ata_do_link_abort() 1058 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); ata_do_link_abort() 1068 ata_port_schedule_eh(ap); ata_do_link_abort() 1087 return ata_do_link_abort(link->ap, link); ata_link_abort() 1092 * @ap: ATA port to abort qc's for 1094 * Abort all active qc's of @ap and schedule EH. 1102 int ata_port_abort(struct ata_port *ap) ata_port_abort() argument 1104 return ata_do_link_abort(ap, NULL); ata_port_abort() 1109 * @ap: ATA port to freeze 1116 * ap->ops->freeze() callback can be used for freezing the port 1125 static void __ata_port_freeze(struct ata_port *ap) __ata_port_freeze() argument 1127 WARN_ON(!ap->ops->error_handler); __ata_port_freeze() 1129 if (ap->ops->freeze) __ata_port_freeze() 1130 ap->ops->freeze(ap); __ata_port_freeze() 1132 ap->pflags |= ATA_PFLAG_FROZEN; __ata_port_freeze() 1134 DPRINTK("ata%u port frozen\n", ap->print_id); __ata_port_freeze() 1139 * @ap: ATA port to freeze 1141 * Abort and freeze @ap. 
The freeze operation must be called 1151 int ata_port_freeze(struct ata_port *ap) ata_port_freeze() argument 1155 WARN_ON(!ap->ops->error_handler); ata_port_freeze() 1157 __ata_port_freeze(ap); ata_port_freeze() 1158 nr_aborted = ata_port_abort(ap); ata_port_freeze() 1165 * @ap: ATA port where async notification is received 1176 int sata_async_notification(struct ata_port *ap) sata_async_notification() argument 1181 if (!(ap->flags & ATA_FLAG_AN)) sata_async_notification() 1184 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); sata_async_notification() 1186 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); sata_async_notification() 1188 if (!sata_pmp_attached(ap) || rc) { sata_async_notification() 1190 if (!sata_pmp_attached(ap)) { sata_async_notification() 1195 struct ata_device *dev = ap->link.device; sata_async_notification() 1207 ata_port_schedule_eh(ap); sata_async_notification() 1215 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 1228 ata_port_schedule_eh(ap); 1238 * @ap: ATA port to freeze 1240 * Freeze @ap. 1245 void ata_eh_freeze_port(struct ata_port *ap) ata_eh_freeze_port() argument 1249 if (!ap->ops->error_handler) ata_eh_freeze_port() 1252 spin_lock_irqsave(ap->lock, flags); ata_eh_freeze_port() 1253 __ata_port_freeze(ap); ata_eh_freeze_port() 1254 spin_unlock_irqrestore(ap->lock, flags); ata_eh_freeze_port() 1259 * @ap: ATA port to thaw 1261 * Thaw frozen port @ap. 1266 void ata_eh_thaw_port(struct ata_port *ap) ata_eh_thaw_port() argument 1270 if (!ap->ops->error_handler) ata_eh_thaw_port() 1273 spin_lock_irqsave(ap->lock, flags); ata_eh_thaw_port() 1275 ap->pflags &= ~ATA_PFLAG_FROZEN; ata_eh_thaw_port() 1277 if (ap->ops->thaw) ata_eh_thaw_port() 1278 ap->ops->thaw(ap); ata_eh_thaw_port() 1280 spin_unlock_irqrestore(ap->lock, flags); ata_eh_thaw_port() 1282 DPRINTK("ata%u port thawed\n", ap->print_id); ata_eh_thaw_port() 1292 struct ata_port *ap = qc->ap; __ata_eh_qc_complete() local 1296 spin_lock_irqsave(ap->lock, flags); __ata_eh_qc_complete() 1300 spin_unlock_irqrestore(ap->lock, flags); __ata_eh_qc_complete() 1302 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); __ata_eh_qc_complete() 1352 if (ata_msg_drv(dev->link->ap)) ata_dev_disable() 1376 struct ata_port *ap = link->ap; ata_eh_detach_dev() local 1382 spin_lock_irqsave(ap->lock, flags); ata_eh_detach_dev() 1388 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; ata_eh_detach_dev() 1397 spin_unlock_irqrestore(ap->lock, flags); ata_eh_detach_dev() 1416 struct ata_port *ap = link->ap; ata_eh_about_to_do() local 1421 spin_lock_irqsave(ap->lock, flags); ata_eh_about_to_do() 1428 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) ata_eh_about_to_do() 1429 ap->pflags |= ATA_PFLAG_RECOVERED; ata_eh_about_to_do() 1431 spin_unlock_irqrestore(ap->lock, flags); ata_eh_about_to_do() 1436 * @ap: target ATA port 1563 u8 *buf = dev->link->ap->sector_buf; ata_eh_read_log_10h() 1650 struct ata_port *ap = dev->link->ap; atapi_eh_request_sense() local 1670 if (ap->flags & ATA_FLAG_PIO_DMA) { atapi_eh_request_sense() 1746 struct ata_port *ap = link->ap; ata_eh_analyze_ncq_error() local 1754 if (ap->pflags & ATA_PFLAG_FROZEN) ata_eh_analyze_ncq_error() 1763 qc = __ata_qc_from_tag(ap, tag); ata_eh_analyze_ncq_error() 1788 qc = __ata_qc_from_tag(ap, tag); ata_eh_analyze_ncq_error() 1838 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { ata_eh_analyze_tf() 2087 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && ata_eh_speed_down() 2137 struct ata_port *ap = link->ap; ata_eh_link_autopsy() local 2172 struct ata_queued_cmd 
*qc = __ata_qc_from_tag(ap, tag); ata_eh_link_autopsy() 2210 if (ap->pflags & ATA_PFLAG_FROZEN || ata_eh_link_autopsy() 2228 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; ata_eh_link_autopsy() 2247 * @ap: host port to perform autopsy on 2249 * Analyze all links of @ap and determine why they failed and 2255 void ata_eh_autopsy(struct ata_port *ap) ata_eh_autopsy() argument 2259 ata_for_each_link(link, ap, EDGE) ata_for_each_link() 2266 if (ap->slave_link) { ata_for_each_link() 2267 struct ata_eh_context *mehc = &ap->link.eh_context; ata_for_each_link() 2268 struct ata_eh_context *sehc = &ap->slave_link->eh_context; ata_for_each_link() 2274 ata_eh_link_autopsy(ap->slave_link); ata_for_each_link() 2277 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); ata_for_each_link() 2281 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); ata_for_each_link() 2287 if (sata_pmp_attached(ap)) 2288 ata_eh_link_autopsy(&ap->link); 2422 struct ata_port *ap = link->ap; ata_eh_link_report() local 2436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_link_report() 2453 if (ap->pflags & ATA_PFLAG_FROZEN) ata_eh_link_report() 2456 if (ap->eh_tries < ATA_EH_MAX_TRIES) ata_eh_link_report() 2458 ap->eh_tries); ata_eh_link_report() 2500 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_link_report() 2590 * @ap: ATA port to report EH about 2597 void ata_eh_report(struct ata_port *ap) ata_eh_report() argument 2601 ata_for_each_link(link, ap, HOST_FIRST) ata_eh_report() 2624 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) ata_eh_followup_srst_needed() 2633 struct ata_port *ap = link->ap; ata_eh_reset() local 2634 struct ata_link *slave = ap->slave_link; ata_eh_reset() 2671 spin_lock_irqsave(ap->lock, flags); ata_eh_reset() 2672 ap->pflags |= ATA_PFLAG_RESETTING; ata_eh_reset() 2673 spin_unlock_irqrestore(ap->lock, flags); ata_eh_reset() 2693 if (ap->ops->set_piomode) ata_for_each_dev() 2694 ap->ops->set_piomode(ap, dev); ata_for_each_dev() 2755 if ((ap->pflags & ATA_PFLAG_FROZEN) && 2757 ata_eh_thaw_port(ap); 2768 ata_eh_freeze_port(ap); 2869 ata_eh_thaw_port(ap); 2891 spin_lock_irqsave(link->ap->lock, flags); 2895 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2896 spin_unlock_irqrestore(link->ap->lock, flags); 2898 if (ap->pflags & ATA_PFLAG_FROZEN) 2899 ata_eh_thaw_port(ap); 2959 spin_lock_irqsave(ap->lock, flags); 2960 ap->pflags &= ~ATA_PFLAG_RESETTING; 2961 spin_unlock_irqrestore(ap->lock, flags); 2979 ata_eh_thaw_port(ap); 2991 ata_eh_release(ap); 2994 ata_eh_acquire(ap); 3003 ata_eh_thaw_port(ap); 3019 static inline void ata_eh_pull_park_action(struct ata_port *ap) ata_eh_pull_park_action() argument 3037 * Additionally, all write accesses to &ap->park_req_pending ata_eh_pull_park_action() 3042 * *all* devices on port ap have been pulled into the ata_eh_pull_park_action() 3047 * ap and we have to cycle over the do {} while () loop in ata_eh_pull_park_action() 3051 spin_lock_irqsave(ap->lock, flags); ata_eh_pull_park_action() 3052 reinit_completion(&ap->park_req_pending); ata_for_each_link() 3053 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 3062 spin_unlock_irqrestore(ap->lock, flags); 3096 struct ata_port *ap = link->ap; ata_eh_revalidate_and_attach() local 3138 schedule_work(&(ap->scsi_rescan_task)); ata_for_each_dev() 3171 ata_eh_thaw_port(ap); ata_for_each_dev() 3181 if (ap->ops->cable_detect) 3182 ap->cbl = ap->ops->cable_detect(ap); 3183 ata_force_cbl(ap); 3206 spin_lock_irqsave(ap->lock, flags); ata_for_each_dev() 3207 ap->pflags |= 
ATA_PFLAG_SCSI_HOTPLUG; ata_for_each_dev() 3208 spin_unlock_irqrestore(ap->lock, flags); ata_for_each_dev() 3239 struct ata_port *ap = link->ap; ata_set_mode() local 3255 if (ap->ops->set_mode) 3256 rc = ap->ops->set_mode(link, r_failed_dev); 3293 u8 *sense_buffer = dev->link->ap->sector_buf; atapi_eh_clear_ua() 3342 struct ata_port *ap = link->ap; ata_eh_maybe_retry_flush() local 3352 qc = __ata_qc_from_tag(ap, link->active_tag); ata_eh_maybe_retry_flush() 3391 if (!(ap->pflags & ATA_PFLAG_FROZEN)) ata_eh_maybe_retry_flush() 3418 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; ata_eh_set_lpm() local 3422 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; ata_eh_set_lpm() 3428 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) ata_eh_set_lpm() 3466 if (ap) { 3467 rc = ap->ops->set_lpm(link, policy, hints); 3468 if (!rc && ap->slave_link) 3469 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 3491 if (ap && ap->slave_link) 3492 ap->slave_link->lpm_policy = policy; 3518 if (ap && ap->slave_link) 3519 ap->slave_link->lpm_policy = old_policy; 3554 struct ata_port *ap = link->ap; ata_eh_skip_recovery() local 3567 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) ata_eh_skip_recovery() 3619 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, ata_eh_schedule_probe() 3700 * @ap: host port to recover 3720 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, ata_eh_recover() argument 3733 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 3739 spin_lock_irqsave(ap->lock, flags); ata_for_each_link() 3741 spin_unlock_irqrestore(ap->lock, flags); ata_for_each_link() 3770 if (ap->pflags & ATA_PFLAG_UNLOADING) 3774 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 3786 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 3805 * ap->park_req_pending 3807 ata_eh_pull_park_action(ap); 3810 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 3837 ata_eh_release(ap); 3838 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3840 ata_eh_acquire(ap); 3842 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 3855 ata_for_each_link(link, ap, PMP_FIRST) { ata_for_each_link() 3858 if (sata_pmp_attached(ap) && ata_is_host_link(link)) ata_for_each_link() 3907 if (link->lpm_policy != ap->target_lpm_policy) { 3908 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 3922 if (ap->pflags & ATA_PFLAG_FROZEN) { 3926 if (sata_pmp_attached(ap)) 3945 * @ap: host port to finish EH for 3953 void ata_eh_finish(struct ata_port *ap) ata_eh_finish() argument 3959 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_finish() 3985 WARN_ON(ap->nr_active_links); ata_eh_finish() 3986 ap->nr_active_links = 0; ata_eh_finish() 3991 * @ap: host port to handle error for 4003 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_do_eh() argument 4010 ata_eh_autopsy(ap); ata_do_eh() 4011 ata_eh_report(ap); ata_do_eh() 4013 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, ata_do_eh() 4016 ata_for_each_dev(dev, &ap->link, ALL) ata_do_eh() 4020 ata_eh_finish(ap); ata_do_eh() 4025 * @ap: host port to handle error for 4032 void ata_std_error_handler(struct ata_port *ap) ata_std_error_handler() argument 4034 struct ata_port_operations *ops = ap->ops; ata_std_error_handler() 4038 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) ata_std_error_handler() 4041 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); ata_std_error_handler() 4047 * @ap: port to suspend 4049 * Suspend @ap. 
4054 static void ata_eh_handle_port_suspend(struct ata_port *ap) ata_eh_handle_port_suspend() argument 4061 spin_lock_irqsave(ap->lock, flags); ata_eh_handle_port_suspend() 4062 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || ata_eh_handle_port_suspend() 4063 ap->pm_mesg.event & PM_EVENT_RESUME) { ata_eh_handle_port_suspend() 4064 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_suspend() 4067 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_suspend() 4069 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); ata_eh_handle_port_suspend() 4076 if (PMSG_IS_AUTO(ap->pm_mesg)) { ata_eh_handle_port_suspend() 4077 ata_for_each_dev(dev, &ap->link, ENABLED) { ata_eh_handle_port_suspend() 4084 rc = ata_acpi_on_suspend(ap); ata_eh_handle_port_suspend() 4089 ata_eh_freeze_port(ap); ata_eh_handle_port_suspend() 4091 if (ap->ops->port_suspend) ata_eh_handle_port_suspend() 4092 rc = ap->ops->port_suspend(ap, ap->pm_mesg); ata_eh_handle_port_suspend() 4094 ata_acpi_set_state(ap, ap->pm_mesg); ata_eh_handle_port_suspend() 4097 spin_lock_irqsave(ap->lock, flags); ata_eh_handle_port_suspend() 4099 ap->pflags &= ~ATA_PFLAG_PM_PENDING; ata_eh_handle_port_suspend() 4101 ap->pflags |= ATA_PFLAG_SUSPENDED; ata_eh_handle_port_suspend() 4102 else if (ap->pflags & ATA_PFLAG_FROZEN) ata_eh_handle_port_suspend() 4103 ata_port_schedule_eh(ap); ata_eh_handle_port_suspend() 4105 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_suspend() 4112 * @ap: port to resume 4114 * Resume @ap. 4119 static void ata_eh_handle_port_resume(struct ata_port *ap) ata_eh_handle_port_resume() argument 4127 spin_lock_irqsave(ap->lock, flags); ata_eh_handle_port_resume() 4128 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || ata_eh_handle_port_resume() 4129 !(ap->pm_mesg.event & PM_EVENT_RESUME)) { ata_eh_handle_port_resume() 4130 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_resume() 4133 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_resume() 4135 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); ata_eh_handle_port_resume() 4144 ata_for_each_link(link, ap, HOST_FIRST) ata_eh_handle_port_resume() 4148 ata_acpi_set_state(ap, ap->pm_mesg); ata_eh_handle_port_resume() 4150 if (ap->ops->port_resume) ata_eh_handle_port_resume() 4151 rc = ap->ops->port_resume(ap); ata_eh_handle_port_resume() 4154 ata_acpi_on_resume(ap); ata_eh_handle_port_resume() 4157 spin_lock_irqsave(ap->lock, flags); ata_eh_handle_port_resume() 4158 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); ata_eh_handle_port_resume() 4159 spin_unlock_irqrestore(ap->lock, flags); ata_eh_handle_port_resume()
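The libata-eh.c hits above are the core error-handling machinery. From a driver's point of view the usual entry points are small: an ->error_handler that wraps ata_do_eh() with the driver's reset methods, and ata_port_freeze() from interrupt context when the hardware state can no longer be trusted. A sketch, with the example_* reset callbacks hypothetical:

static int  example_prereset(struct ata_link *link, unsigned long deadline);
static int  example_softreset(struct ata_link *link, unsigned int *classes,
			      unsigned long deadline);
static void example_postreset(struct ata_link *link, unsigned int *classes);

static void example_error_handler(struct ata_port *ap)
{
	/* autopsy + report + recover + finish, using this driver's resets */
	ata_do_eh(ap, example_prereset, example_softreset,
		  sata_std_hardreset, example_postreset);
}

static void example_on_fatal_irq(struct ata_port *ap)
{
	/* caller holds ap->lock (host lock), as the interrupt paths above do;
	 * freezing aborts all in-flight commands and schedules EH */
	ata_port_freeze(ap);
}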
|
H A D | sata_vsc.c | 105 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); vsc_sata_scr_read() 115 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); vsc_sata_scr_write() 120 static void vsc_freeze(struct ata_port *ap) vsc_freeze() argument 124 mask_addr = ap->host->iomap[VSC_MMIO_BAR] + vsc_freeze() 125 VSC_SATA_INT_MASK_OFFSET + ap->port_no; vsc_freeze() 131 static void vsc_thaw(struct ata_port *ap) vsc_thaw() argument 135 mask_addr = ap->host->iomap[VSC_MMIO_BAR] + vsc_thaw() 136 VSC_SATA_INT_MASK_OFFSET + ap->port_no; vsc_thaw() 142 static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) vsc_intr_mask_update() argument 147 mask_addr = ap->host->iomap[VSC_MMIO_BAR] + vsc_intr_mask_update() 148 VSC_SATA_INT_MASK_OFFSET + ap->port_no; vsc_intr_mask_update() 158 static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) vsc_sata_tf_load() argument 160 struct ata_ioports *ioaddr = &ap->ioaddr; vsc_sata_tf_load() 169 if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) { vsc_sata_tf_load() 170 ap->last_ctl = tf->ctl; vsc_sata_tf_load() 171 vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN); vsc_sata_tf_load() 195 ata_wait_idle(ap); vsc_sata_tf_load() 199 static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) vsc_sata_tf_read() argument 201 struct ata_ioports *ioaddr = &ap->ioaddr; vsc_sata_tf_read() 204 tf->command = ata_sff_check_status(ap); vsc_sata_tf_read() 227 static inline void vsc_error_intr(u8 port_status, struct ata_port *ap) vsc_error_intr() argument 230 ata_port_freeze(ap); vsc_error_intr() 232 ata_port_abort(ap); vsc_error_intr() 235 static void vsc_port_intr(u8 port_status, struct ata_port *ap) vsc_port_intr() argument 241 vsc_error_intr(port_status, ap); vsc_port_intr() 245 qc = ata_qc_from_tag(ap, ap->link.active_tag); vsc_port_intr() 247 handled = ata_bmdma_port_intr(ap, qc); vsc_port_intr() 255 ap->ops->sff_check_status(ap); vsc_port_intr() 378 struct ata_port *ap = host->ports[i]; vsc_sata_init_one() local 381 vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset); vsc_sata_init_one() 383 ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio"); vsc_sata_init_one() 384 ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port"); vsc_sata_init_one()
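sata_vsc freezes and thaws a port by masking or unmasking its slice of a shared interrupt-mask register. A sketch of that pattern; the BAR index, offset and mask value describe a hypothetical controller, not the Vitesse part:

#define EXAMPLE_MMIO_BAR	0	/* hypothetical BAR holding the mask register */
#define EXAMPLE_INT_MASK_OFFSET	0x40	/* hypothetical base offset */
#define EXAMPLE_INT_MASK_ALL	0xff	/* hypothetical "all sources" mask */

static void example_freeze(struct ata_port *ap)
{
	void __iomem *mask_addr = ap->host->iomap[EXAMPLE_MMIO_BAR] +
				  EXAMPLE_INT_MASK_OFFSET + ap->port_no;

	writeb(0, mask_addr);			/* mask every interrupt source */
}

static void example_thaw(struct ata_port *ap)
{
	void __iomem *mask_addr = ap->host->iomap[EXAMPLE_MMIO_BAR] +
				  EXAMPLE_INT_MASK_OFFSET + ap->port_no;

	writeb(EXAMPLE_INT_MASK_ALL, mask_addr);	/* re-enable them */
}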
|
H A D | sata_qstor.c | 117 static int qs_port_start(struct ata_port *ap); 122 static void qs_freeze(struct ata_port *ap); 123 static void qs_thaw(struct ata_port *ap); 125 static void qs_error_handler(struct ata_port *ap); 187 static inline void qs_enter_reg_mode(struct ata_port *ap) qs_enter_reg_mode() argument 189 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_enter_reg_mode() 190 struct qs_port_priv *pp = ap->private_data; qs_enter_reg_mode() 197 static inline void qs_reset_channel_logic(struct ata_port *ap) qs_reset_channel_logic() argument 199 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_reset_channel_logic() 203 qs_enter_reg_mode(ap); qs_reset_channel_logic() 206 static void qs_freeze(struct ata_port *ap) qs_freeze() argument 208 u8 __iomem *mmio_base = qs_mmio_base(ap->host); qs_freeze() 211 qs_enter_reg_mode(ap); qs_freeze() 214 static void qs_thaw(struct ata_port *ap) qs_thaw() argument 216 u8 __iomem *mmio_base = qs_mmio_base(ap->host); qs_thaw() 218 qs_enter_reg_mode(ap); qs_thaw() 224 struct ata_port *ap = link->ap; qs_prereset() local 226 qs_reset_channel_logic(ap); qs_prereset() 234 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8)); qs_scr_read() 238 static void qs_error_handler(struct ata_port *ap) qs_error_handler() argument 240 qs_enter_reg_mode(ap); qs_error_handler() 241 ata_sff_error_handler(ap); qs_error_handler() 248 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8)); qs_scr_write() 255 struct ata_port *ap = qc->ap; qs_fill_sg() local 256 struct qs_port_priv *pp = ap->private_data; qs_fill_sg() 281 struct qs_port_priv *pp = qc->ap->private_data; qs_qc_prep() 289 qs_enter_reg_mode(qc->ap); qs_qc_prep() 318 struct ata_port *ap = qc->ap; qs_packet_start() local 319 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_packet_start() 321 VPRINTK("ENTER, ap %p\n", ap); qs_packet_start() 331 struct qs_port_priv *pp = qc->ap->private_data; qs_qc_issue() 358 struct ata_port *ap = qc->ap; qs_do_or_die() local 359 struct ata_eh_info *ehi = &ap->link.eh_info; qs_do_or_die() 365 ata_port_abort(ap); qs_do_or_die() 367 ata_port_freeze(ap); qs_do_or_die() 387 struct ata_port *ap = host->ports[port_no]; qs_intr_pkt() local 388 struct qs_port_priv *pp = ap->private_data; qs_intr_pkt() 396 qc = ata_qc_from_tag(ap, ap->link.active_tag); qs_intr_pkt() 401 qs_enter_reg_mode(qc->ap); qs_intr_pkt() 418 struct ata_port *ap = host->ports[port_no]; qs_intr_mmio() local 419 struct qs_port_priv *pp = ap->private_data; qs_intr_mmio() 422 qc = ata_qc_from_tag(ap, ap->link.active_tag); qs_intr_mmio() 433 ata_sff_check_status(ap); qs_intr_mmio() 441 handled |= ata_sff_port_intr(ap, qc); qs_intr_mmio() 481 static int qs_port_start(struct ata_port *ap) qs_port_start() argument 483 struct device *dev = ap->host->dev; qs_port_start() 485 void __iomem *mmio_base = qs_mmio_base(ap->host); qs_port_start() 486 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); qs_port_start() 497 ap->private_data = pp; qs_port_start() 499 qs_enter_reg_mode(ap); qs_port_start() 619 struct ata_port *ap = host->ports[port_no]; qs_ata_init_one() local 623 qs_ata_setup_port(&ap->ioaddr, chan); qs_ata_init_one() 625 ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio"); qs_ata_init_one() 626 ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port"); qs_ata_init_one()
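sata_qstor keeps per-port controller state (its command packet area) in ap->private_data, allocated from ->port_start. A sketch of that allocation pattern using managed allocations; the structure layout and buffer size are hypothetical:

struct example_port_priv {
	u8		*pkt;		/* command packet area seen by the controller */
	dma_addr_t	pkt_dma;
};

static int example_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct example_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* managed coherent memory: released automatically with the host */
	pp->pkt = dmam_alloc_coherent(dev, 4096, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}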
|
H A D | pata_hpt3x2n.c | 94 * @ap: ATA port 103 static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) hpt3x2n_find_mode() argument 134 * @ap: ATA port to detect on 139 static int hpt3x2n_cable_detect(struct ata_port *ap) hpt3x2n_cable_detect() argument 142 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x2n_cable_detect() 154 if (ata66 & (2 >> ap->port_no)) hpt3x2n_cable_detect() 171 struct ata_port *ap = link->ap; hpt3x2n_pre_reset() local 172 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x2n_pre_reset() 175 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); hpt3x2n_pre_reset() 181 static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev, hpt3x2n_set_mode() argument 184 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x2n_set_mode() 189 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); hpt3x2n_set_mode() 190 addr2 = 0x51 + 4 * ap->port_no; hpt3x2n_set_mode() 205 timing = hpt3x2n_find_mode(ap, mode); hpt3x2n_set_mode() 214 * @ap: ATA interface 220 static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) hpt3x2n_set_piomode() argument 222 hpt3x2n_set_mode(ap, adev, adev->pio_mode); hpt3x2n_set_piomode() 227 * @ap: ATA interface 233 static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) hpt3x2n_set_dmamode() argument 235 hpt3x2n_set_mode(ap, adev, adev->dma_mode); hpt3x2n_set_dmamode() 247 struct ata_port *ap = qc->ap; hpt3x2n_bmdma_stop() local 248 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x2n_bmdma_stop() 249 int mscreg = 0x50 + 2 * ap->port_no; hpt3x2n_bmdma_stop() 254 if (bwsr_stat & (1 << ap->port_no)) hpt3x2n_bmdma_stop() 261 * @ap: ATA port 275 static void hpt3x2n_set_clock(struct ata_port *ap, int source) hpt3x2n_set_clock() argument 277 void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8; hpt3x2n_set_clock() 299 static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) hpt3x2n_use_dpll() argument 301 long flags = (long)ap->host->private_data; hpt3x2n_use_dpll() 313 struct ata_port *ap = qc->ap; hpt3x2n_qc_defer() local 314 struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; hpt3x2n_qc_defer() 315 int rc, flags = (long)ap->host->private_data; hpt3x2n_qc_defer() 316 int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); hpt3x2n_qc_defer() 330 struct ata_port *ap = qc->ap; hpt3x2n_qc_issue() local 331 int flags = (long)ap->host->private_data; hpt3x2n_qc_issue() 332 int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); hpt3x2n_qc_issue() 337 ap->host->private_data = (void *)(long)flags; hpt3x2n_qc_issue() 339 hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); hpt3x2n_qc_issue()
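pata_hpt3x2n has to switch the controller clock depending on whether the next command writes or reads, which is why it wraps qc_issue (and defers commands that would need the other clock while the sibling port is busy). A sketch of the wrapper shape only; example_want_dpll(), example_using_dpll() and example_set_clock() are hypothetical helpers:

static int  example_want_dpll(struct ata_port *ap, int writing);
static int  example_using_dpll(struct ata_port *ap);
static void example_set_clock(struct ata_port *ap, int use_dpll);

static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int want = example_want_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);

	if (example_using_dpll(ap) != want)
		example_set_clock(ap, want);	/* reprogram the clock source first */

	return ata_bmdma_qc_issue(qc);		/* then hand off to the generic path */
}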
|
H A D | pata_isapnp.c | 50 struct ata_port *ap; isapnp_init_one() local 73 ap = host->ports[0]; isapnp_init_one() 75 ap->ops = &isapnp_noalt_port_ops; isapnp_init_one() 76 ap->pio_mask = ATA_PIO0; isapnp_init_one() 77 ap->flags |= ATA_FLAG_SLAVE_POSS; isapnp_init_one() 79 ap->ioaddr.cmd_addr = cmd_addr; isapnp_init_one() 84 ap->ioaddr.altstatus_addr = ctl_addr; isapnp_init_one() 85 ap->ioaddr.ctl_addr = ctl_addr; isapnp_init_one() 86 ap->ops = &isapnp_port_ops; isapnp_init_one() 89 ata_sff_std_ports(&ap->ioaddr); isapnp_init_one() 91 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", isapnp_init_one()
|
H A D | libata-pmp.c | 41 struct ata_port *ap = link->ap; sata_pmp_read() local 42 struct ata_device *pmp_dev = ap->link.device; sata_pmp_read() 78 struct ata_port *ap = link->ap; sata_pmp_write() local 79 struct ata_device *pmp_dev = ap->link.device; sata_pmp_write() 113 struct ata_port *ap = link->ap; sata_pmp_qc_defer_cmd_switch() local 115 if (ap->excl_link == NULL || ap->excl_link == link) { sata_pmp_qc_defer_cmd_switch() 116 if (ap->nr_active_links == 0 || ata_link_active(link)) { sata_pmp_qc_defer_cmd_switch() 121 ap->excl_link = link; sata_pmp_qc_defer_cmd_switch() 261 struct ata_port *ap = dev->link->ap; sata_pmp_configure() local 277 if ((ap->flags & ATA_FLAG_AN) && sata_pmp_configure() 299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); sata_pmp_configure() 306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); sata_pmp_configure() 337 static int sata_pmp_init_links (struct ata_port *ap, int nr_ports) sata_pmp_init_links() argument 339 struct ata_link *pmp_link = ap->pmp_link; sata_pmp_init_links() 349 ata_link_init(ap, &pmp_link[i], i); sata_pmp_init_links() 351 ap->pmp_link = pmp_link; sata_pmp_init_links() 375 ap->pmp_link = NULL; sata_pmp_init_links() 379 static void sata_pmp_quirks(struct ata_port *ap) sata_pmp_quirks() argument 381 u32 *gscr = ap->link.device->gscr; sata_pmp_quirks() 388 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 413 ata_for_each_link(link, ap, EDGE) 419 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 449 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; 455 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 465 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 489 struct ata_port *ap = link->ap; sata_pmp_attach() local 495 if (!sata_pmp_supported(ap)) { sata_pmp_attach() 523 rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr)); sata_pmp_attach() 530 spin_lock_irqsave(ap->lock, flags); sata_pmp_attach() 531 WARN_ON(ap->nr_pmp_links); sata_pmp_attach() 532 ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr); sata_pmp_attach() 533 spin_unlock_irqrestore(ap->lock, flags); sata_pmp_attach() 535 sata_pmp_quirks(ap); sata_pmp_attach() 537 if (ap->ops->pmp_attach) sata_pmp_attach() 538 ap->ops->pmp_attach(ap); sata_pmp_attach() 540 ata_for_each_link(tlink, ap, EDGE) sata_pmp_attach() 563 struct ata_port *ap = link->ap; sata_pmp_detach() local 572 if (ap->ops->pmp_detach) sata_pmp_detach() 573 ap->ops->pmp_detach(ap); sata_pmp_detach() 575 ata_for_each_link(tlink, ap, EDGE) sata_pmp_detach() 578 spin_lock_irqsave(ap->lock, flags); sata_pmp_detach() 579 ap->nr_pmp_links = 0; sata_pmp_detach() 581 spin_unlock_irqrestore(ap->lock, flags); sata_pmp_detach() 652 struct ata_port *ap = link->ap; sata_pmp_revalidate() local 653 u32 *gscr = (void *)ap->sector_buf; sata_pmp_revalidate() 735 * @ap: ATA port PMP is attached to 741 * Recover PMP attached to @ap. 
Recovery procedure is somewhat 752 static int sata_pmp_eh_recover_pmp(struct ata_port *ap, sata_pmp_eh_recover_pmp() argument 756 struct ata_link *link = &ap->link; sata_pmp_eh_recover_pmp() 785 ata_for_each_link(tlink, ap, EDGE) { ata_for_each_link() 845 static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap) sata_pmp_eh_handle_disabled_links() argument 851 spin_lock_irqsave(ap->lock, flags); sata_pmp_eh_handle_disabled_links() 853 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 857 spin_unlock_irqrestore(ap->lock, flags); ata_for_each_link() 875 spin_lock_irqsave(ap->lock, flags); ata_for_each_link() 878 spin_unlock_irqrestore(ap->lock, flags); 885 struct ata_port *ap = link->ap; sata_pmp_handle_link_fail() local 897 spin_lock_irqsave(ap->lock, flags); sata_pmp_handle_link_fail() 899 spin_unlock_irqrestore(ap->lock, flags); sata_pmp_handle_link_fail() 910 * @ap: ATA port to recover 912 * Drive EH recovery operation for PMP enabled port @ap. This 923 static int sata_pmp_eh_recover(struct ata_port *ap) sata_pmp_eh_recover() argument 925 struct ata_port_operations *ops = ap->ops; sata_pmp_eh_recover() 927 struct ata_link *pmp_link = &ap->link; sata_pmp_eh_recover() 938 ata_for_each_link(link, ap, EDGE) sata_pmp_eh_recover() 943 if (!sata_pmp_attached(ap)) { sata_pmp_eh_recover() 944 rc = ata_eh_recover(ap, ops->prereset, ops->softreset, sata_pmp_eh_recover() 947 ata_for_each_dev(dev, &ap->link, ALL) sata_pmp_eh_recover() 956 ata_for_each_link(link, ap, EDGE) sata_pmp_eh_recover() 963 rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset, sata_pmp_eh_recover() 985 rc = sata_pmp_eh_handle_disabled_links(ap); sata_pmp_eh_recover() 990 rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset, sata_pmp_eh_recover() 996 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); sata_pmp_eh_recover() 998 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); sata_pmp_eh_recover() 1004 ata_for_each_link(link, ap, EDGE) ata_for_each_link() 1040 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 1056 ata_port_info(ap, 1074 if (ap->pflags & ATA_PFLAG_UNLOADING) 1077 if (!sata_pmp_attached(ap)) 1085 ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n", 1095 * @ap: host port to handle error for 1098 * @ap. 1103 void sata_pmp_error_handler(struct ata_port *ap) sata_pmp_error_handler() argument 1105 ata_eh_autopsy(ap); sata_pmp_error_handler() 1106 ata_eh_report(ap); sata_pmp_error_handler() 1107 sata_pmp_eh_recover(ap); sata_pmp_error_handler() 1108 ata_eh_finish(ap); sata_pmp_error_handler()
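sata_pmp_qc_defer_cmd_switch() above implements the "one link owns the port while a command switch is pending" rule. A simplified sketch of that decision follows; the return values, qc flags and locking are placeholders, and only the excl_link/nr_active_links shape comes from the listing.

#include <stdio.h>
#include <stddef.h>

struct link;
struct port {
        struct link *excl_link;
        int          nr_active_links;
};

enum defer { ISSUE_OK, DEFER_PORT };

static enum defer qc_defer_cmd_switch(struct port *ap, struct link *link,
                                      int link_is_active)
{
        if (ap->excl_link == NULL || ap->excl_link == link) {
                if (ap->nr_active_links == 0 || link_is_active)
                        return ISSUE_OK;      /* same link keeps the port */
                ap->excl_link = link;         /* remember who is waiting */
        }
        return DEFER_PORT;                    /* switch needed: defer */
}

int main(void)
{
        struct link a, b;
        struct port ap = { NULL, 0 };

        printf("%d\n", qc_defer_cmd_switch(&ap, &a, 0)); /* 0: idle port, issue */
        ap.nr_active_links = 1;
        printf("%d\n", qc_defer_cmd_switch(&ap, &b, 0)); /* 1: defer, b recorded */
        printf("%d\n", qc_defer_cmd_switch(&ap, &a, 1)); /* 1: b owns the switch */
        return 0;
}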
|
H A D | sata_rcar.c | 223 static void sata_rcar_freeze(struct ata_port *ap) sata_rcar_freeze() argument 225 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_freeze() 230 ata_sff_freeze(ap); sata_rcar_freeze() 233 static void sata_rcar_thaw(struct ata_port *ap) sata_rcar_thaw() argument 235 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_thaw() 241 ata_sff_thaw(ap); sata_rcar_thaw() 266 static u8 sata_rcar_check_status(struct ata_port *ap) sata_rcar_check_status() argument 268 return ioread32(ap->ioaddr.status_addr); sata_rcar_check_status() 271 static u8 sata_rcar_check_altstatus(struct ata_port *ap) sata_rcar_check_altstatus() argument 273 return ioread32(ap->ioaddr.altstatus_addr); sata_rcar_check_altstatus() 276 static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl) sata_rcar_set_devctl() argument 278 iowrite32(ctl, ap->ioaddr.ctl_addr); sata_rcar_set_devctl() 281 static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device) sata_rcar_dev_select() argument 283 iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr); sata_rcar_dev_select() 284 ata_sff_pause(ap); /* needed; also flushes, for mmio */ sata_rcar_dev_select() 287 static unsigned int sata_rcar_ata_devchk(struct ata_port *ap, sata_rcar_ata_devchk() argument 290 struct ata_ioports *ioaddr = &ap->ioaddr; sata_rcar_ata_devchk() 293 sata_rcar_dev_select(ap, device); sata_rcar_ata_devchk() 316 struct ata_port *ap = link->ap; sata_rcar_wait_after_reset() local 318 ata_msleep(ap, ATA_WAIT_AFTER_RESET); sata_rcar_wait_after_reset() 323 static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline) sata_rcar_bus_softreset() argument 325 struct ata_ioports *ioaddr = &ap->ioaddr; sata_rcar_bus_softreset() 327 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); sata_rcar_bus_softreset() 330 iowrite32(ap->ctl, ioaddr->ctl_addr); sata_rcar_bus_softreset() 332 iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr); sata_rcar_bus_softreset() 334 iowrite32(ap->ctl, ioaddr->ctl_addr); sata_rcar_bus_softreset() 335 ap->last_ctl = ap->ctl; sata_rcar_bus_softreset() 338 return sata_rcar_wait_after_reset(&ap->link, deadline); sata_rcar_bus_softreset() 344 struct ata_port *ap = link->ap; sata_rcar_softreset() local 350 if (sata_rcar_ata_devchk(ap, 0)) sata_rcar_softreset() 355 rc = sata_rcar_bus_softreset(ap, deadline); sata_rcar_softreset() 369 static void sata_rcar_tf_load(struct ata_port *ap, sata_rcar_tf_load() argument 372 struct ata_ioports *ioaddr = &ap->ioaddr; sata_rcar_tf_load() 375 if (tf->ctl != ap->last_ctl) { sata_rcar_tf_load() 377 ap->last_ctl = tf->ctl; sata_rcar_tf_load() 378 ata_wait_idle(ap); sata_rcar_tf_load() 414 ata_wait_idle(ap); sata_rcar_tf_load() 417 static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) sata_rcar_tf_read() argument 419 struct ata_ioports *ioaddr = &ap->ioaddr; sata_rcar_tf_read() 421 tf->command = sata_rcar_check_status(ap); sata_rcar_tf_read() 437 ap->last_ctl = tf->ctl; sata_rcar_tf_read() 441 static void sata_rcar_exec_command(struct ata_port *ap, sata_rcar_exec_command() argument 444 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); sata_rcar_exec_command() 446 iowrite32(tf->command, ap->ioaddr.command_addr); sata_rcar_exec_command() 447 ata_sff_pause(ap); sata_rcar_exec_command() 454 struct ata_port *ap = dev->link->ap; sata_rcar_data_xfer() local 455 void __iomem *data_addr = ap->ioaddr.data_addr; sata_rcar_data_xfer() 491 struct ata_port *ap; sata_rcar_drain_fifo() local 497 ap = qc->ap; sata_rcar_drain_fifo() 499 
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) && sata_rcar_drain_fifo() 501 ioread32(ap->ioaddr.data_addr); sata_rcar_drain_fifo() 505 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); sata_rcar_drain_fifo() 514 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2)); sata_rcar_scr_read() 524 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2)); sata_rcar_scr_write() 530 struct ata_port *ap = qc->ap; sata_rcar_bmdma_fill_sg() local 531 struct ata_bmdma_prd *prd = ap->bmdma_prd; sata_rcar_bmdma_fill_sg() 564 struct ata_port *ap = qc->ap; sata_rcar_bmdma_setup() local 566 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_bmdma_setup() 572 iowrite32(ap->bmdma_prd_dma, base + ATAPI_DTB_ADR_REG); sata_rcar_bmdma_setup() 586 ap->ops->sff_exec_command(ap, &qc->tf); sata_rcar_bmdma_setup() 591 struct ata_port *ap = qc->ap; sata_rcar_bmdma_start() local 592 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_bmdma_start() 605 struct ata_port *ap = qc->ap; sata_rcar_bmdma_stop() local 606 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_bmdma_stop() 619 ata_sff_dma_pause(ap); sata_rcar_bmdma_stop() 622 static u8 sata_rcar_bmdma_status(struct ata_port *ap) sata_rcar_bmdma_status() argument 624 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_bmdma_status() 676 static void sata_rcar_serr_interrupt(struct ata_port *ap) sata_rcar_serr_interrupt() argument 678 struct sata_rcar_priv *priv = ap->host->private_data; sata_rcar_serr_interrupt() 679 struct ata_eh_info *ehi = &ap->link.eh_info; sata_rcar_serr_interrupt() 702 ata_port_freeze(ap); sata_rcar_serr_interrupt() 704 ata_port_abort(ap); sata_rcar_serr_interrupt() 707 static void sata_rcar_ata_interrupt(struct ata_port *ap) sata_rcar_ata_interrupt() argument 712 qc = ata_qc_from_tag(ap, ap->link.active_tag); sata_rcar_ata_interrupt() 714 handled |= ata_bmdma_port_intr(ap, qc); sata_rcar_ata_interrupt() 718 sata_rcar_check_status(ap); sata_rcar_ata_interrupt() 727 struct ata_port *ap; sata_rcar_interrupt() local 740 ap = host->ports[0]; sata_rcar_interrupt() 743 sata_rcar_ata_interrupt(ap); sata_rcar_interrupt() 746 sata_rcar_serr_interrupt(ap); sata_rcar_interrupt() 757 struct ata_port *ap = host->ports[0]; sata_rcar_setup_port() local 758 struct ata_ioports *ioaddr = &ap->ioaddr; sata_rcar_setup_port() 762 ap->ops = &sata_rcar_port_ops; sata_rcar_setup_port() 763 ap->pio_mask = ATA_PIO4; sata_rcar_setup_port() 764 ap->udma_mask = ATA_UDMA6; sata_rcar_setup_port() 765 ap->flags |= ATA_FLAG_SATA; sata_rcar_setup_port() 768 ap->flags |= ATA_FLAG_NO_DIPM; sata_rcar_setup_port()
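The bus softreset above pulses SRST by writing the cached control value, the same value with ATA_SRST set, and the cached value again before waiting for the device. A sketch of that write ordering, replayed against a printf instead of the memory-mapped control register (the delays the real driver inserts between writes are not shown in the listing and are elided here):

#include <stdio.h>
#include <stdint.h>

#define ATA_SRST 0x04   /* bit 2 of the standard ATA device-control register */

static void write_ctl(uint32_t val)
{
        printf("ctl <- 0x%02x\n", val);   /* stands in for iowrite32(val, ctl_addr) */
}

int main(void)
{
        uint32_t ctl = 0x08;              /* hypothetical cached ap->ctl value */

        write_ctl(ctl);                   /* known state        */
        write_ctl(ctl | ATA_SRST);        /* assert soft reset  */
        write_ctl(ctl);                   /* deassert, then wait for the device */
        return 0;
}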
|
H A D | pata_sl82c105.c | 57 struct ata_port *ap = link->ap; sl82c105_pre_reset() local 58 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sl82c105_pre_reset() 60 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) sl82c105_pre_reset() 68 * @ap: ATA interface 77 static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) sl82c105_configure_piomode() argument 79 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sl82c105_configure_piomode() 84 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno); sl82c105_configure_piomode() 93 * @ap: ATA interface 100 static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev) sl82c105_set_piomode() argument 102 sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); sl82c105_set_piomode() 107 * @ap: ATA interface 114 static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev) sl82c105_configure_dmamode() argument 116 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sl82c105_configure_dmamode() 121 int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno); sl82c105_configure_dmamode() 131 * @ap: ATA interface 139 static void sl82c105_reset_engine(struct ata_port *ap) sl82c105_reset_engine() argument 141 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sl82c105_reset_engine() 162 struct ata_port *ap = qc->ap; sl82c105_bmdma_start() local 165 sl82c105_reset_engine(ap); sl82c105_bmdma_start() 169 sl82c105_configure_dmamode(ap, qc->dev); sl82c105_bmdma_start() 191 struct ata_port *ap = qc->ap; sl82c105_bmdma_stop() local 194 sl82c105_reset_engine(ap); sl82c105_bmdma_stop() 199 sl82c105_set_piomode(ap, qc->dev); sl82c105_bmdma_stop() 214 struct ata_host *host = qc->ap->host; sl82c105_qc_defer() 215 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; sl82c105_qc_defer() 230 static bool sl82c105_sff_irq_check(struct ata_port *ap) sl82c105_sff_irq_check() argument 232 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sl82c105_sff_irq_check() 233 u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; sl82c105_sff_irq_check()
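In this entry, sl82c105_bmdma_start() resets the engine and loads DMA timings, and sl82c105_bmdma_stop() resets again and restores PIO timings, apparently because the chip keeps only one active timing per drive. Both paths program the register whose address is computed as shown above; a standalone rendering of that sequence:

#include <stdio.h>

static unsigned int timing_reg(int port_no, int devno)
{
        return 0x44 + (8 * port_no) + (4 * devno);   /* as in the listing */
}

static void load_timing(const char *kind, int port_no, int devno)
{
        printf("load %s timing into PCI reg 0x%02x\n",
               kind, timing_reg(port_no, devno));
}

int main(void)
{
        int port = 0, dev = 1;

        load_timing("DMA", port, dev);     /* bmdma_start: engine reset + DMA timings */
        printf("  ... DMA transfer runs ...\n");
        load_timing("PIO", port, dev);     /* bmdma_stop: back to PIO timings */
        return 0;
}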
|
H A D | libata-acpi.c | 44 struct ata_port *ap; member in union:ata_acpi_hotplug_context::__anon3300 65 /* @ap and @dev are the same as ata_acpi_handle_hotplug() */ ata_acpi_detach_device() 66 static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) ata_acpi_detach_device() argument 74 ata_for_each_link(tlink, ap, EDGE) ata_acpi_detach_device() 79 ata_port_schedule_eh(ap); ata_acpi_detach_device() 84 * @ap: ATA port ACPI event occurred 99 static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, ata_acpi_handle_hotplug() argument 102 struct ata_eh_info *ehi = &ap->link.eh_info; ata_acpi_handle_hotplug() 106 spin_lock_irqsave(ap->lock, flags); ata_acpi_handle_hotplug() 118 ata_port_freeze(ap); ata_acpi_handle_hotplug() 123 ata_acpi_detach_device(ap, dev); ata_acpi_handle_hotplug() 128 spin_unlock_irqrestore(ap->lock, flags); ata_acpi_handle_hotplug() 131 ata_port_wait_eh(ap); ata_acpi_handle_hotplug() 137 ata_acpi_handle_hotplug(dev->link->ap, dev, event); ata_acpi_dev_notify_dock() 143 ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event); ata_acpi_ap_notify_dock() 147 static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev, ata_acpi_uevent() argument 158 kobj = &ap->dev->kobj; ata_acpi_uevent() 168 ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event); ata_acpi_ap_uevent() 174 ata_acpi_uevent(dev->link->ap, dev, event); ata_acpi_dev_uevent() 178 void ata_acpi_bind_port(struct ata_port *ap) ata_acpi_bind_port() argument 180 struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev); ata_acpi_bind_port() 184 if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion) ata_acpi_bind_port() 187 acpi_preset_companion(&ap->tdev, host_companion, ap->port_no); ata_acpi_bind_port() 189 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) ata_acpi_bind_port() 190 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; ata_acpi_bind_port() 192 adev = ACPI_COMPANION(&ap->tdev); ata_acpi_bind_port() 200 context->data.ap = ap; ata_acpi_bind_port() 207 struct ata_port *ap = dev->link->ap; ata_acpi_bind_dev() local 208 struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev); ata_acpi_bind_dev() 209 struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev); ata_acpi_bind_dev() 219 (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion)) ata_acpi_bind_dev() 222 if (ap->flags & ATA_FLAG_ACPI_SATA) { ata_acpi_bind_dev() 223 if (!sata_pmp_attached(ap)) ata_acpi_bind_dev() 224 adr = SATA_ADR(ap->port_no, NO_PORT_MULT); ata_acpi_bind_dev() 226 adr = SATA_ADR(ap->port_no, dev->link->pmp); ata_acpi_bind_dev() 265 struct ata_port *ap = host->ports[i]; ata_acpi_dissociate() local 266 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); ata_acpi_dissociate() 268 if (ACPI_HANDLE(&ap->tdev) && gtm) ata_acpi_dissociate() 269 ata_acpi_stm(ap, gtm); ata_acpi_dissociate() 275 * @ap: target ATA port 286 int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm) ata_acpi_gtm() argument 292 acpi_handle handle = ACPI_HANDLE(&ap->tdev); ata_acpi_gtm() 305 ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n", ata_acpi_gtm() 312 ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n", ata_acpi_gtm() 319 ata_port_err(ap, "_GTM returned invalid length %d\n", ata_acpi_gtm() 335 * @ap: target ATA port 346 int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm) ata_acpi_stm() argument 359 in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id; ata_acpi_stm() 362 in_params[2].buffer.pointer = (u8 
*)ap->link.device[1].id; ata_acpi_stm() 367 status = acpi_evaluate_object(ACPI_HANDLE(&ap->tdev), "_STM", ata_acpi_stm() 373 ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n", ata_acpi_stm() 404 struct ata_port *ap = dev->link->ap; ata_dev_get_GTF() local 420 if (ata_msg_probe(ap)) ata_dev_get_GTF() 422 __func__, ap->port_no); ata_dev_get_GTF() 439 if (ata_msg_probe(ap)) ata_dev_get_GTF() 466 if (ata_msg_probe(ap)) ata_dev_get_GTF() 525 * @ap: Port to check 530 int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) ata_acpi_cbl_80wire() argument 534 ata_for_each_dev(dev, &ap->link, ENABLED) { ata_acpi_cbl_80wire() 772 struct ata_port *ap = dev->link->ap; ata_acpi_push_id() local 777 if (ata_msg_probe(ap)) ata_acpi_push_id() 779 __func__, dev->devno, ap->port_no); ata_acpi_push_id() 809 * @ap: target ATA port 811 * This function is called when @ap is about to be suspended. All 822 int ata_acpi_on_suspend(struct ata_port *ap) ata_acpi_on_suspend() argument 830 * @ap: target ATA port 832 * This function is called when @ap is resumed - right after port 838 void ata_acpi_on_resume(struct ata_port *ap) ata_acpi_on_resume() argument 840 const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); ata_acpi_on_resume() 843 if (ACPI_HANDLE(&ap->tdev) && gtm) { ata_acpi_on_resume() 847 ata_acpi_stm(ap, gtm); ata_acpi_on_resume() 853 ata_for_each_dev(dev, &ap->link, ALL) { ata_acpi_on_resume() 865 ata_for_each_dev(dev, &ap->link, ALL) { ata_acpi_on_resume() 891 static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state) sata_acpi_set_state() argument 898 ata_for_each_dev(dev, &ap->link, ENABLED) { sata_acpi_set_state() 920 static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state) pata_acpi_set_state() argument 925 port_handle = ACPI_HANDLE(&ap->tdev); pata_acpi_set_state() 934 ata_for_each_dev(dev, &ap->link, ENABLED) { pata_acpi_set_state() 949 * @ap: target ATA port 955 void ata_acpi_set_state(struct ata_port *ap, pm_message_t state) ata_acpi_set_state() argument 957 if (ap->flags & ATA_FLAG_ACPI_SATA) ata_acpi_set_state() 958 sata_acpi_set_state(ap, state); ata_acpi_set_state() 960 pata_acpi_set_state(ap, state); ata_acpi_set_state() 979 struct ata_port *ap = dev->link->ap; ata_acpi_on_devcfg() local 980 struct ata_eh_context *ehc = &ap->link.eh_context; ata_acpi_on_devcfg() 981 int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; ata_acpi_on_devcfg() 1021 if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) ata_acpi_on_devcfg() 1036 if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) ata_acpi_on_devcfg()
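The "_GTM returned unexpected object type"/"invalid length" messages above come from validating the object returned by the firmware before copying it into the driver's timing struct. A hedged sketch of that validation pattern only; the types below are stand-ins, not ACPICA's union acpi_object, and the gtm layout is illustrative:

#include <stdio.h>
#include <string.h>

struct fake_obj {
        int          type;      /* 0 == buffer in this sketch */
        unsigned int length;
        const void  *pointer;
};

struct gtm { unsigned int word[5]; };   /* illustrative only */

static int copy_gtm(const struct fake_obj *out, struct gtm *gtm)
{
        if (out->type != 0) {
                fprintf(stderr, "unexpected object type 0x%x\n", out->type);
                return -1;
        }
        if (out->length != sizeof(*gtm)) {
                fprintf(stderr, "invalid length %u\n", out->length);
                return -1;
        }
        memcpy(gtm, out->pointer, sizeof(*gtm));
        return 0;
}

int main(void)
{
        struct gtm src = { { 1, 2, 3, 4, 5 } }, dst;
        struct fake_obj ok  = { 0, sizeof(src), &src };
        struct fake_obj bad = { 0, 3, &src };

        printf("good buffer: %d\n", copy_gtm(&ok, &dst));
        printf("short buffer: %d\n", copy_gtm(&bad, &dst));
        return 0;
}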
|
H A D | pata_ns87415.c | 40 * @ap: Port whose timings we are configuring 52 static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) ns87415_set_mode() argument 54 struct pci_dev *dev = to_pci_dev(ap->host->dev); ns87415_set_mode() 55 int unit = 2 * ap->port_no + adev->devno; ns87415_set_mode() 98 * @ap: Port whose timings we are configuring 107 static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev) ns87415_set_piomode() argument 109 ns87415_set_mode(ap, adev, adev->pio_mode); ns87415_set_piomode() 122 struct ata_port *ap = qc->ap; ns87415_bmdma_setup() local 128 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); ns87415_bmdma_setup() 131 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ns87415_bmdma_setup() 138 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); ns87415_bmdma_setup() 140 ap->ops->sff_exec_command(ap, &qc->tf); ns87415_bmdma_setup() 156 ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode); ns87415_bmdma_start() 170 ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode); ns87415_bmdma_stop() 175 * @ap: Channel to clear 181 static void ns87415_irq_clear(struct ata_port *ap) ns87415_irq_clear() argument 183 void __iomem *mmio = ap->ioaddr.bmdma_addr; ns87415_irq_clear() 241 * @ap: channel to check 247 static u8 ns87560_check_status(struct ata_port *ap) ns87560_check_status() argument 249 return ns87560_read_buggy(ap->ioaddr.status_addr); ns87560_check_status() 254 * @ap: Port from which input is read 263 void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) ns87560_tf_read() argument 265 struct ata_ioports *ioaddr = &ap->ioaddr; ns87560_tf_read() 267 tf->command = ns87560_check_status(ap); ns87560_tf_read() 283 ap->last_ctl = tf->ctl; ns87560_tf_read() 289 * @ap: channel to check 295 static u8 ns87560_bmdma_status(struct ata_port *ap) ns87560_bmdma_status() argument 297 return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); ns87560_bmdma_status()
|
H A D | pata_opti.c | 55 struct ata_port *ap = link->ap; opti_pre_reset() local 56 struct pci_dev *pdev = to_pci_dev(ap->host->dev); opti_pre_reset() 62 if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) opti_pre_reset() 70 * @ap: ATA port 80 static void opti_write_reg(struct ata_port *ap, u8 val, int reg) opti_write_reg() argument 82 void __iomem *regio = ap->ioaddr.cmd_addr; opti_write_reg() 98 * @ap: ATA interface 106 static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev) opti_set_piomode() argument 111 void __iomem *regio = ap->ioaddr.cmd_addr; opti_set_piomode() 141 opti_write_reg(ap, adev->devno, MISC_REG); opti_set_piomode() 142 opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG); opti_set_piomode() 143 opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG); opti_set_piomode() 144 opti_write_reg(ap, addr, MISC_REG); opti_set_piomode() 147 opti_write_reg(ap, 0x85, CNTRL_REG); opti_set_piomode()
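opti_set_piomode() above programs the chip in a fixed order: select the drive in MISC_REG, load the (identical) read and write timings, write the address setup back to MISC_REG, then write 0x85 to CNTRL_REG. A sketch of that ordering; the register indices are symbolic here, and the real opti_write_reg() also performs an unlock sequence on the command block that the listing does not show in full.

#include <stdio.h>

enum { MISC_REG, READ_REG, WRITE_REG, CNTRL_REG };

static void write_reg(int val, int reg)
{
        static const char *names[] = { "MISC", "READ", "WRITE", "CNTRL" };
        printf("%-5s <- 0x%02x\n", names[reg], val);
}

static void set_piomode(int devno, int timing, int addr_setup)
{
        write_reg(devno,      MISC_REG);
        write_reg(timing,     READ_REG);
        write_reg(timing,     WRITE_REG);
        write_reg(addr_setup, MISC_REG);
        write_reg(0x85,       CNTRL_REG);
}

int main(void)
{
        set_piomode(0, 0x23, 0x40);     /* hypothetical timing values */
        return 0;
}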
|
H A D | pata_sil680.c | 40 * @ap: ATA interface 50 static unsigned long sil680_selreg(struct ata_port *ap, int r) sil680_selreg() argument 53 base += (ap->port_no << 4); sil680_selreg() 59 * @ap: ATA interface 67 static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r) sil680_seldev() argument 70 base += (ap->port_no << 4); sil680_seldev() 78 * @ap: ATA port 84 static int sil680_cable_detect(struct ata_port *ap) sil680_cable_detect() argument 86 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_cable_detect() 87 unsigned long addr = sil680_selreg(ap, 0); sil680_cable_detect() 98 * @ap: ATA interface 106 static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev) sil680_set_piomode() argument 115 unsigned long tfaddr = sil680_selreg(ap, 0x02); sil680_set_piomode() 116 unsigned long addr = sil680_seldev(ap, adev, 0x04); sil680_set_piomode() 117 unsigned long addr_mask = 0x80 + 4 * ap->port_no; sil680_set_piomode() 118 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_set_piomode() 149 * @ap: ATA interface 158 static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) sil680_set_dmamode() argument 166 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_set_dmamode() 167 unsigned long ma = sil680_seldev(ap, adev, 0x08); sil680_set_dmamode() 168 unsigned long ua = sil680_seldev(ap, adev, 0x0C); sil680_set_dmamode() 169 unsigned long addr_mask = 0x80 + 4 * ap->port_no; sil680_set_dmamode() 201 * @ap: port to which command is being issued 211 static void sil680_sff_exec_command(struct ata_port *ap, sil680_sff_exec_command() argument 214 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); sil680_sff_exec_command() 215 iowrite8(tf->command, ap->ioaddr.command_addr); sil680_sff_exec_command() 216 ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); sil680_sff_exec_command() 219 static bool sil680_sff_irq_check(struct ata_port *ap) sil680_sff_irq_check() argument 221 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_sff_irq_check() 222 unsigned long addr = sil680_selreg(ap, 1); sil680_sff_irq_check()
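sil680_sff_exec_command() above writes the taskfile command and then immediately reads the BMDMA command register; the read-back appears to act as a posting flush so the write reaches the chip before the function returns. A sketch of that pattern with fake registers standing in for the iomap:

#include <stdio.h>
#include <stdint.h>

static uint8_t cmd_reg, bmdma_cmd_reg;

static void iowrite8_fake(uint8_t v, uint8_t *reg) { *reg = v; }
static uint8_t ioread8_fake(const uint8_t *reg)    { return *reg; }

static void exec_command(uint8_t command)
{
        iowrite8_fake(command, &cmd_reg);
        (void)ioread8_fake(&bmdma_cmd_reg);   /* posting read: flush the write */
}

int main(void)
{
        exec_command(0xec);                   /* IDENTIFY DEVICE, for example */
        printf("command register now 0x%02x\n", cmd_reg);
        return 0;
}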
|
H A D | sata_sx4.c | 217 static void pdc_error_handler(struct ata_port *ap); 218 static void pdc_freeze(struct ata_port *ap); 219 static void pdc_thaw(struct ata_port *ap); 220 static int pdc_port_start(struct ata_port *ap); 222 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 223 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 236 static void pdc20621_irq_clear(struct ata_port *ap); 299 static int pdc_port_start(struct ata_port *ap) pdc_port_start() argument 301 struct device *dev = ap->host->dev; pdc_port_start() 312 ap->private_data = pp; pdc_port_start() 456 struct ata_port *ap = qc->ap; pdc20621_dma_prep() local 457 struct pdc_port_priv *pp = ap->private_data; pdc20621_dma_prep() 458 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc20621_dma_prep() 459 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_dma_prep() 460 unsigned int portno = ap->port_no; pdc20621_dma_prep() 466 VPRINTK("ata%u: ENTER\n", ap->print_id); pdc20621_dma_prep() 516 struct ata_port *ap = qc->ap; pdc20621_nodata_prep() local 517 struct pdc_port_priv *pp = ap->private_data; pdc20621_nodata_prep() 518 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc20621_nodata_prep() 519 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_nodata_prep() 520 unsigned int portno = ap->port_no; pdc20621_nodata_prep() 523 VPRINTK("ata%u: ENTER\n", ap->print_id); pdc20621_nodata_prep() 567 struct ata_port *ap = qc->ap; __pdc20621_push_hdma() local 568 struct ata_host *host = ap->host; __pdc20621_push_hdma() 585 struct ata_port *ap = qc->ap; pdc20621_push_hdma() local 586 struct pdc_host_priv *pp = ap->host->private_data; pdc20621_push_hdma() 603 struct ata_port *ap = qc->ap; pdc20621_pop_hdma() local 604 struct pdc_host_priv *pp = ap->host->private_data; pdc20621_pop_hdma() 621 struct ata_port *ap = qc->ap; pdc20621_dump_hdma() local 622 unsigned int port_no = ap->port_no; pdc20621_dump_hdma() 623 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_dump_hdma() 639 struct ata_port *ap = qc->ap; pdc20621_packet_start() local 640 struct ata_host *host = ap->host; pdc20621_packet_start() 641 unsigned int port_no = ap->port_no; pdc20621_packet_start() 650 VPRINTK("ata%u: ENTER\n", ap->print_id); pdc20621_packet_start() 671 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); pdc20621_packet_start() 672 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); pdc20621_packet_start() 702 static inline unsigned int pdc20621_host_intr(struct ata_port *ap, pdc20621_host_intr() argument 707 unsigned int port_no = ap->port_no; pdc20621_host_intr() 720 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, pdc20621_host_intr() 723 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); pdc20621_host_intr() 731 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, pdc20621_host_intr() 746 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, pdc20621_host_intr() 753 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); pdc20621_host_intr() 754 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); pdc20621_host_intr() 759 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, pdc20621_host_intr() 762 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); pdc20621_host_intr() 771 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); pdc20621_host_intr() 778 ap->stats.idle_irq++; pdc20621_host_intr() 784 static void pdc20621_irq_clear(struct ata_port *ap) pdc20621_irq_clear() argument 786 ioread8(ap->ioaddr.status_addr); pdc20621_irq_clear() 792 struct ata_port *ap; 
pdc20621_interrupt() local 829 ap = NULL; pdc20621_interrupt() 831 ap = host->ports[port_no]; pdc20621_interrupt() 833 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); pdc20621_interrupt() 834 if (tmp && ap) { pdc20621_interrupt() 837 qc = ata_qc_from_tag(ap, ap->link.active_tag); pdc20621_interrupt() 839 handled += pdc20621_host_intr(ap, qc, (i > 4), pdc20621_interrupt() 853 static void pdc_freeze(struct ata_port *ap) pdc_freeze() argument 855 void __iomem *mmio = ap->ioaddr.cmd_addr; pdc_freeze() 867 static void pdc_thaw(struct ata_port *ap) pdc_thaw() argument 869 void __iomem *mmio = ap->ioaddr.cmd_addr; pdc_thaw() 875 ioread8(ap->ioaddr.status_addr); pdc_thaw() 884 static void pdc_reset_port(struct ata_port *ap) pdc_reset_port() argument 886 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; pdc_reset_port() 911 pdc_reset_port(link->ap); pdc_softreset() 915 static void pdc_error_handler(struct ata_port *ap) pdc_error_handler() argument 917 if (!(ap->pflags & ATA_PFLAG_FROZEN)) pdc_error_handler() 918 pdc_reset_port(ap); pdc_error_handler() 920 ata_sff_error_handler(ap); pdc_error_handler() 925 struct ata_port *ap = qc->ap; pdc_post_internal_cmd() local 929 pdc_reset_port(ap); pdc_post_internal_cmd() 962 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) pdc_tf_load_mmio() argument 966 ata_sff_tf_load(ap, tf); pdc_tf_load_mmio() 970 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) pdc_exec_command_mmio() argument 974 ata_sff_exec_command(ap, tf); pdc_exec_command_mmio() 1467 struct ata_port *ap = host->ports[i]; pdc_sata_init_one() local 1471 pdc_sata_setup_port(&ap->ioaddr, base + offset); pdc_sata_init_one() 1473 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); pdc_sata_init_one() 1474 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); pdc_sata_init_one() 1475 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); pdc_sata_init_one()
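pdc20621_interrupt() above walks the controller's pending-sequence bits, maps each one to a port, and the (i > 4) argument passed to pdc20621_host_intr() appears to distinguish host-DMA (HDMA) completions from ATA completions. A loose sketch of that demux loop; the mask value and the sequence-to-port mapping below are assumptions for illustration, not the real register layout.

#include <stdio.h>

static int host_intr(int port_no, int doing_hdma)
{
        printf("port %d: %s completion\n", port_no, doing_hdma ? "HDMA" : "ATA");
        return 1;
}

int main(void)
{
        unsigned int mask = 0x22;       /* hypothetical: sequences 1 and 5 pending */
        int i, handled = 0;

        for (i = 1; i < 9; i++) {
                if (!(mask & (1u << i)))
                        continue;
                /* assume sequence i maps to port (i - 1) % 4 in this sketch */
                handled += host_intr((i - 1) % 4, i > 4);
        }
        printf("handled %d\n", handled);
        return 0;
}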
|
H A D | pata_artop.c | 56 struct ata_port *ap = link->ap; artop62x0_pre_reset() local 57 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop62x0_pre_reset() 61 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) artop62x0_pre_reset() 69 * @ap: Port 74 static int artop6260_cable_detect(struct ata_port *ap) artop6260_cable_detect() argument 76 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6260_cable_detect() 79 if (tmp & (1 << ap->port_no)) artop6260_cable_detect() 86 * @ap: Port whose timings we are configuring 98 static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio) artop6210_load_piomode() argument 100 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6210_load_piomode() 101 int dn = adev->devno + 2 * ap->port_no; artop6210_load_piomode() 113 * @ap: Port whose timings we are configuring 125 static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev) artop6210_set_piomode() argument 127 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6210_set_piomode() 128 int dn = adev->devno + 2 * ap->port_no; artop6210_set_piomode() 131 artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); artop6210_set_piomode() 141 * @ap: Port whose timings we are configuring 152 static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio) artop6260_load_piomode() argument 154 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6260_load_piomode() 155 int dn = adev->devno + 2 * ap->port_no; artop6260_load_piomode() 167 * @ap: Port whose timings we are configuring 179 static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev) artop6260_set_piomode() argument 181 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6260_set_piomode() 184 artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); artop6260_set_piomode() 187 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); artop6260_set_piomode() 189 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); artop6260_set_piomode() 194 * @ap: Port whose timings we are configuring 203 static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev) artop6210_set_dmamode() argument 206 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6210_set_dmamode() 207 int dn = adev->devno + 2 * ap->port_no; artop6210_set_dmamode() 216 artop6210_load_piomode(ap, adev, pio); artop6210_set_dmamode() 233 * @ap: Port whose timings we are configuring 243 static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev) artop6260_set_dmamode() argument 246 struct pci_dev *pdev = to_pci_dev(ap->host->dev); artop6260_set_dmamode() 255 artop6260_load_piomode(ap, adev, pio); artop6260_set_dmamode() 258 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); artop6260_set_dmamode() 266 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); artop6260_set_dmamode() 278 struct ata_host *host = qc->ap->host; artop6210_qc_defer() 279 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; artop6210_qc_defer()
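artop6260_set_piomode()/set_dmamode() above read the per-port UltraDMA byte at PCI config offset 0x44 + port_no, adjust it, and write it back. A sketch of that read-modify-write; the listing only shows the read and the write, so the per-device nibble layout used below is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

static uint8_t fake_pci_cfg[256];

static void set_udma_nibble(int port_no, int devno, uint8_t mode)
{
        uint8_t ultra = fake_pci_cfg[0x44 + port_no];

        ultra &= devno ? 0x0f : 0xf0;           /* clear this device's nibble (assumed layout) */
        ultra |= mode << (devno ? 4 : 0);       /* program the new mode */
        fake_pci_cfg[0x44 + port_no] = ultra;
}

int main(void)
{
        set_udma_nibble(0, 0, 0x3);
        set_udma_nibble(0, 1, 0x5);
        printf("cfg[0x44] = 0x%02x\n", fake_pci_cfg[0x44]);   /* 0x53 */
        return 0;
}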
|
H A D | pata_efar.c | 42 struct ata_port *ap = link->ap; efar_pre_reset() local 43 struct pci_dev *pdev = to_pci_dev(ap->host->dev); efar_pre_reset() 45 if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) efar_pre_reset() 53 * @ap: Port 59 static int efar_cable_detect(struct ata_port *ap) efar_cable_detect() argument 61 struct pci_dev *pdev = to_pci_dev(ap->host->dev); efar_cable_detect() 65 if (tmp & (2 >> ap->port_no)) efar_cable_detect() 74 * @ap: Port whose timings we are configuring 83 static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) efar_set_piomode() argument 86 struct pci_dev *dev = to_pci_dev(ap->host->dev); efar_set_piomode() 87 unsigned int master_port = ap->port_no ? 0x42 : 0x40; efar_set_piomode() 124 int shift = 4 * ap->port_no; efar_set_piomode() 132 slave_data &= ap->port_no ? 0x0F : 0xF0; efar_set_piomode() 141 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); efar_set_piomode() 148 * @ap: Port whose timings we are configuring 157 static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) efar_set_dmamode() argument 159 struct pci_dev *dev = to_pci_dev(ap->host->dev); efar_set_dmamode() 160 u8 master_port = ap->port_no ? 0x42 : 0x40; efar_set_dmamode() 163 int devid = adev->devno + 2 * ap->port_no; efar_set_dmamode() 217 slave_data &= ap->port_no ? 0x0F : 0xF0; efar_set_dmamode() 219 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); efar_set_dmamode()
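The end of efar_set_dmamode() above packs both ports' slave timings into one byte, one nibble per port: clear this port's nibble, then OR in the two timing-table values. A standalone rendering of exactly that bit manipulation (the timing values passed in main() are hypothetical):

#include <stdio.h>
#include <stdint.h>

static uint8_t update_slave_data(uint8_t slave_data, int port_no,
                                 uint8_t t0, uint8_t t1)
{
        slave_data &= port_no ? 0x0F : 0xF0;                 /* keep the other port */
        slave_data |= ((t0 << 2) | t1) << (port_no ? 4 : 0);
        return slave_data;
}

int main(void)
{
        uint8_t v = 0x00;

        v = update_slave_data(v, 0, 1, 0);     /* port 0 occupies the low nibble  */
        v = update_slave_data(v, 1, 2, 1);     /* port 1 occupies the high nibble */
        printf("slave_data = 0x%02x\n", v);    /* 0x94 */
        return 0;
}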
|
H A D | pata_mpiix.c | 50 struct ata_port *ap = link->ap; mpiix_pre_reset() local 51 struct pci_dev *pdev = to_pci_dev(ap->host->dev); mpiix_pre_reset() 62 * @ap: ATA interface 75 static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) mpiix_set_piomode() argument 79 struct pci_dev *pdev = to_pci_dev(ap->host->dev); mpiix_set_piomode() 106 /* We use ap->private_data as a pointer to the device currently mpiix_set_piomode() 108 ap->private_data = adev; mpiix_set_piomode() 124 struct ata_port *ap = qc->ap; mpiix_qc_issue() local 132 if (adev->pio_mode && adev != ap->private_data) mpiix_qc_issue() 133 mpiix_set_piomode(ap, adev); mpiix_qc_issue() 155 struct ata_port *ap; mpiix_init_one() local 165 ap = host->ports[0]; mpiix_init_one() 191 ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl); mpiix_init_one() 199 ap->ops = &mpiix_port_ops; mpiix_init_one() 200 ap->pio_mask = ATA_PIO4; mpiix_init_one() 201 ap->flags |= ATA_FLAG_SLAVE_POSS; mpiix_init_one() 203 ap->ioaddr.cmd_addr = cmd_addr; mpiix_init_one() 204 ap->ioaddr.ctl_addr = ctl_addr; mpiix_init_one() 205 ap->ioaddr.altstatus_addr = ctl_addr; mpiix_init_one() 208 ata_sff_std_ports(&ap->ioaddr); mpiix_init_one()
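mpiix_qc_issue() above uses ap->private_data as a "last programmed device" cache: the shared timing registers are rewritten only when a command goes to a different device than the one set up last (pata_ns87410.c later in this listing uses the same trick). A small sketch of that pattern:

#include <stdio.h>

struct device { const char *name; int pio_mode; };
struct port   { struct device *last; };

static void set_piomode(struct port *ap, struct device *adev)
{
        printf("reprogram timings for %s\n", adev->name);
        ap->last = adev;                 /* mirrors ap->private_data = adev */
}

static void qc_issue(struct port *ap, struct device *adev)
{
        if (adev->pio_mode && adev != ap->last)
                set_piomode(ap, adev);
        printf("issue command to %s\n", adev->name);
}

int main(void)
{
        struct device master = { "master", 1 }, slave = { "slave", 1 };
        struct port ap = { NULL };

        qc_issue(&ap, &master);   /* reprograms */
        qc_issue(&ap, &master);   /* cached, no reprogram */
        qc_issue(&ap, &slave);    /* device switch, reprograms */
        return 0;
}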
|
H A D | sata_svw.c | 56 /* ap->flags bits */ 95 static u8 k2_stat_check_status(struct ata_port *ap); 102 if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) k2_sata_check_atapi_dma() 126 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); k2_sata_scr_read() 136 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); k2_sata_scr_write() 144 void __iomem *mmio = link->ap->ioaddr.bmdma_addr; k2_sata_softreset() 161 void __iomem *mmio = link->ap->ioaddr.bmdma_addr; k2_sata_hardreset() 174 static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) k2_sata_tf_load() argument 176 struct ata_ioports *ioaddr = &ap->ioaddr; k2_sata_tf_load() 179 if (tf->ctl != ap->last_ctl) { k2_sata_tf_load() 181 ap->last_ctl = tf->ctl; k2_sata_tf_load() 182 ata_wait_idle(ap); k2_sata_tf_load() 206 ata_wait_idle(ap); k2_sata_tf_load() 210 static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) k2_sata_tf_read() argument 212 struct ata_ioports *ioaddr = &ap->ioaddr; k2_sata_tf_read() 215 tf->command = k2_stat_check_status(ap); k2_sata_tf_read() 248 struct ata_port *ap = qc->ap; k2_bmdma_setup_mmio() local 251 void __iomem *mmio = ap->ioaddr.bmdma_addr; k2_bmdma_setup_mmio() 255 writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS); k2_bmdma_setup_mmio() 266 ap->ops->sff_exec_command(ap, &qc->tf); k2_bmdma_setup_mmio() 279 struct ata_port *ap = qc->ap; k2_bmdma_start_mmio() local 280 void __iomem *mmio = ap->ioaddr.bmdma_addr; k2_bmdma_start_mmio() 310 ap->ops->sff_exec_command(ap, &qc->tf); k2_bmdma_start_mmio() 314 static u8 k2_stat_check_status(struct ata_port *ap) k2_stat_check_status() argument 316 return readl(ap->ioaddr.status_addr); k2_stat_check_status() 321 struct ata_port *ap; k2_sata_show_info() local 326 ap = ata_shost_to_port(shost); k2_sata_show_info() 327 if (ap == NULL) k2_sata_show_info() 331 np = pci_device_to_OF_node(to_pci_dev(ap->host->dev)); k2_sata_show_info() 336 index = (ap == ap->host->ports[0]) ? 0 : 1; k2_sata_show_info() 481 struct ata_port *ap = host->ports[i]; k2_sata_init_one() local 484 k2_sata_setup_port(&ap->ioaddr, mmio_base + offset); k2_sata_init_one() 486 ata_port_pbar_desc(ap, 5, -1, "mmio"); k2_sata_init_one() 487 ata_port_pbar_desc(ap, 5, offset, "port"); k2_sata_init_one()
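k2_sata_scr_read()/k2_sata_scr_write() above address the SATA SCR registers as consecutive 32-bit slots: register index sc_reg maps to byte offset sc_reg * 4 from scr_addr. A sketch of that accessor math with a fake register block in place of the mapped window (the value written in main() is arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_scr_block[16];

static uint32_t scr_read(unsigned int sc_reg)
{
        return fake_scr_block[sc_reg];        /* readl(scr_addr + sc_reg * 4) */
}

static void scr_write(unsigned int sc_reg, uint32_t val)
{
        fake_scr_block[sc_reg] = val;         /* writel(val, scr_addr + sc_reg * 4) */
}

int main(void)
{
        scr_write(2, 0x123);                  /* index 2 at byte offset 8 */
        printf("SCR[2] reads back 0x%x at byte offset %u\n", scr_read(2), 2 * 4);
        return 0;
}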
|
H A D | pata_imx.c | 46 struct ata_port *ap = link->ap; pata_imx_set_mode() local 47 struct pata_imx_priv *priv = ap->host->private_data; pata_imx_set_mode() 96 struct ata_port *ap; pata_imx_probe() local 128 ap = host->ports[0]; pata_imx_probe() 130 ap->ops = &pata_imx_port_ops; pata_imx_probe() 131 ap->pio_mask = ATA_PIO0; pata_imx_probe() 132 ap->flags |= ATA_FLAG_SLAVE_POSS; pata_imx_probe() 141 ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA; pata_imx_probe() 142 ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL; pata_imx_probe() 144 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; pata_imx_probe() 146 pata_imx_setup_port(&ap->ioaddr); pata_imx_probe() 148 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", pata_imx_probe()
|
H A D | pata_ns87410.c | 41 struct ata_port *ap = link->ap; ns87410_pre_reset() local 42 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ns87410_pre_reset() 48 if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) ns87410_pre_reset() 56 * @ap: ATA interface 63 static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) ns87410_set_piomode() argument 65 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ns87410_set_piomode() 66 int port = 0x40 + 4 * ap->port_no; ns87410_set_piomode() 100 /* We use ap->private_data as a pointer to the device currently ns87410_set_piomode() 102 ap->private_data = adev; ns87410_set_piomode() 116 struct ata_port *ap = qc->ap; ns87410_qc_issue() local 124 if (adev->pio_mode && adev != ap->private_data) ns87410_qc_issue() 125 ns87410_set_piomode(ap, adev); ns87410_qc_issue()
|
H A D | pata_ep93xx.c | 329 static void ep93xx_pata_set_piomode(struct ata_port *ap, ep93xx_pata_set_piomode() argument 332 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_set_piomode() 357 static u8 ep93xx_pata_check_status(struct ata_port *ap) ep93xx_pata_check_status() argument 359 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_check_status() 364 static u8 ep93xx_pata_check_altstatus(struct ata_port *ap) ep93xx_pata_check_altstatus() argument 366 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_check_altstatus() 372 static void ep93xx_pata_tf_load(struct ata_port *ap, ep93xx_pata_tf_load() argument 375 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_tf_load() 378 if (tf->ctl != ap->last_ctl) { ep93xx_pata_tf_load() 380 ap->last_ctl = tf->ctl; ep93xx_pata_tf_load() 381 ata_wait_idle(ap); ep93xx_pata_tf_load() 410 ata_wait_idle(ap); ep93xx_pata_tf_load() 414 static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) ep93xx_pata_tf_read() argument 416 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_tf_read() 418 tf->command = ep93xx_pata_check_status(ap); ep93xx_pata_tf_read() 440 ap->last_ctl = tf->ctl; ep93xx_pata_tf_read() 445 static void ep93xx_pata_exec_command(struct ata_port *ap, ep93xx_pata_exec_command() argument 448 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_exec_command() 452 ata_sff_pause(ap); ep93xx_pata_exec_command() 456 static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device) ep93xx_pata_dev_select() argument 458 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_dev_select() 465 ata_sff_pause(ap); /* needed; also flushes, for mmio */ ep93xx_pata_dev_select() 469 static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl) ep93xx_pata_set_devctl() argument 471 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_set_devctl() 481 struct ata_port *ap = adev->link->ap; ep93xx_pata_data_xfer() local 482 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_data_xfer() 519 static bool ep93xx_pata_device_is_present(struct ata_port *ap, ep93xx_pata_device_is_present() argument 522 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_device_is_present() 525 ap->ops->sff_dev_select(ap, device); ep93xx_pata_device_is_present() 550 struct ata_port *ap = link->ap; ep93xx_pata_wait_after_reset() local 551 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_wait_after_reset() 556 ata_msleep(ap, ATA_WAIT_AFTER_RESET); ep93xx_pata_wait_after_reset() 574 ap->ops->sff_dev_select(ap, 1); ep93xx_pata_wait_after_reset() 601 ap->ops->sff_dev_select(ap, 0); ep93xx_pata_wait_after_reset() 603 ap->ops->sff_dev_select(ap, 1); ep93xx_pata_wait_after_reset() 605 ap->ops->sff_dev_select(ap, 0); ep93xx_pata_wait_after_reset() 611 static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask, ep93xx_pata_bus_softreset() argument 614 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_bus_softreset() 616 ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL); ep93xx_pata_bus_softreset() 618 ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL); ep93xx_pata_bus_softreset() 620 ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL); ep93xx_pata_bus_softreset() 621 ap->last_ctl = ap->ctl; ep93xx_pata_bus_softreset() 623 return ep93xx_pata_wait_after_reset(&ap->link, 
devmask, deadline); ep93xx_pata_bus_softreset() 704 struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; ep93xx_pata_dma_start() 714 dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n"); ep93xx_pata_dma_start() 721 dev_err(qc->ap->dev, "failed to submit dma transfer\n"); ep93xx_pata_dma_start() 744 struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; ep93xx_pata_dma_stop() 762 ata_sff_dma_pause(qc->ap); ep93xx_pata_dma_stop() 767 qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); ep93xx_pata_dma_setup() 770 static u8 ep93xx_pata_dma_status(struct ata_port *ap) ep93xx_pata_dma_status() argument 772 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_dma_status() 806 struct ata_port *ap = al->ap; ep93xx_pata_softreset() local 807 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; ep93xx_pata_softreset() 813 if (ep93xx_pata_device_is_present(ap, 0)) ep93xx_pata_softreset() 815 if (slave_possible && ep93xx_pata_device_is_present(ap, 1)) ep93xx_pata_softreset() 819 ap->ops->sff_dev_select(al->ap, 0); ep93xx_pata_softreset() 822 rc = ep93xx_pata_bus_softreset(ap, devmask, deadline); ep93xx_pata_softreset() 843 struct ata_port *ap; ep93xx_pata_drain_fifo() local 850 ap = qc->ap; ep93xx_pata_drain_fifo() 851 drv_data = ap->host->private_data; ep93xx_pata_drain_fifo() 853 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) ep93xx_pata_drain_fifo() 859 ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count); ep93xx_pata_drain_fifo() 863 static int ep93xx_pata_port_start(struct ata_port *ap) ep93xx_pata_port_start() argument 865 struct ep93xx_pata_data *drv_data = ap->host->private_data; ep93xx_pata_port_start() 917 struct ata_port *ap; ep93xx_pata_probe() local 965 ap = host->ports[0]; ep93xx_pata_probe() 966 ap->dev = &pdev->dev; ep93xx_pata_probe() 967 ap->ops = &ep93xx_pata_port_ops; ep93xx_pata_probe() 968 ap->flags |= ATA_FLAG_SLAVE_POSS; ep93xx_pata_probe() 969 ap->pio_mask = ATA_PIO4; ep93xx_pata_probe() 984 ap->udma_mask = ATA_UDMA3; ep93xx_pata_probe() 986 ap->udma_mask = ATA_UDMA4; ep93xx_pata_probe() 988 ap->udma_mask = ATA_UDMA2; ep93xx_pata_probe()
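ep93xx_pata_drain_fifo() above (and the sata_rcar version earlier in this listing) keeps reading the data register while the device asserts DRQ, but bounds the loop so a stuck device cannot hang the handler. A sketch of that loop with a small simulated device in place of the real status/data registers; the 65536 bound matches the usual libata drain loops, since the listing truncates the condition.

#include <stdio.h>
#include <stdint.h>

#define ATA_DRQ 0x08    /* data-request bit in the ATA status register */

static int words_left = 7;

static uint8_t  check_status(void) { return words_left ? ATA_DRQ : 0; }
static uint16_t read_data(void)    { if (words_left) words_left--; return 0; }

int main(void)
{
        int count;

        for (count = 0; (check_status() & ATA_DRQ) && count < 65536; count += 2)
                (void)read_data();            /* discard a data word */

        printf("drained %d bytes to clear DRQ\n", count);   /* 14 */
        return 0;
}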
|
H A D | libata-core.c | 185 * @ap: ATA port containing links to iterate 194 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, ata_link_next() argument 205 if (sata_pmp_attached(ap)) ata_link_next() 206 return ap->pmp_link; ata_link_next() 209 return &ap->link; ata_link_next() 213 if (link == &ap->link) ata_link_next() 216 if (sata_pmp_attached(ap)) ata_link_next() 217 return ap->pmp_link; ata_link_next() 220 if (unlikely(ap->slave_link)) ata_link_next() 221 return ap->slave_link; ata_link_next() 228 if (unlikely(link == ap->slave_link)) ata_link_next() 232 if (++link < ap->pmp_link + ap->nr_pmp_links) ata_link_next() 236 return &ap->link; ata_link_next() 310 struct ata_port *ap = dev->link->ap; ata_dev_phys_link() local 312 if (!ap->slave_link) ata_dev_phys_link() 315 return &ap->link; ata_dev_phys_link() 316 return ap->slave_link; ata_dev_phys_link() 321 * @ap: ATA port of interest 332 void ata_force_cbl(struct ata_port *ap) ata_force_cbl() argument 339 if (fe->port != -1 && fe->port != ap->print_id) ata_force_cbl() 345 ap->cbl = fe->param.cbl; ata_force_cbl() 346 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); ata_force_cbl() 379 if (fe->port != -1 && fe->port != link->ap->print_id) ata_force_link_limits() 428 if (fe->port != -1 && fe->port != dev->link->ap->print_id) ata_force_xfermask() 481 if (fe->port != -1 && fe->port != dev->link->ap->print_id) ata_force_horkage() 665 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { ata_rwcmd_protocol() 1559 struct ata_port *ap = link->ap; ata_exec_internal_sg() local 1571 spin_lock_irqsave(ap->lock, flags); ata_exec_internal_sg() 1574 if (ap->pflags & ATA_PFLAG_FROZEN) { ata_exec_internal_sg() 1575 spin_unlock_irqrestore(ap->lock, flags); ata_exec_internal_sg() 1586 if (ap->ops->error_handler) ata_exec_internal_sg() 1591 qc = __ata_qc_from_tag(ap, tag); ata_exec_internal_sg() 1595 qc->ap = ap; ata_exec_internal_sg() 1601 preempted_qc_active = ap->qc_active; ata_exec_internal_sg() 1602 preempted_nr_active_links = ap->nr_active_links; ata_exec_internal_sg() 1605 ap->qc_active = 0; ata_exec_internal_sg() 1606 ap->nr_active_links = 0; ata_exec_internal_sg() 1636 spin_unlock_irqrestore(ap->lock, flags); ata_exec_internal_sg() 1647 if (ap->ops->error_handler) ata_exec_internal_sg() 1648 ata_eh_release(ap); ata_exec_internal_sg() 1652 if (ap->ops->error_handler) ata_exec_internal_sg() 1653 ata_eh_acquire(ap); ata_exec_internal_sg() 1655 ata_sff_flush_pio_task(ap); ata_exec_internal_sg() 1658 spin_lock_irqsave(ap->lock, flags); ata_exec_internal_sg() 1668 if (ap->ops->error_handler) ata_exec_internal_sg() 1669 ata_port_freeze(ap); ata_exec_internal_sg() 1673 if (ata_msg_warn(ap)) ata_exec_internal_sg() 1678 spin_unlock_irqrestore(ap->lock, flags); ata_exec_internal_sg() 1682 if (ap->ops->post_internal_cmd) ata_exec_internal_sg() 1683 ap->ops->post_internal_cmd(qc); ata_exec_internal_sg() 1698 spin_lock_irqsave(ap->lock, flags); ata_exec_internal_sg() 1706 ap->qc_active = preempted_qc_active; ata_exec_internal_sg() 1707 ap->nr_active_links = preempted_nr_active_links; ata_exec_internal_sg() 1709 spin_unlock_irqrestore(ap->lock, flags); ata_exec_internal_sg() 1768 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) ata_pio_need_iordy() 1773 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) ata_pio_need_iordy() 1852 struct ata_port *ap = dev->link->ap; ata_dev_read_id() local 1861 if (ata_msg_ctl(ap)) ata_dev_read_id() 1895 if (ap->ops->read_id) ata_dev_read_id() 1896 err_mask = ap->ops->read_id(dev, 
&tf, id); ata_dev_read_id() 1966 if (ap->host->flags & ATA_HOST_IGNORE_ATA && ata_dev_read_id() 2032 if (ata_msg_warn(ap)) ata_dev_read_id() 2073 struct ata_port *ap = dev->link->ap; ata_dev_knobble() local 2078 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); ata_dev_knobble() 2084 struct ata_port *ap = dev->link->ap; ata_dev_config_ncq() local 2097 if (ap->flags & ATA_FLAG_NCQ) { ata_dev_config_ncq() 2098 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); ata_dev_config_ncq() 2103 (ap->flags & ATA_FLAG_FPDMA_AA) && ata_dev_config_ncq() 2125 if ((ap->flags & ATA_FLAG_FPDMA_AUX) && ata_dev_config_ncq() 2128 0, ap->sector_buf, 1); ata_dev_config_ncq() 2137 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); ata_dev_config_ncq() 2165 struct ata_port *ap = dev->link->ap; ata_dev_configure() local 2176 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { ata_dev_configure() 2181 if (ata_msg_probe(ap)) ata_dev_configure() 2194 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && ata_dev_configure() 2214 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; ata_dev_configure() 2228 if (ata_msg_probe(ap)) ata_dev_configure() 2253 if (ata_msg_probe(ap)) ata_dev_configure() 2312 if (ata_msg_drv(ap) && print_info) { ata_dev_configure() 2337 if (ata_msg_drv(ap) && print_info) { ata_dev_configure() 2353 u8 *sata_setting = ap->sector_buf; ata_dev_configure() 2385 if (ata_msg_warn(ap)) ata_dev_configure() 2398 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && ata_dev_configure() 2399 (!sata_pmp_attached(ap) || ata_dev_configure() 2400 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { ata_dev_configure() 2430 if (ata_msg_drv(ap) && print_info) ata_dev_configure() 2447 if (ata_msg_drv(ap) && print_info) ata_dev_configure() 2470 if (ap->ops->dev_config) ata_dev_configure() 2471 ap->ops->dev_config(dev); ata_dev_configure() 2496 if (ata_msg_probe(ap)) ata_dev_configure() 2503 * @ap: port 2509 int ata_cable_40wire(struct ata_port *ap) ata_cable_40wire() argument 2516 * @ap: port 2522 int ata_cable_80wire(struct ata_port *ap) ata_cable_80wire() argument 2529 * @ap: port 2534 int ata_cable_unknown(struct ata_port *ap) ata_cable_unknown() argument 2541 * @ap: port 2546 int ata_cable_ignore(struct ata_port *ap) ata_cable_ignore() argument 2553 * @ap: port 2558 int ata_cable_sata(struct ata_port *ap) ata_cable_sata() argument 2565 * @ap: Bus to probe 2578 int ata_bus_probe(struct ata_port *ap) ata_bus_probe() argument 2585 ata_for_each_dev(dev, &ap->link, ALL) ata_bus_probe() 2589 ata_for_each_dev(dev, &ap->link, ALL) { ata_bus_probe() 2605 if (ap->ops->set_piomode) ata_bus_probe() 2606 ap->ops->set_piomode(ap, dev); ata_bus_probe() 2610 ap->ops->phy_reset(ap); ata_bus_probe() 2612 ata_for_each_dev(dev, &ap->link, ALL) { ata_bus_probe() 2625 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { ata_bus_probe() 2639 if (ap->ops->cable_detect) ata_bus_probe() 2640 ap->cbl = ap->ops->cable_detect(ap); ata_bus_probe() 2647 ata_for_each_dev(dev, &ap->link, ENABLED) ata_bus_probe() 2649 ap->cbl = ATA_CBL_SATA; ata_bus_probe() 2654 ata_for_each_dev(dev, &ap->link, ENABLED) { ata_bus_probe() 2655 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; ata_bus_probe() 2657 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; ata_bus_probe() 2663 rc = ata_set_mode(&ap->link, &dev); ata_bus_probe() 2667 ata_for_each_dev(dev, &ap->link, ENABLED) ata_bus_probe() 2689 sata_down_spd_limit(&ap->link, 0); ata_bus_probe() 2820 struct ata_link *host_link = &link->ap->link; __sata_set_spd_needed() 
3205 struct ata_port *ap = dev->link->ap; ata_dev_set_mode() local 3217 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) ata_dev_set_mode() 3303 struct ata_port *ap = link->ap; ata_do_set_mode() local 3349 if (ap->ops->set_piomode) ata_for_each_dev() 3350 ap->ops->set_piomode(ap, dev); ata_for_each_dev() 3360 if (ap->ops->set_dmamode) ata_for_each_dev() 3361 ap->ops->set_dmamode(ap, dev); ata_for_each_dev() 3374 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3375 ap->host->simplex_claimed = ap; 3411 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) ata_wait_ready() 3420 WARN_ON(link == link->ap->slave_link); ata_wait_ready() 3447 else if ((link->ap->flags & ATA_FLAG_SATA) && ata_wait_ready() 3466 ata_msleep(link->ap, 50); ata_wait_ready() 3487 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); ata_wait_after_reset() 3535 ata_msleep(link->ap, interval); sata_link_debounce() 3600 ata_msleep(link->ap, 200); sata_link_resume() 3718 struct ata_port *ap = link->ap; ata_std_prereset() local 3728 if (ap->flags & ATA_FLAG_SATA) { ata_std_prereset() 3809 ata_msleep(link->ap, 1); sata_link_hardreset() 3823 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { sata_link_hardreset() 3976 u16 *id = (void *)dev->link->ap->sector_buf; ata_dev_reread_id() 4313 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && ata_dma_blacklisted() 4336 * @ap: port to consider 4340 * there is a good case for setting ap->cbl to the result when 4347 static int cable_is_40wire(struct ata_port *ap) cable_is_40wire() argument 4353 if (ap->cbl == ATA_CBL_PATA40) cable_is_40wire() 4357 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) cable_is_40wire() 4364 if (ap->cbl == ATA_CBL_PATA40_SHORT) cable_is_40wire() 4376 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 4400 struct ata_port *ap = link->ap; ata_dev_xfermask() local 4401 struct ata_host *host = ap->host; ata_dev_xfermask() 4405 xfer_mask = ata_pack_xfermask(ap->pio_mask, ata_dev_xfermask() 4406 ap->mwdma_mask, ap->udma_mask); ata_dev_xfermask() 4431 host->simplex_claimed && host->simplex_claimed != ap) { ata_dev_xfermask() 4437 if (ap->flags & ATA_FLAG_NO_IORDY) ata_dev_xfermask() 4440 if (ap->ops->mode_filter) ata_dev_xfermask() 4441 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); ata_dev_xfermask() 4453 if (cable_is_40wire(ap)) { ata_dev_xfermask() 4468 * on port @ap. 
4516 * on port @ap with sector count 4600 struct ata_port *ap = qc->ap; ata_sg_clean() local 4609 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); ata_sg_clean() 4631 struct ata_port *ap = qc->ap; atapi_check_dma() local 4640 if (ap->ops->check_atapi_dma) atapi_check_dma() 4641 return ap->ops->check_atapi_dma(qc); atapi_check_dma() 4714 struct ata_port *ap = qc->ap; ata_sg_setup() local 4717 VPRINTK("ENTER, ata%u\n", ap->print_id); ata_sg_setup() 4719 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); ata_sg_setup() 4763 struct ata_port *ap = dev->link->ap; ata_qc_new_init() local 4767 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) ata_qc_new_init() 4771 if (ap->flags & ATA_FLAG_SAS_HOST) { ata_qc_new_init() 4772 tag = ata_sas_allocate_tag(ap); ata_qc_new_init() 4777 qc = __ata_qc_from_tag(ap, tag); ata_qc_new_init() 4780 qc->ap = ap; ata_qc_new_init() 4800 struct ata_port *ap; ata_qc_free() local 4804 ap = qc->ap; ata_qc_free() 4810 if (ap->flags & ATA_FLAG_SAS_HOST) ata_qc_free() 4811 ata_sas_free_tag(tag, ap); ata_qc_free() 4817 struct ata_port *ap; __ata_qc_complete() local 4822 ap = qc->ap; __ata_qc_complete() 4832 ap->nr_active_links--; __ata_qc_complete() 4835 ap->nr_active_links--; __ata_qc_complete() 4840 ap->excl_link == link)) __ata_qc_complete() 4841 ap->excl_link = NULL; __ata_qc_complete() 4848 ap->qc_active &= ~(1 << qc->tag); __ata_qc_complete() 4856 struct ata_port *ap = qc->ap; fill_result_tf() local 4859 ap->ops->qc_fill_rtf(qc); fill_result_tf() 4892 struct ata_port *ap = qc->ap; ata_qc_complete() local 4907 if (ap->ops->error_handler) { ata_qc_complete() 4936 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); ata_qc_complete() 4956 ata_port_schedule_eh(ap); ata_qc_complete() 4982 * @ap: port in question 4987 * requests normally. ap->qc_active and @qc_active is compared 5000 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) ata_qc_complete_multiple() argument 5005 done_mask = ap->qc_active ^ qc_active; ata_qc_complete_multiple() 5008 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", ata_qc_complete_multiple() 5009 ap->qc_active, qc_active); ata_qc_complete_multiple() 5017 qc = ata_qc_from_tag(ap, tag); ata_qc_complete_multiple() 5042 struct ata_port *ap = qc->ap; ata_qc_issue() local 5050 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); ata_qc_issue() 5056 ap->nr_active_links++; ata_qc_issue() 5061 ap->nr_active_links++; ata_qc_issue() 5066 ap->qc_active |= 1 << qc->tag; ata_qc_issue() 5077 (ap->flags & ATA_FLAG_PIO_DMA))) ata_qc_issue() 5089 ap->ops->qc_prep(qc); ata_qc_issue() 5091 qc->err_mask |= ap->ops->qc_issue(qc); ata_qc_issue() 5116 struct ata_port *ap = link->ap; sata_scr_valid() local 5118 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; sata_scr_valid() 5128 * guaranteed to succeed if @link is ap->link, the cable type of 5132 * None if @link is ap->link. Kernel thread context otherwise. 5141 return link->ap->ops->scr_read(link, reg, val); sata_scr_read() 5155 * guaranteed to succeed if @link is ap->link, the cable type of 5159 * None if @link is ap->link. Kernel thread context otherwise. 5168 return link->ap->ops->scr_write(link, reg, val); sata_scr_write() 5185 * None if @link is ap->link. Kernel thread context otherwise. 
5196 rc = link->ap->ops->scr_write(link, reg, val); sata_scr_write_flush() 5198 rc = link->ap->ops->scr_read(link, reg, &val); sata_scr_write_flush() 5273 struct ata_link *slave = link->ap->slave_link; ata_link_online() 5299 struct ata_link *slave = link->ap->slave_link; ata_link_offline() 5308 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, ata_port_request_pm() argument 5318 if (ap->pflags & ATA_PFLAG_PM_PENDING) { ata_port_request_pm() 5319 ata_port_wait_eh(ap); ata_port_request_pm() 5320 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); ata_port_request_pm() 5324 spin_lock_irqsave(ap->lock, flags); ata_port_request_pm() 5326 ap->pm_mesg = mesg; ata_port_request_pm() 5327 ap->pflags |= ATA_PFLAG_PM_PENDING; ata_for_each_link() 5328 ata_for_each_link(link, ap, HOST_FIRST) { ata_for_each_link() 5333 ata_port_schedule_eh(ap); 5335 spin_unlock_irqrestore(ap->lock, flags); 5338 ata_port_wait_eh(ap); 5339 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5354 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) ata_port_suspend() argument 5356 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); ata_port_suspend() 5359 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) ata_port_suspend_async() argument 5361 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); ata_port_suspend_async() 5366 struct ata_port *ap = to_ata_port(dev); ata_port_pm_suspend() local 5371 ata_port_suspend(ap, PMSG_SUSPEND); ata_port_pm_suspend() 5377 struct ata_port *ap = to_ata_port(dev); ata_port_pm_freeze() local 5382 ata_port_suspend(ap, PMSG_FREEZE); ata_port_pm_freeze() 5395 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) ata_port_resume() argument 5397 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); ata_port_resume() 5400 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) ata_port_resume_async() argument 5402 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); ata_port_resume_async() 5424 struct ata_port *ap = to_ata_port(dev); ata_port_runtime_idle() local 5428 ata_for_each_link(link, ap, HOST_FIRST) { ata_for_each_link() 5468 void ata_sas_port_suspend(struct ata_port *ap) ata_sas_port_suspend() argument 5470 ata_port_suspend_async(ap, PMSG_SUSPEND); ata_sas_port_suspend() 5474 void ata_sas_port_resume(struct ata_port *ap) ata_sas_port_resume() argument 5476 ata_port_resume_async(ap, PMSG_RESUME); ata_sas_port_resume() 5524 struct ata_port *ap = link->ap; ata_dev_init() local 5535 spin_lock_irqsave(ap->lock, flags); ata_dev_init() 5538 spin_unlock_irqrestore(ap->lock, flags); ata_dev_init() 5549 * @ap: ATA port link is attached to 5558 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) ata_link_init() argument 5566 link->ap = ap; ata_link_init() 5571 /* can't use iterator, ap isn't initialized yet */ ata_link_init() 5631 struct ata_port *ap; ata_port_alloc() local 5635 ap = kzalloc(sizeof(*ap), GFP_KERNEL); ata_port_alloc() 5636 if (!ap) ata_port_alloc() 5639 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; ata_port_alloc() 5640 ap->lock = &host->lock; ata_port_alloc() 5641 ap->print_id = -1; ata_port_alloc() 5642 ap->local_port_no = -1; ata_port_alloc() 5643 ap->host = host; ata_port_alloc() 5644 ap->dev = host->dev; ata_port_alloc() 5648 ap->msg_enable = 0x00FF; ata_port_alloc() 5650 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; ata_port_alloc() 5652 ap->msg_enable = ATA_MSG_DRV | 
ATA_MSG_ERR | ATA_MSG_WARN; ata_port_alloc() 5655 mutex_init(&ap->scsi_scan_mutex); ata_port_alloc() 5656 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); ata_port_alloc() 5657 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); ata_port_alloc() 5658 INIT_LIST_HEAD(&ap->eh_done_q); ata_port_alloc() 5659 init_waitqueue_head(&ap->eh_wait_q); ata_port_alloc() 5660 init_completion(&ap->park_req_pending); ata_port_alloc() 5661 init_timer_deferrable(&ap->fastdrain_timer); ata_port_alloc() 5662 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; ata_port_alloc() 5663 ap->fastdrain_timer.data = (unsigned long)ap; ata_port_alloc() 5665 ap->cbl = ATA_CBL_NONE; ata_port_alloc() 5667 ata_link_init(ap, &ap->link, 0); ata_port_alloc() 5670 ap->stats.unhandled_irq = 1; ata_port_alloc() 5671 ap->stats.idle_irq = 1; ata_port_alloc() 5673 ata_sff_port_init(ap); ata_port_alloc() 5675 return ap; ata_port_alloc() 5684 struct ata_port *ap = host->ports[i]; ata_host_release() local 5686 if (!ap) ata_host_release() 5689 if (ap->scsi_host) ata_host_release() 5690 scsi_host_put(ap->scsi_host); ata_host_release() 5692 kfree(ap->pmp_link); ata_host_release() 5693 kfree(ap->slave_link); ata_host_release() 5694 kfree(ap); ata_host_release() 5749 struct ata_port *ap; ata_host_alloc() local 5751 ap = ata_port_alloc(host); ata_host_alloc() 5752 if (!ap) ata_host_alloc() 5755 ap->port_no = i; ata_host_alloc() 5756 host->ports[i] = ap; ata_host_alloc() 5796 struct ata_port *ap = host->ports[i]; ata_host_alloc_pinfo() local 5801 ap->pio_mask = pi->pio_mask; ata_host_alloc_pinfo() 5802 ap->mwdma_mask = pi->mwdma_mask; ata_host_alloc_pinfo() 5803 ap->udma_mask = pi->udma_mask; ata_host_alloc_pinfo() 5804 ap->flags |= pi->flags; ata_host_alloc_pinfo() 5805 ap->link.flags |= pi->link_flags; ata_host_alloc_pinfo() 5806 ap->ops = pi->port_ops; ata_host_alloc_pinfo() 5817 * @ap: port to initialize slave link for 5819 * Create and initialize slave link for @ap. This enables slave 5842 * handling, separate @ap->slave_link is used. 
All dirty details 5861 int ata_slave_link_init(struct ata_port *ap) ata_slave_link_init() argument 5865 WARN_ON(ap->slave_link); ata_slave_link_init() 5866 WARN_ON(ap->flags & ATA_FLAG_PMP); ata_slave_link_init() 5872 ata_link_init(ap, link, 1); ata_slave_link_init() 5873 ap->slave_link = link; ata_slave_link_init() 5885 struct ata_port *ap = host->ports[i]; ata_host_stop() local 5887 if (ap->ops->port_stop) ata_host_stop() 5888 ap->ops->port_stop(ap); ata_host_stop() 5973 struct ata_port *ap = host->ports[i]; ata_host_start() local 5975 ata_finalize_port_ops(ap->ops); ata_host_start() 5977 if (!host->ops && !ata_port_is_dummy(ap)) ata_host_start() 5978 host->ops = ap->ops; ata_host_start() 5980 if (ap->ops->port_stop) ata_host_start() 5994 struct ata_port *ap = host->ports[i]; ata_host_start() local 5996 if (ap->ops->port_start) { ata_host_start() 5997 rc = ap->ops->port_start(ap); ata_host_start() 6006 ata_eh_freeze_port(ap); ata_host_start() 6016 struct ata_port *ap = host->ports[i]; ata_host_start() local 6018 if (ap->ops->port_stop) ata_host_start() 6019 ap->ops->port_stop(ap); ata_host_start() 6042 void __ata_port_probe(struct ata_port *ap) __ata_port_probe() argument 6044 struct ata_eh_info *ehi = &ap->link.eh_info; __ata_port_probe() 6048 spin_lock_irqsave(ap->lock, flags); __ata_port_probe() 6054 ap->pflags &= ~ATA_PFLAG_INITIALIZING; __ata_port_probe() 6055 ap->pflags |= ATA_PFLAG_LOADING; __ata_port_probe() 6056 ata_port_schedule_eh(ap); __ata_port_probe() 6058 spin_unlock_irqrestore(ap->lock, flags); __ata_port_probe() 6061 int ata_port_probe(struct ata_port *ap) ata_port_probe() argument 6065 if (ap->ops->error_handler) { ata_port_probe() 6066 __ata_port_probe(ap); ata_port_probe() 6067 ata_port_wait_eh(ap); ata_port_probe() 6069 DPRINTK("ata%u: bus probe begin\n", ap->print_id); ata_port_probe() 6070 rc = ata_bus_probe(ap); ata_port_probe() 6071 DPRINTK("ata%u: bus probe end\n", ap->print_id); ata_port_probe() 6079 struct ata_port *ap = data; async_port_probe() local 6088 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) async_port_probe() 6091 (void)ata_port_probe(ap); async_port_probe() 6096 ata_scsi_scan_host(ap, 1); async_port_probe() 6155 struct ata_port *ap = host->ports[i]; ata_host_register() local 6159 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) ata_host_register() 6160 ap->cbl = ATA_CBL_SATA; ata_host_register() 6163 sata_link_init_spd(&ap->link); ata_host_register() 6164 if (ap->slave_link) ata_host_register() 6165 sata_link_init_spd(ap->slave_link); ata_host_register() 6168 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, ata_host_register() 6169 ap->udma_mask); ata_host_register() 6171 if (!ata_port_is_dummy(ap)) { ata_host_register() 6172 ata_port_info(ap, "%cATA max %s %s\n", ata_host_register() 6173 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', ata_host_register() 6175 ap->link.eh_info.desc); ata_host_register() 6176 ata_ehi_clear_desc(&ap->link.eh_info); ata_host_register() 6178 ata_port_info(ap, "DUMMY\n"); ata_host_register() 6183 struct ata_port *ap = host->ports[i]; ata_host_register() local 6184 async_schedule(async_port_probe, ap); ata_host_register() 6254 * @ap: ATA port to be detached 6256 * Detach all ATA devices and the associated SCSI devices of @ap; 6257 * then, remove the associated SCSI host. 
@ap is guaranteed to 6263 static void ata_port_detach(struct ata_port *ap) ata_port_detach() argument 6269 if (!ap->ops->error_handler) ata_port_detach() 6273 spin_lock_irqsave(ap->lock, flags); ata_port_detach() 6274 ap->pflags |= ATA_PFLAG_UNLOADING; ata_port_detach() 6275 ata_port_schedule_eh(ap); ata_port_detach() 6276 spin_unlock_irqrestore(ap->lock, flags); ata_port_detach() 6279 ata_port_wait_eh(ap); ata_port_detach() 6282 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); ata_port_detach() 6284 cancel_delayed_work_sync(&ap->hotplug_task); ata_port_detach() 6288 ata_for_each_link(link, ap, HOST_FIRST) { ata_for_each_dev() 6294 if (ap->pmp_link) { 6297 ata_tlink_delete(&ap->pmp_link[i]); 6300 scsi_remove_host(ap->scsi_host); 6301 ata_tport_delete(ap); 6683 * @ap: ATA port to attribute the sleep to 6686 * Sleeps @msecs. If the current task is owner of @ap's EH, the 6689 * @ap->host will be allowed to own the EH while this task is 6695 void ata_msleep(struct ata_port *ap, unsigned int msecs) ata_msleep() argument 6697 bool owns_eh = ap && ap->host->eh_owner == current; ata_msleep() 6700 ata_eh_release(ap); ata_msleep() 6705 ata_eh_acquire(ap); ata_msleep() 6710 * @ap: ATA port to wait register for, can be NULL 6732 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, ata_wait_register() argument 6747 ata_msleep(ap, interval); ata_wait_register() 6794 static void ata_dummy_error_handler(struct ata_port *ap) ata_dummy_error_handler() argument 6814 void ata_port_printk(const struct ata_port *ap, const char *level, ata_port_printk() argument 6825 printk("%sata%u: %pV", level, ap->print_id, &vaf); ata_port_printk() 6842 if (sata_pmp_attached(link->ap) || link->ap->slave_link) ata_link_printk() 6844 level, link->ap->print_id, link->pmp, &vaf); ata_link_printk() 6847 level, link->ap->print_id, &vaf); ata_link_printk() 6865 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, ata_dev_printk()
|
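The libata-core.c excerpts above show ata_qc_complete_multiple() deriving the set of finished commands from the XOR of the driver's and the controller's active-tag bitmasks (done_mask = ap->qc_active ^ qc_active) and then completing one queued command per set bit; sata_fsl.c and sata_mv.c further down feed it exactly that way. A minimal user-space sketch of the bitmask walk, with a hypothetical complete_tag() standing in for the real qc completion path:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for completing one queued command. */
static void complete_tag(unsigned int tag)
{
        printf("completing tag %u\n", tag);
}

/*
 * Sketch of the ata_qc_complete_multiple() walk: 'prev_active' is the
 * driver's view before reading hardware (ap->qc_active), 'now_active'
 * is what the controller reports as still outstanding (qc_active).
 */
static int complete_multiple(uint32_t *prev_active, uint32_t now_active)
{
        uint32_t done_mask = *prev_active ^ now_active;
        int nr_done = 0;

        /* A tag may only go from active to inactive, never the reverse. */
        if (done_mask & now_active) {
                fprintf(stderr, "illegal transition %08x -> %08x\n",
                        *prev_active, now_active);
                return -1;
        }

        while (done_mask) {
                unsigned int tag = __builtin_ctz(done_mask); /* lowest set bit */

                complete_tag(tag);
                *prev_active &= ~(1u << tag);
                done_mask &= ~(1u << tag);
                nr_done++;
        }
        return nr_done;
}

int main(void)
{
        uint32_t active = 0x0000002d;        /* tags 0, 2, 3, 5 in flight */

        /* Controller now reports only tags 0 and 5 still active. */
        complete_multiple(&active, 0x00000021);
        printf("still active: %08x\n", active);
        return 0;
}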
H A D | pata_hpt3x3.c | 29 * @ap: ATA interface 37 static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev) hpt3x3_set_piomode() argument 39 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x3_set_piomode() 41 int dn = 2 * ap->port_no + adev->devno; hpt3x3_set_piomode() 57 * @ap: ATA interface 67 static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) hpt3x3_set_dmamode() argument 69 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt3x3_set_dmamode() 71 int dn = 2 * ap->port_no + adev->devno; hpt3x3_set_dmamode() 92 * @ap: port to freeze 98 static void hpt3x3_freeze(struct ata_port *ap) hpt3x3_freeze() argument 100 void __iomem *mmio = ap->ioaddr.bmdma_addr; hpt3x3_freeze() 104 ata_sff_dma_pause(ap); hpt3x3_freeze() 105 ata_sff_freeze(ap); hpt3x3_freeze() 118 struct ata_port *ap = qc->ap; hpt3x3_bmdma_setup() local 119 u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); hpt3x3_bmdma_setup() 121 iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); hpt3x3_bmdma_setup() 234 struct ata_port *ap = host->ports[i]; hpt3x3_init_one() local 235 struct ata_ioports *ioaddr = &ap->ioaddr; hpt3x3_init_one() 244 ata_port_pbar_desc(ap, 4, -1, "ioport"); hpt3x3_init_one() 245 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); hpt3x3_init_one()
|
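hpt3x3_bmdma_setup() above reads the BMDMA status register and writes the value straight back before starting a transfer; on SFF BMDMA the interrupt and error bits are write-1-to-clear, so the read-back-and-write clears any state latched from an earlier command. A small simulation of that register behaviour (the bit layout here is illustrative, not the HPT343's real map):

#include <stdio.h>
#include <stdint.h>

#define DMA_ACTIVE 0x01   /* read-only status */
#define DMA_ERR    0x02   /* write-1-to-clear */
#define DMA_INTR   0x04   /* write-1-to-clear */

static uint8_t dma_status = DMA_ERR | DMA_INTR;  /* latched from a prior transfer */

static uint8_t read_status(void)
{
        return dma_status;
}

static void write_status(uint8_t val)
{
        /* Writing 1 to ERR/INTR clears them; other bits are unaffected. */
        dma_status &= ~(val & (DMA_ERR | DMA_INTR));
}

int main(void)
{
        uint8_t r = read_status();

        printf("before: %02x\n", r);
        write_status(r);                 /* the read-back-and-write trick */
        printf("after:  %02x\n", read_status());
        return 0;
}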
H A D | pata_optidma.c | 56 struct ata_port *ap = link->ap; optidma_pre_reset() local 57 struct pci_dev *pdev = to_pci_dev(ap->host->dev); optidma_pre_reset() 62 if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) optidma_pre_reset() 70 * @ap: ATA port 76 static void optidma_unlock(struct ata_port *ap) optidma_unlock() argument 78 void __iomem *regio = ap->ioaddr.cmd_addr; optidma_unlock() 88 * @ap: ATA port 93 static void optidma_lock(struct ata_port *ap) optidma_lock() argument 95 void __iomem *regio = ap->ioaddr.cmd_addr; optidma_lock() 103 * @ap: ATA interface 115 static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode) optidma_mode_setup() argument 120 void __iomem *regio = ap->ioaddr.cmd_addr; optidma_mode_setup() 138 optidma_unlock(ap); optidma_mode_setup() 181 optidma_lock(ap); optidma_mode_setup() 190 * @ap: ATA port 200 static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode) optiplus_mode_setup() argument 202 struct pci_dev *pdev = to_pci_dev(ap->host->dev); optiplus_mode_setup() 206 int unit = 2 * ap->port_no + adev->devno; optiplus_mode_setup() 212 optidma_mode_setup(ap, adev, adev->dma_mode); optiplus_mode_setup() 215 if (ap->port_no) { optiplus_mode_setup() 230 * @ap: ATA port 238 static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev) optidma_set_pio_mode() argument 240 optidma_mode_setup(ap, adev, adev->pio_mode); optidma_set_pio_mode() 245 * @ap: ATA port 253 static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev) optidma_set_dma_mode() argument 255 optidma_mode_setup(ap, adev, adev->dma_mode); optidma_set_dma_mode() 260 * @ap: ATA port 268 static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev) optiplus_set_pio_mode() argument 270 optiplus_mode_setup(ap, adev, adev->pio_mode); optiplus_set_pio_mode() 275 * @ap: ATA port 283 static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev) optiplus_set_dma_mode() argument 285 optiplus_mode_setup(ap, adev, adev->dma_mode); optiplus_set_dma_mode() 319 struct ata_port *ap = link->ap; optidma_set_mode() local 321 int nybble = 4 * ap->port_no; optidma_set_mode() 322 struct pci_dev *pdev = to_pci_dev(ap->host->dev); optidma_set_mode()
|
H A D | pata_legacy.c | 267 static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) pdc20230_set_piomode() argument 310 struct ata_port *ap = dev->link->ap; pdc_data_xfer_vlb() local 314 && (ap->pflags & ATA_PFLAG_PIO32)) { pdc_data_xfer_vlb() 320 ioread8(ap->ioaddr.nsect_addr); pdc_data_xfer_vlb() 321 ioread8(ap->ioaddr.nsect_addr); pdc_data_xfer_vlb() 322 ioread8(ap->ioaddr.nsect_addr); pdc_data_xfer_vlb() 326 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); pdc_data_xfer_vlb() 328 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); pdc_data_xfer_vlb() 333 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); pdc_data_xfer_vlb() 337 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); pdc_data_xfer_vlb() 361 static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) ht6560a_set_piomode() argument 377 iowrite8(recover << 4 | active, ap->ioaddr.device_addr); ht6560a_set_piomode() 378 ioread8(ap->ioaddr.status_addr); ht6560a_set_piomode() 395 static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) ht6560b_set_piomode() argument 411 iowrite8(recover << 4 | active, ap->ioaddr.device_addr); ht6560b_set_piomode() 420 ioread8(ap->ioaddr.status_addr); ht6560b_set_piomode() 458 static void opti82c611a_set_piomode(struct ata_port *ap, opti82c611a_set_piomode() argument 469 ioread16(ap->ioaddr.error_addr); opti82c611a_set_piomode() 470 ioread16(ap->ioaddr.error_addr); opti82c611a_set_piomode() 471 iowrite8(3, ap->ioaddr.nsect_addr); opti82c611a_set_piomode() 474 clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03]; opti82c611a_set_piomode() 492 rc = ioread8(ap->ioaddr.lbal_addr); opti82c611a_set_piomode() 495 iowrite8(rc, ap->ioaddr.lbal_addr); opti82c611a_set_piomode() 498 iowrite8(active << 4 | recover, ap->ioaddr.error_addr); opti82c611a_set_piomode() 502 rc = ioread8(ap->ioaddr.device_addr); opti82c611a_set_piomode() 506 iowrite8(rc, ap->ioaddr.device_addr); opti82c611a_set_piomode() 509 iowrite8(active << 4 | recover, ap->ioaddr.data_addr); opti82c611a_set_piomode() 512 rc = ioread8(ap->ioaddr.lbal_addr); opti82c611a_set_piomode() 515 iowrite8(rc, ap->ioaddr.lbal_addr); opti82c611a_set_piomode() 518 iowrite8(0x83, ap->ioaddr.nsect_addr); opti82c611a_set_piomode() 534 static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev) opti82c46x_set_piomode() argument 548 ioread16(ap->ioaddr.error_addr); opti82c46x_set_piomode() 549 ioread16(ap->ioaddr.error_addr); opti82c46x_set_piomode() 550 iowrite8(3, ap->ioaddr.nsect_addr); opti82c46x_set_piomode() 571 rc = ioread8(ap->ioaddr.lbal_addr); opti82c46x_set_piomode() 574 iowrite8(rc, ap->ioaddr.lbal_addr); opti82c46x_set_piomode() 577 iowrite8(active << 4 | recover, ap->ioaddr.error_addr); opti82c46x_set_piomode() 581 rc = ioread8(ap->ioaddr.device_addr); opti82c46x_set_piomode() 585 iowrite8(rc, ap->ioaddr.device_addr); opti82c46x_set_piomode() 588 iowrite8(active << 4 | recover, ap->ioaddr.data_addr); opti82c46x_set_piomode() 591 rc = ioread8(ap->ioaddr.lbal_addr); opti82c46x_set_piomode() 594 iowrite8(rc, ap->ioaddr.lbal_addr); opti82c46x_set_piomode() 597 iowrite8(0x83, ap->ioaddr.nsect_addr); opti82c46x_set_piomode() 600 ap->host->private_data = ap; opti82c46x_set_piomode() 620 struct ata_port *ap = qc->ap; opti82c46x_qc_issue() local 625 if (ap->host->private_data != ap->host opti82c46x_qc_issue() 626 && ap->host->private_data != NULL) opti82c46x_qc_issue() 627 opti82c46x_set_piomode(ap, adev); opti82c46x_qc_issue() 640 * @ap: Port 651 static void 
qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev) qdi65x0_set_piomode() argument 654 struct legacy_data *ld_qdi = ap->host->private_data; qdi65x0_set_piomode() 674 outb(timing, ld_qdi->timing + 2 * ap->port_no); qdi65x0_set_piomode() 691 struct ata_port *ap = qc->ap; qdi_qc_issue() local 693 struct legacy_data *ld_qdi = ap->host->private_data; qdi_qc_issue() 699 2 * ap->port_no); qdi_qc_issue() 708 struct ata_port *ap = adev->link->ap; vlb32_data_xfer() local 712 && (ap->pflags & ATA_PFLAG_PIO32)) { vlb32_data_xfer() 714 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); vlb32_data_xfer() 716 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); vlb32_data_xfer() 722 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); vlb32_data_xfer() 724 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); vlb32_data_xfer() 786 static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev) winbond_set_piomode() argument 789 struct legacy_data *ld_winbond = ap->host->private_data; winbond_set_piomode() 792 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2); winbond_set_piomode() 952 struct ata_port *ap; legacy_init_one() local 983 ap = host->ports[0]; legacy_init_one() 985 ap->ops = ops; legacy_init_one() 986 ap->pio_mask = pio_modes; legacy_init_one() 987 ap->flags |= ATA_FLAG_SLAVE_POSS | iordy; legacy_init_one() 988 ap->pflags |= controller->pflags; legacy_init_one() 989 ap->ioaddr.cmd_addr = io_addr; legacy_init_one() 990 ap->ioaddr.altstatus_addr = ctrl_addr; legacy_init_one() 991 ap->ioaddr.ctl_addr = ctrl_addr; legacy_init_one() 992 ata_sff_std_ports(&ap->ioaddr); legacy_init_one() 993 ap->host->private_data = ld; legacy_init_one() 995 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206); legacy_init_one() 1007 ata_for_each_dev(dev, &ap->link, ALL) { legacy_init_one()
|
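Both pdc_data_xfer_vlb() and vlb32_data_xfer() above move the bulk of a PIO buffer as 32-bit words (buflen >> 2 repetitions on the data register) and then mop up the remaining 1-3 bytes through one padded 32-bit access. A user-space sketch of that split, using a flat memory buffer in place of the data register and leaving out the le32/cpu byte-order conversions the real code performs:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Fake 32-bit data register backed by a flat buffer (device side). */
static uint8_t device_buf[64];
static size_t device_pos;

static uint32_t reg_read32(void)
{
        uint32_t v;

        memcpy(&v, device_buf + device_pos, 4);
        device_pos += 4;
        return v;
}

/* Read 'buflen' bytes: whole 32-bit words first, then the 1-3 byte slop. */
static void data_xfer_read(uint8_t *buf, size_t buflen)
{
        size_t words = buflen >> 2;
        size_t slop = buflen & 3;
        size_t i;

        for (i = 0; i < words; i++) {
                uint32_t v = reg_read32();

                memcpy(buf + 4 * i, &v, 4);
        }
        if (slop) {
                uint32_t pad = reg_read32();    /* one last padded access */

                memcpy(buf + 4 * words, &pad, slop);
        }
}

int main(void)
{
        uint8_t out[11];

        memcpy(device_buf, "hello world....", 15);
        data_xfer_read(out, sizeof(out));
        printf("%.*s\n", (int)sizeof(out), out);
        return 0;
}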
H A D | sata_inic162x.c | 258 static void __iomem *inic_port_base(struct ata_port *ap) inic_port_base() argument 260 struct inic_host_priv *hpriv = ap->host->private_data; inic_port_base() 262 return hpriv->mmio_base + ap->port_no * PORT_SIZE; inic_port_base() 287 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; inic_scr_read() 302 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; inic_scr_write() 311 static void inic_stop_idma(struct ata_port *ap) inic_stop_idma() argument 313 void __iomem *port_base = inic_port_base(ap); inic_stop_idma() 320 static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat) inic_host_err_intr() argument 322 struct ata_eh_info *ehi = &ap->link.eh_info; inic_host_err_intr() 323 struct inic_port_priv *pp = ap->private_data; inic_host_err_intr() 331 inic_stop_idma(ap); inic_host_err_intr() 371 ata_port_freeze(ap); inic_host_err_intr() 373 ata_port_abort(ap); inic_host_err_intr() 376 static void inic_host_intr(struct ata_port *ap) inic_host_intr() argument 378 void __iomem *port_base = inic_port_base(ap); inic_host_intr() 379 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); inic_host_intr() 389 inic_host_err_intr(ap, irq_stat, idma_stat); inic_host_intr() 395 inic_stop_idma(ap); inic_host_intr() 409 ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n", inic_host_intr() 477 struct inic_port_priv *pp = qc->ap->private_data; inic_qc_prep() 539 struct ata_port *ap = qc->ap; inic_qc_issue() local 540 void __iomem *port_base = inic_port_base(ap); inic_qc_issue() 550 static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) inic_tf_read() argument 552 void __iomem *port_base = inic_port_base(ap); inic_tf_read() 575 inic_tf_read(qc->ap, &tf); inic_qc_fill_rtf() 585 static void inic_freeze(struct ata_port *ap) inic_freeze() argument 587 void __iomem *port_base = inic_port_base(ap); inic_freeze() 593 static void inic_thaw(struct ata_port *ap) inic_thaw() argument 595 void __iomem *port_base = inic_port_base(ap); inic_thaw() 603 void __iomem *port_base = inic_port_base(link->ap); inic_check_ready() 615 struct ata_port *ap = link->ap; inic_hardreset() local 616 void __iomem *port_base = inic_port_base(ap); inic_hardreset() 626 ata_msleep(ap, 1); inic_hardreset() 651 inic_tf_read(ap, &tf); inic_hardreset() 658 static void inic_error_handler(struct ata_port *ap) inic_error_handler() argument 660 void __iomem *port_base = inic_port_base(ap); inic_error_handler() 663 ata_std_error_handler(ap); inic_error_handler() 670 inic_reset_port(inic_port_base(qc->ap)); inic_post_internal_cmd() 673 static void init_port(struct ata_port *ap) init_port() argument 675 void __iomem *port_base = inic_port_base(ap); init_port() 676 struct inic_port_priv *pp = ap->private_data; init_port() 686 static int inic_port_resume(struct ata_port *ap) inic_port_resume() argument 688 init_port(ap); inic_port_resume() 692 static int inic_port_start(struct ata_port *ap) inic_port_start() argument 694 struct device *dev = ap->host->dev; inic_port_start() 701 ap->private_data = pp; inic_port_start() 714 init_port(ap); inic_port_start() 852 struct ata_port *ap = host->ports[i]; inic_init_one() local 854 ata_port_pbar_desc(ap, mmio_bar, -1, "mmio"); inic_init_one() 855 ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port"); inic_init_one()
|
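inic_port_base() above carves one MMIO BAR into fixed-size per-port windows (mmio_base + port_no * PORT_SIZE), and every port helper, from inic_freeze() to inic_scr_read(), derives its addresses from that single base. A sketch of the same carve-up over an in-memory "BAR"; PORT_SIZE and the PORT_SCR offset below are illustrative values, not the controller's real map:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PORT_SIZE   0x40           /* illustrative per-port window size */
#define PORT_SCR    0x20           /* illustrative SCR block offset */

static uint8_t bar[4 * PORT_SIZE]; /* pretend MMIO BAR covering 4 ports */

static uint8_t *port_base(unsigned int port_no)
{
        return bar + port_no * PORT_SIZE;
}

/* Per-port helpers only ever see their own window. */
static void scr_write(unsigned int port_no, unsigned int reg, uint32_t val)
{
        memcpy(port_base(port_no) + PORT_SCR + 4 * reg, &val, 4);
}

static uint32_t scr_read(unsigned int port_no, unsigned int reg)
{
        uint32_t val;

        memcpy(&val, port_base(port_no) + PORT_SCR + 4 * reg, 4);
        return val;
}

int main(void)
{
        scr_write(2, 0, 0x113);               /* port 2, SStatus-like register */
        printf("port 2 scr0 = 0x%x\n", (unsigned int)scr_read(2, 0));
        printf("port 0 scr0 = 0x%x\n", (unsigned int)scr_read(0, 0));
        return 0;
}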
H A D | pata_ixp4xx_cf.c | 49 struct ata_port *ap = dev->link->ap; ixp4xx_mmio_data_xfer() local 50 void __iomem *mmio = ap->ioaddr.data_addr; ixp4xx_mmio_data_xfer() 51 struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev); ixp4xx_mmio_data_xfer() 99 static void ixp4xx_setup_port(struct ata_port *ap, ixp4xx_setup_port() argument 103 struct ata_ioports *ioaddr = &ap->ioaddr; ixp4xx_setup_port() 137 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl); ixp4xx_setup_port() 145 struct ata_port *ap; ixp4xx_pata_probe() local 179 ap = host->ports[0]; ixp4xx_pata_probe() 181 ap->ops = &ixp4xx_port_ops; ixp4xx_pata_probe() 182 ap->pio_mask = ATA_PIO4; ixp4xx_pata_probe() 183 ap->flags |= ATA_FLAG_NO_ATAPI; ixp4xx_pata_probe() 185 ixp4xx_setup_port(ap, data, cs0->start, cs1->start); ixp4xx_pata_probe()
|
H A D | libata.h | 63 extern void ata_force_cbl(struct ata_port *ap); 100 extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); 106 extern int ata_port_probe(struct ata_port *ap); 107 extern void __ata_port_probe(struct ata_port *ap); 115 extern int ata_acpi_on_suspend(struct ata_port *ap); 116 extern void ata_acpi_on_resume(struct ata_port *ap); 119 extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state); 120 extern void ata_acpi_bind_port(struct ata_port *ap); 125 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } ata_acpi_on_resume() argument 126 static inline void ata_acpi_on_resume(struct ata_port *ap) { } ata_acpi_on_devcfg() argument 129 static inline void ata_acpi_set_state(struct ata_port *ap, ata_acpi_set_state() argument 131 static inline void ata_acpi_bind_port(struct ata_port *ap) {} ata_acpi_bind_dev() argument 138 extern void ata_scsi_scan_host(struct ata_port *ap, int sync); ata_acpi_bind_dev() 144 extern int ata_bus_probe(struct ata_port *ap); ata_acpi_bind_dev() 147 int ata_sas_allocate_tag(struct ata_port *ap); ata_acpi_bind_dev() 148 void ata_sas_free_tag(unsigned int tag, struct ata_port *ap); ata_acpi_bind_dev() 154 extern void ata_eh_acquire(struct ata_port *ap); ata_acpi_bind_dev() 155 extern void ata_eh_release(struct ata_port *ap); ata_acpi_bind_dev() 168 extern void ata_eh_autopsy(struct ata_port *ap); ata_acpi_bind_dev() 170 extern void ata_eh_report(struct ata_port *ap); ata_acpi_bind_dev() 175 extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, ata_acpi_bind_dev() 179 extern void ata_eh_finish(struct ata_port *ap); ata_acpi_bind_dev() 219 extern void ata_sff_flush_pio_task(struct ata_port *ap); 220 extern void ata_sff_port_init(struct ata_port *ap); 224 static inline void ata_sff_flush_pio_task(struct ata_port *ap) ata_sff_flush_pio_task() argument 226 static inline void ata_sff_port_init(struct ata_port *ap) ata_sff_port_init() argument
|
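The libata.h excerpt shows the usual kernel pattern for optional features: real prototypes when CONFIG_ATA_ACPI is set, and empty static inline stubs (returning 0 or doing nothing) otherwise, so callers build unchanged either way. A compact illustration of the same pattern with a made-up HAVE_ACPI switch:

#include <stdio.h>

/* Flip this define to see the callers build unchanged either way. */
#define HAVE_ACPI 0

struct port { int id; };

#if HAVE_ACPI
int acpi_on_suspend(struct port *p);            /* real implementation elsewhere */
void acpi_set_state(struct port *p, int state);
#else
/* Stubs: same signatures, no behaviour, no runtime cost after inlining. */
static inline int acpi_on_suspend(struct port *p) { (void)p; return 0; }
static inline void acpi_set_state(struct port *p, int state)
{
        (void)p;
        (void)state;
}
#endif

int main(void)
{
        struct port p = { .id = 0 };

        /* Caller code is identical whether or not the feature is built in. */
        acpi_set_state(&p, 3);
        printf("suspend rc = %d\n", acpi_on_suspend(&p));
        return 0;
}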
H A D | pata_icside.c | 187 static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev) pata_icside_set_dmamode() argument 189 struct pata_icside_state *state = ap->host->private_data; pata_icside_set_dmamode() 216 state->port[ap->port_no].speed[adev->devno] = cycle; pata_icside_set_dmamode() 221 struct ata_port *ap = qc->ap; pata_icside_bmdma_setup() local 222 struct pata_icside_state *state = ap->host->private_data; pata_icside_bmdma_setup() 234 writeb(state->port[ap->port_no].port_sel, state->ioc_base); pata_icside_bmdma_setup() 236 set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]); pata_icside_bmdma_setup() 241 ap->ops->sff_exec_command(ap, &qc->tf); pata_icside_bmdma_setup() 246 struct ata_port *ap = qc->ap; pata_icside_bmdma_start() local 247 struct pata_icside_state *state = ap->host->private_data; pata_icside_bmdma_start() 255 struct ata_port *ap = qc->ap; pata_icside_bmdma_stop() local 256 struct pata_icside_state *state = ap->host->private_data; pata_icside_bmdma_stop() 261 ata_sff_dma_pause(ap); pata_icside_bmdma_stop() 264 static u8 pata_icside_bmdma_status(struct ata_port *ap) pata_icside_bmdma_status() argument 266 struct pata_icside_state *state = ap->host->private_data; pata_icside_bmdma_status() 269 irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 : pata_icside_bmdma_status() 303 struct ata_port *ap = link->ap; pata_icside_postreset() local 304 struct pata_icside_state *state = ap->host->private_data; pata_icside_postreset() 309 state->port[ap->port_no].disabled = 1; pata_icside_postreset() 318 (ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1); pata_icside_postreset() 340 static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, pata_icside_setup_ioaddr() argument 344 struct ata_ioports *ioaddr = &ap->ioaddr; pata_icside_setup_ioaddr() 362 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", pata_icside_setup_ioaddr() 367 ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); pata_icside_setup_ioaddr() 462 struct ata_port *ap = host->ports[i]; pata_icside_add_ports() local 464 ap->pio_mask = ATA_PIO4; pata_icside_add_ports() 465 ap->mwdma_mask = info->mwdma_mask; pata_icside_add_ports() 466 ap->flags |= ATA_FLAG_SLAVE_POSS; pata_icside_add_ports() 467 ap->ops = &pata_icside_port_ops; pata_icside_add_ports() 469 pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); pata_icside_add_ports()
|
H A D | pata_it821x.c | 117 * @ap: ATA port 126 static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing) it821x_program() argument 128 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_program() 129 struct it821x_dev *itdev = ap->private_data; it821x_program() 130 int channel = ap->port_no; it821x_program() 144 * @ap: ATA port 154 static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing) it821x_program_udma() argument 156 struct it821x_dev *itdev = ap->private_data; it821x_program_udma() 157 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_program_udma() 158 int channel = ap->port_no; it821x_program_udma() 178 * @ap: ATA interface 185 static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev) it821x_clock_strategy() argument 187 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_clock_strategy() 188 struct it821x_dev *itdev = ap->private_data; it821x_clock_strategy() 224 v &= ~(1 << (1 + ap->port_no)); it821x_clock_strategy() 225 v |= sel << (1 + ap->port_no); it821x_clock_strategy() 233 it821x_program_udma(ap, pair, itdev->udma[1-unit]); it821x_clock_strategy() 234 it821x_program(ap, pair, itdev->pio[1-unit]); it821x_clock_strategy() 241 it821x_program_udma(ap, adev, itdev->udma[unit]); it821x_clock_strategy() 242 it821x_program(ap, adev, itdev->pio[unit]); it821x_clock_strategy() 248 * @ap: ATA interface 255 static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev) it821x_passthru_set_piomode() argument 261 struct it821x_dev *itdev = ap->private_data; it821x_passthru_set_piomode() 269 it821x_clock_strategy(ap, adev); it821x_passthru_set_piomode() 270 it821x_program(ap, adev, itdev->pio[unit]); it821x_passthru_set_piomode() 275 * @ap: ATA interface 285 static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev) it821x_passthru_set_dmamode() argument 292 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_passthru_set_dmamode() 293 struct it821x_dev *itdev = ap->private_data; it821x_passthru_set_dmamode() 294 int channel = ap->port_no; it821x_passthru_set_dmamode() 315 it821x_clock_strategy(ap, adev); it821x_passthru_set_dmamode() 316 it821x_program_udma(ap, adev, itdev->udma[unit]); it821x_passthru_set_dmamode() 332 it821x_clock_strategy(ap, adev); it821x_passthru_set_dmamode() 347 struct ata_port *ap = qc->ap; it821x_passthru_bmdma_start() local 349 struct it821x_dev *itdev = ap->private_data; it821x_passthru_bmdma_start() 353 it821x_program(ap, adev, itdev->mwdma[unit]); it821x_passthru_bmdma_start() 355 it821x_program_udma(ap, adev, itdev->udma[unit]); it821x_passthru_bmdma_start() 370 struct ata_port *ap = qc->ap; it821x_passthru_bmdma_stop() local 372 struct it821x_dev *itdev = ap->private_data; it821x_passthru_bmdma_stop() 377 it821x_program(ap, adev, itdev->pio[unit]); it821x_passthru_bmdma_stop() 383 * @ap: ATA port 389 static void it821x_passthru_dev_select(struct ata_port *ap, it821x_passthru_dev_select() argument 392 struct it821x_dev *itdev = ap->private_data; it821x_passthru_dev_select() 394 struct ata_device *adev = &ap->link.device[device]; it821x_passthru_dev_select() 395 it821x_program(ap, adev, itdev->pio[adev->devno]); it821x_passthru_dev_select() 398 ata_sff_dev_select(ap, device); it821x_passthru_dev_select() 449 it821x_passthru_dev_select(qc->ap, qc->dev->devno); it821x_passthru_qc_issue() 577 struct ata_port *ap = qc->ap; it821x_check_atapi_dma() local 578 struct it821x_dev *itdev = ap->private_data; 
it821x_check_atapi_dma() 647 * @ap: IT821x port to interrogate 656 static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) it821x_firmware_command() argument 667 ap->ctl |= ATA_NIEN; it821x_firmware_command() 668 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); it821x_firmware_command() 669 ata_wait_idle(ap); it821x_firmware_command() 670 iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr); it821x_firmware_command() 671 iowrite8(cmd, ap->ioaddr.command_addr); it821x_firmware_command() 676 status = ioread8(ap->ioaddr.status_addr); it821x_firmware_command() 683 ioread16_rep(ap->ioaddr.data_addr, buf, len/2); it821x_firmware_command() 695 * @ap: IT821x port being probed 701 static void it821x_probe_firmware(struct ata_port *ap) it821x_probe_firmware() argument 709 buf = it821x_firmware_command(ap, 0xFA, 512); it821x_probe_firmware() 727 * @ap: ATA port being set up 735 static int it821x_port_start(struct ata_port *ap) it821x_port_start() argument 737 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_port_start() 741 int ret = ata_bmdma_port_start(ap); it821x_port_start() 748 ap->private_data = itdev; it821x_port_start() 757 if (ap->port_no == 0) it821x_port_start() 758 it821x_probe_firmware(ap); it821x_port_start() 761 if (conf & (1 << (1 + ap->port_no))) it821x_port_start() 782 * @ap: port we are checking 788 static int it821x_rdc_cable(struct ata_port *ap) it821x_rdc_cable() argument 791 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it821x_rdc_cable() 794 if (r40 & (1 << (2 + ap->port_no))) it821x_rdc_cable()
|
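it821x_firmware_command() above is a fully polled taskfile transaction: it masks the interrupt (ATA_NIEN), issues the vendor command, repeatedly reads the status register until the device is ready to hand over data, and then drains len/2 words from the data register with ioread16_rep(). A simplified user-space model of that poll-then-drain loop; the fake device and the exact status checks below are illustrative, not the IT821x protocol:

#include <stdio.h>
#include <stdint.h>

#define ST_BSY 0x80
#define ST_DRQ 0x08
#define ST_ERR 0x01

/* Fake device: busy for a few status reads, then ready with data. */
static int polls_left = 3;
static uint16_t fw_data[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
static int data_idx;

static uint8_t read_status(void)
{
        if (polls_left > 0) {
                polls_left--;
                return ST_BSY;
        }
        return ST_DRQ;
}

static uint16_t read_data(void)
{
        return fw_data[data_idx++];
}

/* Poll-then-drain, with a bounded retry count instead of a timer. */
static int firmware_read(uint16_t *buf, int words)
{
        int tries = 1000;
        int i;

        while (tries--) {
                uint8_t st = read_status();

                if (st & ST_ERR)
                        return -1;
                if (!(st & ST_BSY) && (st & ST_DRQ))
                        break;
        }
        if (tries < 0)
                return -1;

        for (i = 0; i < words; i++)
                buf[i] = read_data();
        return 0;
}

int main(void)
{
        uint16_t buf[4];

        if (firmware_read(buf, 4) == 0)
                printf("word0 = 0x%04x\n", buf[0]);
        return 0;
}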
H A D | pata_cmd640.c | 45 * @ap: ATA port 51 static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) cmd640_set_piomode() argument 53 struct cmd640_reg *timing = ap->private_data; cmd640_set_piomode() 54 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd640_set_piomode() 69 if (ap->port_no && pair) { cmd640_set_piomode() 96 if (ap->port_no == 0) { cmd640_set_piomode() 130 struct ata_port *ap = qc->ap; cmd640_qc_issue() local 132 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd640_qc_issue() 133 struct cmd640_reg *timing = ap->private_data; cmd640_qc_issue() 135 if (ap->port_no != 0 && adev->devno != timing->last) { cmd640_qc_issue() 144 * @ap: ATA port being set up 150 static int cmd640_port_start(struct ata_port *ap) cmd640_port_start() argument 152 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd640_port_start() 159 ap->private_data = timing; cmd640_port_start() 163 static bool cmd640_sff_irq_check(struct ata_port *ap) cmd640_sff_irq_check() argument 165 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cmd640_sff_irq_check() 166 int irq_reg = ap->port_no ? ARTIM23 : CFR; cmd640_sff_irq_check() 167 u8 irq_stat, irq_mask = ap->port_no ? 0x10 : 0x04; cmd640_sff_irq_check()
|
H A D | pata_oldpiix.c | 39 struct ata_port *ap = link->ap; oldpiix_pre_reset() local 40 struct pci_dev *pdev = to_pci_dev(ap->host->dev); oldpiix_pre_reset() 46 if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) oldpiix_pre_reset() 54 * @ap: Port whose timings we are configuring 63 static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev) oldpiix_set_piomode() argument 66 struct pci_dev *dev = to_pci_dev(ap->host->dev); oldpiix_set_piomode() 67 unsigned int idetm_port= ap->port_no ? 0x42 : 0x40; oldpiix_set_piomode() 111 ap->private_data = adev; oldpiix_set_piomode() 116 * @ap: Port whose timings we are configuring 125 static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev) oldpiix_set_dmamode() argument 127 struct pci_dev *dev = to_pci_dev(ap->host->dev); oldpiix_set_dmamode() 128 u8 idetm_port = ap->port_no ? 0x42 : 0x40; oldpiix_set_dmamode() 178 ap->private_data = adev; oldpiix_set_dmamode() 194 struct ata_port *ap = qc->ap; oldpiix_qc_issue() local 197 if (adev != ap->private_data) { oldpiix_qc_issue() 198 oldpiix_set_piomode(ap, adev); oldpiix_qc_issue() 200 oldpiix_set_dmamode(ap, adev); oldpiix_qc_issue()
|
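pata_oldpiix.c, like pata_cmd640.c above it, remembers the last device it programmed in ap->private_data and only rewrites the shared timing registers from qc_issue() when the command targets the other device. A small sketch of that reprogram-on-switch cache, with a hypothetical program_timings() standing in for the PCI config writes:

#include <stdio.h>

struct device { int devno; int pio_mode; };

/* Hypothetical stand-in for the real timing-register update. */
static void program_timings(const struct device *dev)
{
        printf("reprogram shared timing regs for device %d (PIO %d)\n",
               dev->devno, dev->pio_mode);
}

struct port {
        const struct device *last;   /* plays the role of ap->private_data */
};

/* The qc_issue()-time check: only touch hardware when the device changed. */
static void issue_command(struct port *port, const struct device *dev)
{
        if (dev != port->last) {
                program_timings(dev);
                port->last = dev;
        }
        printf("issue command to device %d\n", dev->devno);
}

int main(void)
{
        struct device master = { 0, 4 }, slave = { 1, 2 };
        struct port port = { 0 };

        issue_command(&port, &master);
        issue_command(&port, &master);   /* no reprogram: same device */
        issue_command(&port, &slave);    /* device switch: reprogram */
        return 0;
}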
H A D | sata_sis.c | 117 struct ata_port *ap = link->ap; get_scr_cfg_addr() local 118 struct pci_dev *pdev = to_pci_dev(ap->host->dev); get_scr_cfg_addr() 122 if (ap->port_no) { get_scr_cfg_addr() 147 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); sis_scr_cfg_read() 160 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); sis_scr_cfg_write() 169 struct ata_port *ap = link->ap; sis_scr_read() local 170 void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; sis_scr_read() 175 if (ap->flags & SIS_FLAG_CFGSCR) sis_scr_read() 184 struct ata_port *ap = link->ap; sis_scr_write() local 185 void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; sis_scr_write() 190 if (ap->flags & SIS_FLAG_CFGSCR) sis_scr_write() 286 struct ata_port *ap = host->ports[i]; sis_init_one() local 288 if (ap->flags & ATA_FLAG_SATA && sis_init_one() 289 ap->flags & ATA_FLAG_SLAVE_POSS) { sis_init_one() 290 rc = ata_slave_link_init(ap); sis_init_one()
|
H A D | sata_fsl.c | 474 ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n", sata_fsl_fill_sg() 477 ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n", sata_fsl_fill_sg() 517 struct ata_port *ap = qc->ap; sata_fsl_qc_prep() local 518 struct sata_fsl_port_priv *pp = ap->private_data; sata_fsl_qc_prep() 519 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_qc_prep() 565 struct ata_port *ap = qc->ap; sata_fsl_qc_issue() local 566 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_qc_issue() 594 struct sata_fsl_port_priv *pp = qc->ap->private_data; sata_fsl_qc_fill_rtf() 595 struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; sata_fsl_qc_fill_rtf() 609 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data; sata_fsl_scr_write() 633 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data; sata_fsl_scr_read() 654 static void sata_fsl_freeze(struct ata_port *ap) sata_fsl_freeze() argument 656 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_freeze() 675 static void sata_fsl_thaw(struct ata_port *ap) sata_fsl_thaw() argument 677 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_thaw() 697 static void sata_fsl_pmp_attach(struct ata_port *ap) sata_fsl_pmp_attach() argument 699 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_pmp_attach() 707 static void sata_fsl_pmp_detach(struct ata_port *ap) sata_fsl_pmp_detach() argument 709 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_pmp_detach() 723 static int sata_fsl_port_start(struct ata_port *ap) sata_fsl_port_start() argument 725 struct device *dev = ap->host->dev; sata_fsl_port_start() 729 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_port_start() 753 ap->private_data = pp; sata_fsl_port_start() 779 static void sata_fsl_port_stop(struct ata_port *ap) sata_fsl_port_stop() argument 781 struct device *dev = ap->host->dev; sata_fsl_port_stop() 782 struct sata_fsl_port_priv *pp = ap->private_data; sata_fsl_port_stop() 783 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_port_stop() 796 ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1); sata_fsl_port_stop() 798 ap->private_data = NULL; sata_fsl_port_stop() 805 static unsigned int sata_fsl_dev_classify(struct ata_port *ap) sata_fsl_dev_classify() argument 807 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_dev_classify() 829 struct ata_port *ap = link->ap; sata_fsl_hardreset() local 830 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_hardreset() 847 temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, sata_fsl_hardreset() 851 ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i); sata_fsl_hardreset() 870 ata_msleep(ap, 1); sata_fsl_hardreset() 886 temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500); sata_fsl_hardreset() 889 ata_port_err(ap, "Hardreset failed, not on-lined\n"); sata_fsl_hardreset() 903 temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500); sata_fsl_hardreset() 905 ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n", sata_fsl_hardreset() 915 temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10, sata_fsl_hardreset() 919 ata_port_warn(ap, "No Signature Update\n"); sata_fsl_hardreset() 923 ata_port_info(ap, "Signature Update detected @ %d msecs\n", sata_fsl_hardreset() 925 *class = sata_fsl_dev_classify(ap); sata_fsl_hardreset() 942 struct ata_port 
*ap = link->ap; sata_fsl_softreset() local 943 struct sata_fsl_port_priv *pp = ap->private_data; sata_fsl_softreset() 944 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_softreset() 997 temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000); sata_fsl_softreset() 999 ata_port_warn(ap, "ATA_SRST issue failed\n"); sata_fsl_softreset() 1005 sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror); sata_fsl_softreset() 1013 ata_msleep(ap, 1); sata_fsl_softreset() 1032 ata_msleep(ap, 150); /* ?? */ sata_fsl_softreset() 1054 *class = sata_fsl_dev_classify(ap); sata_fsl_softreset() 1067 static void sata_fsl_error_handler(struct ata_port *ap) sata_fsl_error_handler() argument 1071 sata_pmp_error_handler(ap); sata_fsl_error_handler() 1086 static void sata_fsl_error_intr(struct ata_port *ap) sata_fsl_error_intr() argument 1088 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_error_intr() 1101 link = &ap->link; sata_fsl_error_intr() 1109 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError); sata_fsl_error_intr() 1111 sata_fsl_scr_write(&ap->link, SCR_ERROR, SError); sata_fsl_error_intr() 1126 sata_async_notification(ap); sata_fsl_error_intr() 1155 if (ap->nr_pmp_links) { sata_fsl_error_intr() 1163 if (dev_num < ap->nr_pmp_links && dereg != 0) { sata_fsl_error_intr() 1164 link = &ap->pmp_link[dev_num]; sata_fsl_error_intr() 1166 qc = ata_qc_from_tag(ap, link->active_tag); sata_fsl_error_intr() 1184 qc = ata_qc_from_tag(ap, link->active_tag); sata_fsl_error_intr() 1203 ata_port_freeze(ap); sata_fsl_error_intr() 1208 ata_port_abort(ap); sata_fsl_error_intr() 1212 static void sata_fsl_host_intr(struct ata_port *ap) sata_fsl_host_intr() argument 1214 struct sata_fsl_host_priv *host_priv = ap->host->private_data; sata_fsl_host_intr() 1224 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError); sata_fsl_host_intr() 1232 qc = ata_qc_from_tag(ap, tag); sata_fsl_host_intr() 1245 sata_fsl_scr_write(&ap->link, SCR_ERROR, sata_fsl_host_intr() 1258 sata_fsl_error_intr(ap); sata_fsl_host_intr() 1263 sata_fsl_error_intr(ap); sata_fsl_host_intr() 1273 ap->qc_active); sata_fsl_host_intr() 1275 if (done_mask & ap->qc_active) { sata_fsl_host_intr() 1292 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); sata_fsl_host_intr() 1295 } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { sata_fsl_host_intr() 1297 qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); sata_fsl_host_intr() 1321 struct ata_port *ap; sata_fsl_interrupt() local 1336 ap = host->ports[0]; sata_fsl_interrupt() 1337 if (ap) { sata_fsl_interrupt() 1338 sata_fsl_host_intr(ap); sata_fsl_interrupt() 1591 struct ata_port *ap = host->ports[0]; sata_fsl_resume() local 1592 struct sata_fsl_port_priv *pp = ap->private_data; sata_fsl_resume()
|
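sata_fsl_hardreset() and sata_fsl_port_stop() above lean heavily on ata_wait_register(): poll a register at a fixed interval until its masked value stops matching the given pattern or the timeout expires, sleeping between reads (and, per the libata-core.c excerpt, dropping EH ownership via ata_msleep() while doing so). A user-space approximation of that helper against a simulated register:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* Simulated controller register: the ONLINE bit drops after a few reads. */
#define ONLINE 0x8000
static int reads_left = 5;

static uint32_t read_reg(void)
{
        return reads_left-- > 0 ? ONLINE | 0x13 : 0x13;
}

/*
 * Poll while (reg & mask) == val, sleeping 'interval_ms' between reads,
 * for at most 'timeout_ms'.  Returns the last value read.
 */
static uint32_t wait_register(uint32_t mask, uint32_t val,
                              unsigned int interval_ms, unsigned int timeout_ms)
{
        uint32_t tmp = read_reg();
        unsigned int waited = 0;

        while ((tmp & mask) == val && waited < timeout_ms) {
                usleep(interval_ms * 1000);
                waited += interval_ms;
                tmp = read_reg();
        }
        return tmp;
}

int main(void)
{
        uint32_t v = wait_register(ONLINE, ONLINE, 1, 500);

        if (v & ONLINE)
                printf("timed out, port still on-line (0x%x)\n", v);
        else
                printf("port went off-line (0x%x)\n", v);
        return 0;
}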
H A D | pata_pdc2027x.c | 70 static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); 71 static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); 74 static int pdc2027x_cable_detect(struct ata_port *ap); 184 * @ap: Port 187 static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset) port_mmio() argument 189 return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset; port_mmio() 194 * @ap: Port 198 static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset) dev_mmio() argument 201 return port_mmio(ap, offset) + adj; dev_mmio() 206 * @ap: Port for which cable detect info is desired 214 static int pdc2027x_cable_detect(struct ata_port *ap) pdc2027x_cable_detect() argument 219 cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL)); pdc2027x_cable_detect() 223 PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no); pdc2027x_cable_detect() 227 printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no); pdc2027x_cable_detect() 233 * @ap: Port to check 235 static inline int pdc2027x_port_enabled(struct ata_port *ap) pdc2027x_port_enabled() argument 237 return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02; pdc2027x_port_enabled() 254 if (!pdc2027x_port_enabled(link->ap)) pdc2027x_prereset() 287 * @ap: Port to configure 296 static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev) pdc2027x_set_piomode() argument 313 ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); pdc2027x_set_piomode() 317 iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); pdc2027x_set_piomode() 319 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_piomode() 322 iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_piomode() 331 * @ap: Port to configure 339 static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) pdc2027x_set_dmamode() argument 355 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_dmamode() 356 iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_dmamode() 361 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_dmamode() 366 iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); pdc2027x_set_dmamode() 378 ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); pdc2027x_set_dmamode() 384 iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); pdc2027x_set_dmamode() 404 struct ata_port *ap = link->ap; pdc2027x_set_mode() local 413 pdc2027x_set_piomode(ap, dev); ata_for_each_dev() 419 u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1)); ata_for_each_dev() 421 iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); ata_for_each_dev() 425 pdc2027x_set_dmamode(ap, dev); ata_for_each_dev() 744 struct ata_port *ap = host->ports[i]; pdc2027x_init_one() local 746 pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]); pdc2027x_init_one() 747 ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i]; pdc2027x_init_one() 749 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); pdc2027x_init_one() 750 ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd"); pdc2027x_init_one()
|
H A D | sata_via.c | 81 static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); 82 static void svia_noop_freeze(struct ata_port *ap); 85 static int vt6421_pata_cable_detect(struct ata_port *ap); 86 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); 87 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); 191 *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg)); svia_scr_read() 199 iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg)); svia_scr_write() 206 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); vt8251_scr_read() 207 int slot = 2 * link->ap->port_no + link->pmp; vt8251_scr_read() 254 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); vt8251_scr_write() 255 int slot = 2 * link->ap->port_no + link->pmp; vt8251_scr_write() 282 * @ap: Port to which output is sent 291 static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) svia_tf_load() argument 295 if (tf->ctl != ap->last_ctl) { svia_tf_load() 300 ata_sff_tf_load(ap, tf); svia_tf_load() 303 static void svia_noop_freeze(struct ata_port *ap) svia_noop_freeze() argument 308 ap->ops->sff_check_status(ap); svia_noop_freeze() 309 ata_bmdma_irq_clear(ap); svia_noop_freeze() 334 struct ata_port *ap = link->ap; vt6420_prereset() local 335 struct ata_eh_context *ehc = &ap->link.eh_context; vt6420_prereset() 341 if (!(ap->pflags & ATA_PFLAG_LOADING)) vt6420_prereset() 350 ata_msleep(link->ap, 200); vt6420_prereset() 362 ata_port_info(ap, vt6420_prereset() 384 struct ata_port *ap = qc->ap; vt6420_bmdma_start() local 388 ata_sff_pause(ap); vt6420_bmdma_start() 393 static int vt6421_pata_cable_detect(struct ata_port *ap) vt6421_pata_cable_detect() argument 395 struct pci_dev *pdev = to_pci_dev(ap->host->dev); vt6421_pata_cable_detect() 404 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev) vt6421_set_pio_mode() argument 406 struct pci_dev *pdev = to_pci_dev(ap->host->dev); vt6421_set_pio_mode() 412 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev) vt6421_set_dma_mode() argument 414 struct pci_dev *pdev = to_pci_dev(ap->host->dev); vt6421_set_dma_mode() 438 static void vt6421_init_addrs(struct ata_port *ap) vt6421_init_addrs() argument 440 void __iomem * const * iomap = ap->host->iomap; vt6421_init_addrs() 441 void __iomem *reg_addr = iomap[ap->port_no]; vt6421_init_addrs() 442 void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8); vt6421_init_addrs() 443 struct ata_ioports *ioaddr = &ap->ioaddr; vt6421_init_addrs() 450 ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); vt6421_init_addrs() 454 ata_port_pbar_desc(ap, ap->port_no, -1, "port"); vt6421_init_addrs() 455 ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma"); vt6421_init_addrs()
|
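svia_tf_load() above reloads the device-control register only when tf->ctl differs from ap->last_ctl, and the sata_mv.c excerpts further down apply the same idea to whole configuration registers through mv_save_cached_regs()/mv_write_cached_reg(): hardware is touched only when the value actually changes. A minimal write-if-changed helper in that spirit, with a simulated register:

#include <stdio.h>
#include <stdint.h>

static uint32_t hw_reg;            /* simulated MMIO register */
static int hw_writes;              /* count real hardware accesses */

static void hw_write(uint32_t val)
{
        hw_reg = val;
        hw_writes++;
}

/* Write-through cache: touch the (slow) register only on a real change. */
static void write_cached_reg(uint32_t *cached, uint32_t new_val)
{
        if (*cached != new_val) {
                hw_write(new_val);
                *cached = new_val;
        }
}

int main(void)
{
        uint32_t cached = hw_reg;          /* "save cached regs" at port start */

        write_cached_reg(&cached, 0x11);
        write_cached_reg(&cached, 0x11);   /* no-op: value unchanged */
        write_cached_reg(&cached, 0x22);

        printf("register = 0x%x after %d hardware writes\n", hw_reg, hw_writes);
        return 0;
}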
H A D | pata_platform.c | 107 struct ata_port *ap; __pata_platform_probe() local 132 ap = host->ports[0]; __pata_platform_probe() 134 ap->ops = &pata_platform_port_ops; __pata_platform_probe() 135 ap->pio_mask = __pio_mask; __pata_platform_probe() 136 ap->flags |= ATA_FLAG_SLAVE_POSS; __pata_platform_probe() 142 ap->flags |= ATA_FLAG_PIO_POLLING; __pata_platform_probe() 143 ata_port_desc(ap, "no IRQ, using PIO polling"); __pata_platform_probe() 150 ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, __pata_platform_probe() 152 ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, __pata_platform_probe() 155 ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, __pata_platform_probe() 157 ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, __pata_platform_probe() 160 if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) { __pata_platform_probe() 165 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; __pata_platform_probe() 167 pata_platform_setup_port(&ap->ioaddr, ioport_shift); __pata_platform_probe() 169 ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport", __pata_platform_probe()
|
H A D | pata_at32.c | 150 static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev) pata_at32_set_piomode() argument 153 struct at32_ide_info *info = ap->host->private_data; pata_at32_set_piomode() 160 dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret); pata_at32_set_piomode() 165 ret = pata_at32_setup_timing(ap->dev, info, &timing); pata_at32_set_piomode() 167 dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret); pata_at32_set_piomode() 186 struct ata_port *ap; pata_at32_init_one() local 192 ap = host->ports[0]; pata_at32_init_one() 195 ap->ops = &at32_port_ops; pata_at32_init_one() 196 ap->pio_mask = PIO_MASK; pata_at32_init_one() 197 ap->flags |= ATA_FLAG_SLAVE_POSS; pata_at32_init_one() 211 ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1); pata_at32_init_one() 212 ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1); pata_at32_init_one() 214 ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1); pata_at32_init_one() 215 ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1); pata_at32_init_one() 216 ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1); pata_at32_init_one() 217 ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1); pata_at32_init_one() 218 ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1); pata_at32_init_one() 219 ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1); pata_at32_init_one() 220 ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1); pata_at32_init_one() 221 ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1); pata_at32_init_one() 222 ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1); pata_at32_init_one() 223 ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1); pata_at32_init_one()
|
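pata_at32_init_one() above fills in the whole ata_ioports block by hand, placing each taskfile register at ide_addr + (register index << 1) and the control/altstatus pair at alt_addr + (0x06 << 1); pata_platform.c expresses the same idea as a configurable ioport_shift. A sketch that derives such a register map from a base address and a shift; the register indices follow the usual SFF order and the base addresses are made up:

#include <stdio.h>
#include <stdint.h>

/* Standard SFF taskfile register indices (offsets from the command block). */
enum { REG_DATA, REG_ERR, REG_NSECT, REG_LBAL, REG_LBAM, REG_LBAH,
       REG_DEVICE, REG_STATUS, NR_REGS };

struct ioports {
        uintptr_t addr[NR_REGS];
        uintptr_t ctl_addr;
};

/* Place register i at base + (i << shift); shift=1 matches the at32 layout. */
static void setup_ioaddr(struct ioports *io, uintptr_t cmd_base,
                         uintptr_t ctl_base, unsigned int shift)
{
        int i;

        for (i = 0; i < NR_REGS; i++)
                io->addr[i] = cmd_base + ((uintptr_t)i << shift);
        io->ctl_addr = ctl_base + ((uintptr_t)0x06 << shift);
}

int main(void)
{
        struct ioports io;

        setup_ioaddr(&io, 0x40000000, 0x40000100, 1);   /* made-up bases */
        printf("status at 0x%lx, ctl at 0x%lx\n",
               (unsigned long)io.addr[REG_STATUS],
               (unsigned long)io.ctl_addr);
        return 0;
}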
H A D | pata_rdc.c | 44 * @ap: Port for which cable detect info is desired 53 static int rdc_pata_cable_detect(struct ata_port *ap) rdc_pata_cable_detect() argument 55 struct rdc_host_priv *hpriv = ap->host->private_data; rdc_pata_cable_detect() 59 mask = 0x30 << (2 * ap->port_no); rdc_pata_cable_detect() 75 struct ata_port *ap = link->ap; rdc_pata_prereset() local 76 struct pci_dev *pdev = to_pci_dev(ap->host->dev); rdc_pata_prereset() 83 if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no])) rdc_pata_prereset() 92 * @ap: Port whose timings we are configuring 101 static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev) rdc_set_piomode() argument 104 struct pci_dev *dev = to_pci_dev(ap->host->dev); rdc_set_piomode() 107 unsigned int master_port= ap->port_no ? 0x42 : 0x40; rdc_set_piomode() 144 slave_data &= (ap->port_no ? 0x0f : 0xf0); rdc_set_piomode() 147 << (ap->port_no ? 4 : 0); rdc_set_piomode() 166 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); rdc_set_piomode() 174 * @ap: Port whose timings we are configuring 183 static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) rdc_set_dmamode() argument 185 struct pci_dev *dev = to_pci_dev(ap->host->dev); rdc_set_dmamode() 187 u8 master_port = ap->port_no ? 0x42 : 0x40; rdc_set_dmamode() 190 int devid = adev->devno + 2 * ap->port_no; rdc_set_dmamode() 266 slave_data &= (ap->port_no ? 0x0f : 0xf0); rdc_set_dmamode() 268 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); rdc_set_dmamode()
|
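rdc_set_piomode() and rdc_set_dmamode() above share a single "slave_data" byte between the two channels: each port clears only its own nibble (the 0x0f or 0xf0 mask) and ORs its new timing bits back in, shifted by 0 or 4 depending on port_no. A standalone read-modify-write sketch of that nibble packing; the timing values are placeholders:

#include <stdio.h>
#include <stdint.h>

static uint8_t slave_data;   /* shared register, one nibble per channel */

/* Update only this port's nibble, leaving the other channel untouched. */
static void set_port_timing(int port_no, uint8_t timing4)
{
        uint8_t keep_mask = port_no ? 0x0f : 0xf0;
        int shift = port_no ? 4 : 0;

        slave_data &= keep_mask;
        slave_data |= (timing4 & 0x0f) << shift;
}

int main(void)
{
        set_port_timing(0, 0x9);          /* placeholder timing nibbles */
        set_port_timing(1, 0x3);
        printf("slave_data = 0x%02x\n", slave_data);   /* 0x39 */

        set_port_timing(0, 0x5);          /* reprogram port 0 only */
        printf("slave_data = 0x%02x\n", slave_data);   /* 0x35 */
        return 0;
}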
H A D | sata_mv.c | 605 static int mv_port_start(struct ata_port *ap); 606 static void mv_port_stop(struct ata_port *ap); 613 static void mv_eh_freeze(struct ata_port *ap); 614 static void mv_eh_thaw(struct ata_port *ap); 649 static int mv_stop_edma(struct ata_port *ap); 651 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); 653 static void mv_pmp_select(struct ata_port *ap, int pmp); 658 static void mv_pmp_error_handler(struct ata_port *ap); 659 static void mv_process_crpb_entries(struct ata_port *ap, 662 static void mv_sff_irq_clear(struct ata_port *ap); 667 static u8 mv_bmdma_status(struct ata_port *ap); 668 static u8 mv_sff_check_status(struct ata_port *ap); 935 static inline void __iomem *mv_ap_base(struct ata_port *ap) mv_ap_base() argument 937 return mv_port_base(mv_host_base(ap->host), ap->port_no); mv_ap_base() 947 * @ap: the port whose registers we are caching 955 static void mv_save_cached_regs(struct ata_port *ap) mv_save_cached_regs() argument 957 void __iomem *port_mmio = mv_ap_base(ap); mv_save_cached_regs() 958 struct mv_port_priv *pp = ap->private_data; mv_save_cached_regs() 1063 static void mv_enable_port_irqs(struct ata_port *ap, mv_enable_port_irqs() argument 1066 unsigned int shift, hardport, port = ap->port_no; mv_enable_port_irqs() 1073 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); mv_enable_port_irqs() 1076 static void mv_clear_and_enable_port_irqs(struct ata_port *ap, mv_clear_and_enable_port_irqs() argument 1080 struct mv_host_priv *hpriv = ap->host->private_data; mv_clear_and_enable_port_irqs() 1081 int hardport = mv_hardport_from_port(ap->port_no); mv_clear_and_enable_port_irqs() 1083 mv_host_base(ap->host), ap->port_no); mv_clear_and_enable_port_irqs() 1097 mv_enable_port_irqs(ap, port_irqs); mv_clear_and_enable_port_irqs() 1173 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, mv_start_edma() argument 1181 mv_stop_edma(ap); mv_start_edma() 1184 struct mv_host_priv *hpriv = ap->host->private_data; mv_start_edma() 1186 mv_edma_cfg(ap, want_ncq, 1); mv_start_edma() 1189 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); mv_start_edma() 1196 static void mv_wait_for_edma_empty_idle(struct ata_port *ap) mv_wait_for_edma_empty_idle() argument 1198 void __iomem *port_mmio = mv_ap_base(ap); mv_wait_for_edma_empty_idle() 1216 /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */ mv_wait_for_edma_empty_idle() 1243 static int mv_stop_edma(struct ata_port *ap) mv_stop_edma() argument 1245 void __iomem *port_mmio = mv_ap_base(ap); mv_stop_edma() 1246 struct mv_port_priv *pp = ap->private_data; mv_stop_edma() 1252 mv_wait_for_edma_empty_idle(ap); mv_stop_edma() 1254 ata_port_err(ap, "Unable to stop eDMA\n"); mv_stop_edma() 1257 mv_edma_cfg(ap, 0, 0); mv_stop_edma() 1363 *val = readl(mv_ap_base(link->ap) + ofs); mv_scr_read() 1374 void __iomem *addr = mv_ap_base(link->ap) + ofs; mv_scr_write() 1375 struct mv_host_priv *hpriv = link->ap->host->private_data; mv_scr_write() 1395 mv_ap_base(link->ap) + LP_PHY_CTL; mv_scr_write() 1427 if (sata_pmp_attached(adev->link->ap)) { mv6_dev_config() 1438 struct ata_port *ap = link->ap; mv_qc_defer() local 1439 struct mv_port_priv *pp = ap->private_data; mv_qc_defer() 1456 if (unlikely(ap->excl_link)) { mv_qc_defer() 1457 if (link == ap->excl_link) { mv_qc_defer() 1458 if (ap->nr_active_links) mv_qc_defer() 1469 if (ap->nr_active_links == 0) mv_qc_defer() 1483 ap->excl_link = link; mv_qc_defer() 1491 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int 
want_fbs) mv_config_fbs() argument 1493 struct mv_port_priv *pp = ap->private_data; mv_config_fbs() 1514 port_mmio = mv_ap_base(ap); mv_config_fbs() 1520 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) mv_60x1_errata_sata25() argument 1522 struct mv_host_priv *hpriv = ap->host->private_data; mv_60x1_errata_sata25() 1537 * @ap: Port being initialized 1547 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) mv_bmdma_enable_iie() argument 1549 struct mv_port_priv *pp = ap->private_data; mv_bmdma_enable_iie() 1556 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); mv_bmdma_enable_iie() 1573 static void mv_soc_led_blink_enable(struct ata_port *ap) mv_soc_led_blink_enable() argument 1575 struct ata_host *host = ap->host; mv_soc_led_blink_enable() 1583 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); mv_soc_led_blink_enable() 1588 static void mv_soc_led_blink_disable(struct ata_port *ap) mv_soc_led_blink_disable() argument 1590 struct ata_host *host = ap->host; mv_soc_led_blink_disable() 1609 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); mv_soc_led_blink_disable() 1614 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) mv_edma_cfg() argument 1617 struct mv_port_priv *pp = ap->private_data; mv_edma_cfg() 1618 struct mv_host_priv *hpriv = ap->host->private_data; mv_edma_cfg() 1619 void __iomem *port_mmio = mv_ap_base(ap); mv_edma_cfg() 1631 mv_60x1_errata_sata25(ap, want_ncq); mv_edma_cfg() 1634 int want_fbs = sata_pmp_attached(ap); mv_edma_cfg() 1645 mv_config_fbs(ap, want_ncq, want_fbs); mv_edma_cfg() 1660 mv_bmdma_enable_iie(ap, !want_edma); mv_edma_cfg() 1664 mv_soc_led_blink_enable(ap); mv_edma_cfg() 1666 mv_soc_led_blink_disable(ap); mv_edma_cfg() 1678 static void mv_port_free_dma_mem(struct ata_port *ap) mv_port_free_dma_mem() argument 1680 struct mv_host_priv *hpriv = ap->host->private_data; mv_port_free_dma_mem() 1681 struct mv_port_priv *pp = ap->private_data; mv_port_free_dma_mem() 1709 * @ap: ATA channel to manipulate 1717 static int mv_port_start(struct ata_port *ap) mv_port_start() argument 1719 struct device *dev = ap->host->dev; mv_port_start() 1720 struct mv_host_priv *hpriv = ap->host->private_data; mv_port_start() 1728 ap->private_data = pp; mv_port_start() 1742 ap->flags |= ATA_FLAG_AN; mv_port_start() 1759 spin_lock_irqsave(ap->lock, flags); mv_port_start() 1760 mv_save_cached_regs(ap); mv_port_start() 1761 mv_edma_cfg(ap, 0, 0); mv_port_start() 1762 spin_unlock_irqrestore(ap->lock, flags); mv_port_start() 1767 mv_port_free_dma_mem(ap); mv_port_start() 1773 * @ap: ATA channel to manipulate 1780 static void mv_port_stop(struct ata_port *ap) mv_port_stop() argument 1784 spin_lock_irqsave(ap->lock, flags); mv_port_stop() 1785 mv_stop_edma(ap); mv_port_stop() 1786 mv_enable_port_irqs(ap, 0); mv_port_stop() 1787 spin_unlock_irqrestore(ap->lock, flags); mv_port_stop() 1788 mv_port_free_dma_mem(ap); mv_port_stop() 1802 struct mv_port_priv *pp = qc->ap->private_data; mv_fill_sg() 1846 * @ap: Port associated with this ATA transaction. 
1852 static void mv_sff_irq_clear(struct ata_port *ap) mv_sff_irq_clear() argument 1854 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); mv_sff_irq_clear() 1898 struct ata_port *ap = qc->ap; mv_bmdma_setup() local 1899 void __iomem *port_mmio = mv_ap_base(ap); mv_bmdma_setup() 1900 struct mv_port_priv *pp = ap->private_data; mv_bmdma_setup() 1914 ap->ops->sff_exec_command(ap, &qc->tf); mv_bmdma_setup() 1926 struct ata_port *ap = qc->ap; mv_bmdma_start() local 1927 void __iomem *port_mmio = mv_ap_base(ap); mv_bmdma_start() 1944 static void mv_bmdma_stop_ap(struct ata_port *ap) mv_bmdma_stop_ap() argument 1946 void __iomem *port_mmio = mv_ap_base(ap); mv_bmdma_stop_ap() 1956 ata_sff_dma_pause(ap); mv_bmdma_stop_ap() 1962 mv_bmdma_stop_ap(qc->ap); mv_bmdma_stop() 1967 * @ap: port for which to retrieve DMA status. 1974 static u8 mv_bmdma_status(struct ata_port *ap) mv_bmdma_status() argument 1976 void __iomem *port_mmio = mv_ap_base(ap); mv_bmdma_status() 1995 mv_bmdma_stop_ap(ap); mv_bmdma_status() 1996 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) mv_bmdma_status() 2051 struct ata_port *ap = qc->ap; mv_qc_prep() local 2052 struct mv_port_priv *pp = ap->private_data; mv_qc_prep() 2152 struct ata_port *ap = qc->ap; mv_qc_prep_iie() local 2153 struct mv_port_priv *pp = ap->private_data; mv_qc_prep_iie() 2210 * @ap: ATA port to fetch status from 2221 static u8 mv_sff_check_status(struct ata_port *ap) mv_sff_check_status() argument 2223 u8 stat = ioread8(ap->ioaddr.status_addr); mv_sff_check_status() 2224 struct mv_port_priv *pp = ap->private_data; mv_sff_check_status() 2240 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) mv_send_fis() argument 2242 void __iomem *port_mmio = mv_ap_base(ap); mv_send_fis() 2272 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n", mv_send_fis() 2298 struct ata_port *ap = qc->ap; mv_qc_issue_fis() local 2299 struct mv_port_priv *pp = ap->private_data; mv_qc_issue_fis() 2305 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); mv_qc_issue_fis() 2314 ap->hsm_task_state = HSM_ST_FIRST; mv_qc_issue_fis() 2319 ap->hsm_task_state = HSM_ST_FIRST; mv_qc_issue_fis() 2321 ap->hsm_task_state = HSM_ST; mv_qc_issue_fis() 2324 ap->hsm_task_state = HSM_ST_LAST; mv_qc_issue_fis() 2348 struct ata_port *ap = qc->ap; mv_qc_issue() local 2349 void __iomem *port_mmio = mv_ap_base(ap); mv_qc_issue() 2350 struct mv_port_priv *pp = ap->private_data; mv_qc_issue() 2359 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ mv_qc_issue() 2365 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); mv_qc_issue() 2396 if (ap->flags & ATA_FLAG_PIO_POLLING) mv_qc_issue() 2411 mv_stop_edma(ap); mv_qc_issue() 2412 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); mv_qc_issue() 2413 mv_pmp_select(ap, qc->dev->link->pmp); mv_qc_issue() 2416 struct mv_host_priv *hpriv = ap->host->private_data; mv_qc_issue() 2434 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) mv_get_active_qc() argument 2436 struct mv_port_priv *pp = ap->private_data; mv_get_active_qc() 2441 qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_get_active_qc() 2447 static void mv_pmp_error_handler(struct ata_port *ap) mv_pmp_error_handler() argument 2450 struct mv_port_priv *pp = ap->private_data; mv_pmp_error_handler() 2464 struct ata_link *link = &ap->pmp_link[pmp]; mv_pmp_error_handler() 2469 ata_port_freeze(ap); mv_pmp_error_handler() 2471 sata_pmp_error_handler(ap); mv_pmp_error_handler() 2474 static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 
mv_get_err_pmp_map() argument 2476 void __iomem *port_mmio = mv_ap_base(ap); mv_get_err_pmp_map() 2481 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) mv_pmp_eh_prep() argument 2489 ehi = &ap->link.eh_info; mv_pmp_eh_prep() 2493 struct ata_link *link = &ap->pmp_link[pmp]; mv_pmp_eh_prep() 2506 static int mv_req_q_empty(struct ata_port *ap) mv_req_q_empty() argument 2508 void __iomem *port_mmio = mv_ap_base(ap); mv_req_q_empty() 2518 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) mv_handle_fbs_ncq_dev_err() argument 2520 struct mv_port_priv *pp = ap->private_data; mv_handle_fbs_ncq_dev_err() 2537 new_map = old_map | mv_get_err_pmp_map(ap); mv_handle_fbs_ncq_dev_err() 2541 mv_pmp_eh_prep(ap, new_map & ~old_map); mv_handle_fbs_ncq_dev_err() 2545 ata_port_info(ap, mv_handle_fbs_ncq_dev_err() 2548 ap->qc_active, failed_links, mv_handle_fbs_ncq_dev_err() 2549 ap->nr_active_links); mv_handle_fbs_ncq_dev_err() 2551 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { mv_handle_fbs_ncq_dev_err() 2552 mv_process_crpb_entries(ap, pp); mv_handle_fbs_ncq_dev_err() 2553 mv_stop_edma(ap); mv_handle_fbs_ncq_dev_err() 2554 mv_eh_freeze(ap); mv_handle_fbs_ncq_dev_err() 2555 ata_port_info(ap, "%s: done\n", __func__); mv_handle_fbs_ncq_dev_err() 2558 ata_port_info(ap, "%s: waiting\n", __func__); mv_handle_fbs_ncq_dev_err() 2562 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) mv_handle_fbs_non_ncq_dev_err() argument 2578 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) mv_handle_dev_err() argument 2580 struct mv_port_priv *pp = ap->private_data; mv_handle_dev_err() 2600 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", mv_handle_dev_err() 2604 return mv_handle_fbs_ncq_dev_err(ap); mv_handle_dev_err() 2612 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", mv_handle_dev_err() 2616 return mv_handle_fbs_non_ncq_dev_err(ap); mv_handle_dev_err() 2621 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) mv_unexpected_intr() argument 2623 struct ata_eh_info *ehi = &ap->link.eh_info; mv_unexpected_intr() 2630 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_unexpected_intr() 2637 ata_port_freeze(ap); mv_unexpected_intr() 2642 * @ap: ATA channel to manipulate 2651 static void mv_err_intr(struct ata_port *ap) mv_err_intr() argument 2653 void __iomem *port_mmio = mv_ap_base(ap); mv_err_intr() 2656 struct mv_port_priv *pp = ap->private_data; mv_err_intr() 2657 struct mv_host_priv *hpriv = ap->host->private_data; mv_err_intr() 2659 struct ata_eh_info *ehi = &ap->link.eh_info; mv_err_intr() 2668 sata_scr_read(&ap->link, SCR_ERROR, &serr); mv_err_intr() 2669 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); mv_err_intr() 2683 if (mv_handle_dev_err(ap, edma_err_cause)) mv_err_intr() 2687 qc = mv_get_active_qc(ap); mv_err_intr() 2697 sata_async_notification(ap); mv_err_intr() 2767 mv_eh_freeze(ap); mv_err_intr() 2773 ata_port_freeze(ap); mv_err_intr() 2782 ata_port_abort(ap); mv_err_intr() 2786 static bool mv_process_crpb_response(struct ata_port *ap, mv_process_crpb_response() argument 2814 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) mv_process_crpb_entries() argument 2816 void __iomem *port_mmio = mv_ap_base(ap); mv_process_crpb_entries() 2817 struct mv_host_priv *hpriv = ap->host->private_data; mv_process_crpb_entries() 2836 tag = ap->link.active_tag; mv_process_crpb_entries() 2841 if (mv_process_crpb_response(ap, response, tag, ncq_enabled)) 
mv_process_crpb_entries() 2847 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); mv_process_crpb_entries() 2856 static void mv_port_intr(struct ata_port *ap, u32 port_cause) mv_port_intr() argument 2866 pp = ap->private_data; mv_port_intr() 2872 mv_process_crpb_entries(ap, pp); mv_port_intr() 2874 mv_handle_fbs_ncq_dev_err(ap); mv_port_intr() 2880 mv_err_intr(ap); mv_port_intr() 2882 struct ata_queued_cmd *qc = mv_get_active_qc(ap); mv_port_intr() 2884 ata_bmdma_port_intr(ap, qc); mv_port_intr() 2886 mv_unexpected_intr(ap, edma_was_enabled); mv_port_intr() 2909 struct ata_port *ap = host->ports[port]; mv_host_intr() local 2958 mv_port_intr(ap, port_cause); mv_host_intr() 2966 struct ata_port *ap; mv_pci_error() local 2982 ap = host->ports[i]; mv_pci_error() 2983 if (!ata_link_offline(&ap->link)) { mv_pci_error() 2984 ehi = &ap->link.eh_info; mv_pci_error() 2991 qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_pci_error() 2997 ata_port_freeze(ap); mv_pci_error() 3072 struct mv_host_priv *hpriv = link->ap->host->private_data; mv5_scr_read() 3074 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); mv5_scr_read() 3086 struct mv_host_priv *hpriv = link->ap->host->private_data; mv5_scr_write() 3088 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); mv5_scr_write() 3606 static void mv_pmp_select(struct ata_port *ap, int pmp) mv_pmp_select() argument 3608 if (sata_pmp_supported(ap)) { mv_pmp_select() 3609 void __iomem *port_mmio = mv_ap_base(ap); mv_pmp_select() 3623 mv_pmp_select(link->ap, sata_srst_pmp(link)); mv_pmp_hardreset() 3630 mv_pmp_select(link->ap, sata_srst_pmp(link)); mv_softreset() 3637 struct ata_port *ap = link->ap; mv_hardreset() local 3638 struct mv_host_priv *hpriv = ap->host->private_data; mv_hardreset() 3639 struct mv_port_priv *pp = ap->private_data; mv_hardreset() 3645 mv_reset_channel(hpriv, mmio, ap->port_no); mv_hardreset() 3663 mv_setup_ifcfg(mv_ap_base(ap), 0); mv_hardreset() 3668 mv_save_cached_regs(ap); mv_hardreset() 3669 mv_edma_cfg(ap, 0, 0); mv_hardreset() 3674 static void mv_eh_freeze(struct ata_port *ap) mv_eh_freeze() argument 3676 mv_stop_edma(ap); mv_eh_freeze() 3677 mv_enable_port_irqs(ap, 0); mv_eh_freeze() 3680 static void mv_eh_thaw(struct ata_port *ap) mv_eh_thaw() argument 3682 struct mv_host_priv *hpriv = ap->host->private_data; mv_eh_thaw() 3683 unsigned int port = ap->port_no; mv_eh_thaw() 3686 void __iomem *port_mmio = mv_ap_base(ap); mv_eh_thaw() 3696 mv_enable_port_irqs(ap, ERR_IRQ); mv_eh_thaw() 3977 struct ata_port *ap = host->ports[port]; mv_init_host() local 3980 mv_port_init(&ap->ioaddr, port_mmio); mv_init_host() 4443 struct ata_port *ap = host->ports[port]; mv_pci_init_one() local 4447 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); mv_pci_init_one() 4448 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); mv_pci_init_one()
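
Note: the sata_mv excerpts above (mv_write_cached_reg(), mv_save_cached_regs(), mv_edma_cfg()) share one idea: keep a software copy of the slow port-configuration registers in the per-port private data and only touch the hardware when a value actually changes. A minimal sketch of that pattern, with an invented register offset and bit layout:

    #include <linux/bitops.h>
    #include <linux/io.h>
    #include <linux/libata.h>

    /* Hypothetical per-port state; sata_mv keeps similar caches in mv_port_priv. */
    struct demo_port_priv {
            u32 cached_cfg;         /* last value written to the (assumed) config register */
    };

    /* Touch the hardware only when the cached copy and the new value differ. */
    static void demo_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
    {
            if (new != *old) {
                    *old = new;
                    writel(new, addr);
            }
    }

    static void demo_config_ncq(struct ata_port *ap, void __iomem *port_mmio,
                                bool want_ncq)
    {
            struct demo_port_priv *pp = ap->private_data;
            u32 cfg = pp->cached_cfg;

            if (want_ncq)
                    cfg |= BIT(5);          /* illustrative enable bit, not the real layout */
            else
                    cfg &= ~BIT(5);

            demo_write_cached_reg(port_mmio + 0x0, &pp->cached_cfg, cfg);
    }

mv_port_start() above allocates the private structure, stores it in ap->private_data, and seeds the cache (mv_save_cached_regs()) under ap->lock before the first mv_edma_cfg() call.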
|
H A D | pata_serverworks.c | 61 * @ap: ATA port to do cable detect 67 static int oem_cable(struct ata_port *ap) oem_cable() argument 69 struct pci_dev *pdev = to_pci_dev(ap->host->dev); oem_cable() 71 if (pdev->subsystem_device & (1 << (ap->port_no + 14))) oem_cable() 79 int (*cable_detect)(struct ata_port *ap); 96 * @ap: ATA port 102 static int serverworks_cable_detect(struct ata_port *ap) serverworks_cable_detect() argument 104 struct pci_dev *pdev = to_pci_dev(ap->host->dev); serverworks_cable_detect() 111 return cb->cable_detect(ap); serverworks_cable_detect() 190 * @ap: ATA interface 196 static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev) serverworks_set_piomode() argument 199 int offset = 1 + 2 * ap->port_no - adev->devno; serverworks_set_piomode() 200 int devbits = (2 * ap->port_no + adev->devno) * 4; serverworks_set_piomode() 202 struct pci_dev *pdev = to_pci_dev(ap->host->dev); serverworks_set_piomode() 218 * @ap: ATA interface 226 static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev) serverworks_set_dmamode() argument 229 int offset = 1 + 2 * ap->port_no - adev->devno; serverworks_set_dmamode() 230 int devbits = 2 * ap->port_no + adev->devno; serverworks_set_dmamode() 233 struct pci_dev *pdev = to_pci_dev(ap->host->dev); serverworks_set_dmamode() 236 pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra); serverworks_set_dmamode() 250 pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra); serverworks_set_dmamode()
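
Note: oem_cable() above reads the cable type for each port out of a bit in the PCI subsystem device ID. A sketch of that check as a complete ->cable_detect hook (which wire type a set bit means is assumed here):

    #include <linux/pci.h>
    #include <linux/libata.h>

    static int demo_oem_cable(struct ata_port *ap)
    {
            struct pci_dev *pdev = to_pci_dev(ap->host->dev);

            /* one bit per port in the subsystem ID; 80-wire when set is assumed */
            if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
                    return ATA_CBL_PATA80;
            return ATA_CBL_PATA40;
    }

serverworks_cable_detect() then just looks the controller up in a small table whose entries carry a cable_detect function pointer, so per-OEM quirks stay out of the common path.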
|
H A D | pata_jmicron.c | 43 struct ata_port *ap = link->ap; jmicron_pre_reset() local 44 struct pci_dev *pdev = to_pci_dev(ap->host->dev); jmicron_pre_reset() 47 int port_mask = 1<< (4 * ap->port_no); jmicron_pre_reset() 48 int port = ap->port_no; jmicron_pre_reset() 87 ap->cbl = ATA_CBL_PATA40; jmicron_pre_reset() 89 ap->cbl = ATA_CBL_PATA80; jmicron_pre_reset() 96 ap->cbl = ATA_CBL_PATA40; jmicron_pre_reset() 98 ap->cbl = ATA_CBL_PATA80; jmicron_pre_reset() 101 ap->cbl = ATA_CBL_SATA; jmicron_pre_reset()
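
Note: jmicron_pre_reset() above decides ap->cbl (PATA40, PATA80 or SATA) per port from controller-specific PCI config state before handing off to the generic reset path. A rough sketch of that shape; the 0x40 offset and bit layout below are placeholders, not the real JMicron register map:

    #include <linux/pci.h>
    #include <linux/libata.h>

    static int demo_pre_reset(struct ata_link *link, unsigned long deadline)
    {
            struct ata_port *ap = link->ap;
            struct pci_dev *pdev = to_pci_dev(ap->host->dev);
            u32 control;

            pci_read_config_dword(pdev, 0x40, &control);    /* assumed offset */

            if (control & (1 << (4 * ap->port_no)))
                    ap->cbl = ATA_CBL_PATA80;
            else
                    ap->cbl = ATA_CBL_PATA40;

            return ata_sff_prereset(link, deadline);
    }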
|
H A D | pata_mpc52xx.c | 390 mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev) mpc52xx_ata_set_piomode() argument 392 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_ata_set_piomode() 400 dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio); mpc52xx_ata_set_piomode() 408 mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev) mpc52xx_ata_set_dmamode() argument 410 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_ata_set_dmamode() 422 dev_alert(ap->dev, mpc52xx_ata_set_dmamode() 432 mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device) mpc52xx_ata_dev_select() argument 434 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_ata_dev_select() 439 ata_sff_dev_select(ap, device); mpc52xx_ata_dev_select() 445 struct ata_port *ap = qc->ap; mpc52xx_ata_build_dmatable() local 446 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_ata_build_dmatable() 485 dev_alert(ap->dev, "dma table" mpc52xx_ata_build_dmatable() 501 struct ata_port *ap = qc->ap; mpc52xx_bmdma_setup() local 502 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_bmdma_setup() 509 dev_alert(ap->dev, "%s: %i, return 1?\n", mpc52xx_bmdma_setup() 514 dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", mpc52xx_bmdma_setup() 551 ata_wait_idle(ap); mpc52xx_bmdma_setup() 552 ap->ops->sff_exec_command(ap, &qc->tf); mpc52xx_bmdma_setup() 558 struct ata_port *ap = qc->ap; mpc52xx_bmdma_start() local 559 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_bmdma_start() 568 struct ata_port *ap = qc->ap; mpc52xx_bmdma_stop() local 569 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_bmdma_stop() 577 dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", mpc52xx_bmdma_stop() 582 mpc52xx_bmdma_status(struct ata_port *ap) mpc52xx_bmdma_status() argument 584 struct mpc52xx_ata_priv *priv = ap->host->private_data; mpc52xx_bmdma_status() 588 dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", mpc52xx_bmdma_status() 630 struct ata_port *ap; mpc52xx_ata_init_one() local 637 ap = host->ports[0]; mpc52xx_ata_init_one() 638 ap->flags |= ATA_FLAG_SLAVE_POSS; mpc52xx_ata_init_one() 639 ap->pio_mask = ATA_PIO4; mpc52xx_ata_init_one() 640 ap->mwdma_mask = mwdma_mask; mpc52xx_ata_init_one() 641 ap->udma_mask = udma_mask; mpc52xx_ata_init_one() 642 ap->ops = &mpc52xx_ata_port_ops; mpc52xx_ata_init_one() 645 aio = &ap->ioaddr; mpc52xx_ata_init_one() 660 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); mpc52xx_ata_init_one()
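
Note: mpc52xx_ata_set_piomode()/set_dmamode() above translate adev->pio_mode/dma_mode into a table index and refuse anything out of range with dev_err()/dev_alert(). A skeleton of that ->set_piomode shape, with made-up cycle times and the actual register write elided:

    #include <linux/kernel.h>
    #include <linux/libata.h>

    /* Illustrative cycle times only; the real tables live in the driver. */
    static const u16 demo_pio_cycle_ns[5] = { 600, 383, 240, 180, 120 };

    static void demo_set_piomode(struct ata_port *ap, struct ata_device *adev)
    {
            int pio = adev->pio_mode - XFER_PIO_0;

            if (pio < 0 || pio > 4) {
                    dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
                    return;
            }

            /* program the cycle time; the register write itself is driver specific */
            dev_dbg(ap->dev, "PIO%d -> %u ns cycle\n", pio, demo_pio_cycle_ns[pio]);
    }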
|
H A D | pata_rb532_cf.c | 88 struct ata_port *ap; rb532_pata_setup_ports() local 90 ap = ah->ports[0]; rb532_pata_setup_ports() 92 ap->ops = &rb532_pata_port_ops; rb532_pata_setup_ports() 93 ap->pio_mask = ATA_PIO4; rb532_pata_setup_ports() 95 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE; rb532_pata_setup_ports() 96 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; rb532_pata_setup_ports() 97 ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; rb532_pata_setup_ports() 99 ata_sff_std_ports(&ap->ioaddr); rb532_pata_setup_ports() 101 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32; rb532_pata_setup_ports() 102 ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR; rb532_pata_setup_ports()
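
Note: rb532_pata_setup_ports() is a compact example of wiring up a memory-mapped SFF port: point cmd/ctl at the register window, let ata_sff_std_ports() derive the rest of the taskfile registers, then override the ones that sit at non-standard offsets. A sketch with assumed offsets:

    #include <linux/io.h>
    #include <linux/libata.h>

    static void demo_setup_ioaddr(struct ata_port *ap, void __iomem *iobase)
    {
            ap->ioaddr.cmd_addr       = iobase + 0x0800;    /* assumed task-file base */
            ap->ioaddr.ctl_addr       = iobase + 0x080e;    /* assumed device control */
            ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;

            ata_sff_std_ports(&ap->ioaddr);

            /* rb532 then points data_addr at a wide data buffer at its own offset */
            ap->ioaddr.data_addr = iobase + 0x0c00;         /* assumed */
    }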
|
H A D | pata_it8213.c | 37 struct ata_port *ap = link->ap; it8213_pre_reset() local 38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it8213_pre_reset() 39 if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) it8213_pre_reset() 47 * @ap: Port 53 static int it8213_cable_detect(struct ata_port *ap) it8213_cable_detect() argument 55 struct pci_dev *pdev = to_pci_dev(ap->host->dev); it8213_cable_detect() 65 * @ap: Port whose timings we are configuring 74 static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev) it8213_set_piomode() argument 77 struct pci_dev *dev = to_pci_dev(ap->host->dev); it8213_set_piomode() 78 unsigned int master_port = ap->port_no ? 0x42 : 0x40; it8213_set_piomode() 129 * @ap: Port whose timings we are configuring 139 static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev) it8213_set_dmamode() argument 141 struct pci_dev *dev = to_pci_dev(ap->host->dev); it8213_set_dmamode() 214 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); it8213_set_dmamode()
|
H A D | pata_via.c | 170 * @ap: ATA port 180 static int via_cable_detect(struct ata_port *ap) { via_cable_detect() argument 181 const struct via_isa_bridge *config = ap->host->private_data; via_cable_detect() 182 struct pci_dev *pdev = to_pci_dev(ap->host->dev); via_cable_detect() 188 if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0) via_cable_detect() 201 if (ata66 & (0x10100000 >> (16 * ap->port_no))) via_cable_detect() 204 if (ata_acpi_init_gtm(ap) && via_cable_detect() 205 ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) via_cable_detect() 212 struct ata_port *ap = link->ap; via_pre_reset() local 213 const struct via_isa_bridge *config = ap->host->private_data; via_pre_reset() 220 struct pci_dev *pdev = to_pci_dev(ap->host->dev); via_pre_reset() 221 if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) via_pre_reset() 231 * @ap: ATA interface 244 static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, via_do_set_mode() argument 247 struct pci_dev *pdev = to_pci_dev(ap->host->dev); via_do_set_mode() 254 int offset = 3 - (2*ap->port_no) - adev->devno; via_do_set_mode() 288 pci_write_config_byte(pdev, 0x4F - ap->port_no, via_do_set_mode() 329 static void via_set_piomode(struct ata_port *ap, struct ata_device *adev) via_set_piomode() argument 331 const struct via_isa_bridge *config = ap->host->private_data; via_set_piomode() 334 via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask); via_set_piomode() 337 static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) via_set_dmamode() argument 339 const struct via_isa_bridge *config = ap->host->private_data; via_set_dmamode() 342 via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask); via_set_dmamode() 356 struct ata_host *host = dev->link->ap->host; via_mode_filter() 380 * @ap: Port to which output is sent 389 static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) via_tf_load() argument 391 struct ata_ioports *ioaddr = &ap->ioaddr; via_tf_load() 392 struct via_port *vp = ap->private_data; via_tf_load() 396 if (tf->ctl != ap->last_ctl) { via_tf_load() 398 ap->last_ctl = tf->ctl; via_tf_load() 399 ata_wait_idle(ap); via_tf_load() 438 ata_wait_idle(ap); via_tf_load() 441 static int via_port_start(struct ata_port *ap) via_port_start() argument 444 struct pci_dev *pdev = to_pci_dev(ap->host->dev); via_port_start() 446 int ret = ata_bmdma_port_start(ap); via_port_start() 453 ap->private_data = vp; via_port_start()
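
Note: via_port_start() above shows the usual way to hang driver-private state off a port: run ata_bmdma_port_start() first, then devm-allocate a small structure and store it in ap->private_data (via_tf_load() later consults it, together with ap->last_ctl, to skip redundant taskfile writes). A minimal sketch:

    #include <linux/pci.h>
    #include <linux/libata.h>

    struct demo_via_port {
            u8 cached_device;       /* placeholder field */
    };

    static int demo_port_start(struct ata_port *ap)
    {
            struct pci_dev *pdev = to_pci_dev(ap->host->dev);
            struct demo_via_port *vp;
            int ret;

            ret = ata_bmdma_port_start(ap);
            if (ret < 0)
                    return ret;

            vp = devm_kzalloc(&pdev->dev, sizeof(*vp), GFP_KERNEL);
            if (!vp)
                    return -ENOMEM;

            ap->private_data = vp;
            return 0;
    }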
|
H A D | pata_arasan_cf.c | 284 struct ata_port *ap = acdev->host->ports[0]; cf_card_detect() local 285 struct ata_eh_info *ehi = &ap->link.eh_info; cf_card_detect() 302 ata_port_freeze(ap); cf_card_detect() 374 ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout"); dma_complete() 549 status = ioread8(qc->ap->ioaddr.altstatus_addr); data_xfer() 565 qc->ap->hsm_task_state = HSM_ST_ERR; data_xfer() 568 spin_unlock_irqrestore(qc->ap->lock, flags); data_xfer() 582 status = ioread8(qc->ap->ioaddr.altstatus_addr); delayed_finish() 643 static void arasan_cf_freeze(struct ata_port *ap) arasan_cf_freeze() argument 645 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_freeze() 653 ata_sff_dma_pause(ap); arasan_cf_freeze() 654 ata_sff_freeze(ap); arasan_cf_freeze() 657 static void arasan_cf_error_handler(struct ata_port *ap) arasan_cf_error_handler() argument 659 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_error_handler() 668 return ata_sff_error_handler(ap); arasan_cf_error_handler() 674 struct ata_port *ap = qc->ap; arasan_cf_dma_start() local 682 ap->ops->sff_exec_command(ap, tf); arasan_cf_dma_start() 688 struct ata_port *ap = qc->ap; arasan_cf_qc_issue() local 689 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_qc_issue() 696 ata_wait_idle(ap); arasan_cf_qc_issue() 697 ata_sff_dev_select(ap, qc->dev->devno); arasan_cf_qc_issue() 698 ata_wait_idle(ap); arasan_cf_qc_issue() 705 ap->ops->sff_tf_load(ap, &qc->tf); arasan_cf_qc_issue() 709 ap->hsm_task_state = HSM_ST_LAST; arasan_cf_qc_issue() 720 static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev) arasan_cf_set_piomode() argument 722 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_set_piomode() 729 dev_err(ap->dev, "Unknown PIO mode\n"); arasan_cf_set_piomode() 746 static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev) arasan_cf_set_dmamode() argument 748 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_set_dmamode() 767 dev_err(ap->dev, "Unknown DMA mode\n"); arasan_cf_set_dmamode() 795 struct ata_port *ap; arasan_cf_probe() local 851 ap = host->ports[0]; arasan_cf_probe() 854 ap->ops = &arasan_cf_ops; arasan_cf_probe() 855 ap->pio_mask = ATA_PIO6; arasan_cf_probe() 856 ap->mwdma_mask = ATA_MWDMA4; arasan_cf_probe() 857 ap->udma_mask = ATA_UDMA6; arasan_cf_probe() 868 ap->ops->set_piomode = NULL; arasan_cf_probe() 869 ap->pio_mask = 0; arasan_cf_probe() 872 ap->mwdma_mask = 0; arasan_cf_probe() 874 ap->udma_mask = 0; arasan_cf_probe() 876 ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI; arasan_cf_probe() 878 ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT; arasan_cf_probe() 879 ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT; arasan_cf_probe() 880 ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR; arasan_cf_probe() 881 ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR; arasan_cf_probe() 882 ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC; arasan_cf_probe() 883 ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN; arasan_cf_probe() 884 ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL; arasan_cf_probe() 885 ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH; arasan_cf_probe() 886 ap->ioaddr.device_addr = acdev->vbase + ATA_SH; arasan_cf_probe() 887 ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD; arasan_cf_probe() 888 ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD; arasan_cf_probe() 889 ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR; arasan_cf_probe() 890 ap->ioaddr.ctl_addr = acdev->vbase + 
ATA_ASTS_DCTR; arasan_cf_probe() 892 ata_port_desc(ap, "phy_addr %llx virt_addr %p", arasan_cf_probe()
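
Note: arasan_cf_probe() above advertises full PIO/MWDMA/UDMA masks and then strips the DMA masks (and the corresponding set_*mode hooks) when no DMA channel is available, ending up with a polled PIO port. A condensed sketch; the real probe gates each mask on a separate capability check, and have_dma stands in for that:

    #include <linux/libata.h>

    static void demo_configure_modes(struct ata_port *ap, bool have_dma)
    {
            ap->pio_mask   = ATA_PIO6;
            ap->mwdma_mask = ATA_MWDMA4;
            ap->udma_mask  = ATA_UDMA6;

            if (!have_dma) {
                    ap->mwdma_mask = 0;
                    ap->udma_mask  = 0;
                    ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
            }
    }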
|
H A D | libata-scsi.c | 64 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 66 static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, 119 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_lpm_store() local 136 spin_lock_irqsave(ap->lock, flags); ata_scsi_lpm_store() 138 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 139 ata_for_each_dev(dev, &ap->link, ENABLED) { ata_for_each_link() 147 ap->target_lpm_policy = policy; 148 ata_port_schedule_eh(ap); 150 spin_unlock_irqrestore(ap->lock, flags); 158 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_lpm_show() local 160 if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) ata_scsi_lpm_show() 164 ata_lpm_policy_names[ap->target_lpm_policy]); ata_scsi_lpm_show() 174 struct ata_port *ap; ata_scsi_park_show() local 181 ap = ata_shost_to_port(sdev->host); ata_scsi_park_show() 183 spin_lock_irqsave(ap->lock, flags); ata_scsi_park_show() 184 dev = ata_scsi_find_dev(ap, sdev); ata_scsi_park_show() 196 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && ata_scsi_park_show() 204 spin_unlock_irq(ap->lock); ata_scsi_park_show() 214 struct ata_port *ap; ata_scsi_park_store() local 230 ap = ata_shost_to_port(sdev->host); ata_scsi_park_store() 232 spin_lock_irqsave(ap->lock, flags); ata_scsi_park_store() 233 dev = ata_scsi_find_dev(ap, sdev); ata_scsi_park_store() 252 ata_port_schedule_eh(ap); ata_scsi_park_store() 253 complete(&ap->park_req_pending); ata_scsi_park_store() 265 spin_unlock_irqrestore(ap->lock, flags); ata_scsi_park_store() 285 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_em_message_store() local 286 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) ata_scsi_em_message_store() 287 return ap->ops->em_store(ap, buf, count); ata_scsi_em_message_store() 296 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_em_message_show() local 298 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) ata_scsi_em_message_show() 299 return ap->ops->em_show(ap, buf); ata_scsi_em_message_show() 311 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_em_message_type_show() local 313 return snprintf(buf, 23, "%d\n", ap->em_message_type); ata_scsi_em_message_type_show() 324 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_activity_show() local 325 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); ata_scsi_activity_show() 327 if (atadev && ap->ops->sw_activity_show && ata_scsi_activity_show() 328 (ap->flags & ATA_FLAG_SW_ACTIVITY)) ata_scsi_activity_show() 329 return ap->ops->sw_activity_show(atadev, buf); ata_scsi_activity_show() 338 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_activity_store() local 339 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); ata_scsi_activity_store() 343 if (atadev && ap->ops->sw_activity_store && ata_scsi_activity_store() 344 (ap->flags & ATA_FLAG_SW_ACTIVITY)) { ata_scsi_activity_store() 348 rc = ap->ops->sw_activity_store(atadev, val); ata_scsi_activity_store() 415 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_unlock_native_capacity() local 419 spin_lock_irqsave(ap->lock, flags); ata_scsi_unlock_native_capacity() 421 dev = ata_scsi_find_dev(ap, sdev); ata_scsi_unlock_native_capacity() 425 ata_port_schedule_eh(ap); ata_scsi_unlock_native_capacity() 428 spin_unlock_irqrestore(ap->lock, flags); ata_scsi_unlock_native_capacity() 429 ata_port_wait_eh(ap); ata_scsi_unlock_native_capacity() 434 * @ap: target port 444 static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev, ata_get_identity() 
argument 447 struct ata_device *dev = ata_scsi_find_dev(ap, sdev); ata_get_identity() 666 static int ata_ioc32(struct ata_port *ap) ata_ioc32() argument 668 if (ap->flags & ATA_FLAG_PIO_DMA) ata_ioc32() 670 if (ap->pflags & ATA_PFLAG_PIO32) ata_ioc32() 675 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, ata_sas_scsi_ioctl() argument 684 spin_lock_irqsave(ap->lock, flags); ata_sas_scsi_ioctl() 685 val = ata_ioc32(ap); ata_sas_scsi_ioctl() 686 spin_unlock_irqrestore(ap->lock, flags); ata_sas_scsi_ioctl() 692 spin_lock_irqsave(ap->lock, flags); ata_sas_scsi_ioctl() 693 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { ata_sas_scsi_ioctl() 695 ap->pflags |= ATA_PFLAG_PIO32; ata_sas_scsi_ioctl() 697 ap->pflags &= ~ATA_PFLAG_PIO32; ata_sas_scsi_ioctl() 699 if (val != ata_ioc32(ap)) ata_sas_scsi_ioctl() 702 spin_unlock_irqrestore(ap->lock, flags); ata_sas_scsi_ioctl() 706 return ata_get_identity(ap, scsidev, arg); ata_sas_scsi_ioctl() 982 int verbose = qc->ap->ops->error_handler == NULL; ata_gen_passthru_sense() 994 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, ata_gen_passthru_sense() 1056 int verbose = qc->ap->ops->error_handler == NULL; ata_gen_ata_sense() 1071 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, ata_gen_ata_sense() 1210 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_slave_config() local 1211 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); ata_scsi_slave_config() 1238 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_slave_destroy() local 1243 if (!ap->ops->error_handler) ata_scsi_slave_destroy() 1246 spin_lock_irqsave(ap->lock, flags); ata_scsi_slave_destroy() 1247 dev = __ata_scsi_find_dev(ap, sdev); ata_scsi_slave_destroy() 1252 ata_port_schedule_eh(ap); ata_scsi_slave_destroy() 1254 spin_unlock_irqrestore(ap->lock, flags); ata_scsi_slave_destroy() 1263 * @ap: ATA port to which the device change the queue depth 1271 int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, __ata_change_queue_depth() argument 1280 dev = ata_scsi_find_dev(ap, sdev); __ata_change_queue_depth() 1285 spin_lock_irqsave(ap->lock, flags); __ata_change_queue_depth() 1291 spin_unlock_irqrestore(ap->lock, flags); __ata_change_queue_depth() 1321 struct ata_port *ap = ata_shost_to_port(sdev->host); ata_scsi_change_queue_depth() local 1323 return __ata_change_queue_depth(ap, sdev, queue_depth); ata_scsi_change_queue_depth() 1382 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && ata_scsi_start_stop_xlat() 1386 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && ata_scsi_start_stop_xlat() 1761 struct ata_port *ap = qc->ap; ata_scsi_qc_complete() local 1792 if (need_sense && !ap->ops->error_handler) ata_scsi_qc_complete() 1793 ata_dump_status(ap->print_id, &qc->result_tf); ata_scsi_qc_complete() 1829 struct ata_port *ap = dev->link->ap; ata_scsi_translate() local 1857 if (ap->ops->qc_defer) { ata_scsi_translate() 1858 if ((rc = ap->ops->qc_defer(qc))) ata_scsi_translate() 2601 static inline int ata_pio_use_silly(struct ata_port *ap) ata_pio_use_silly() argument 2603 return (ap->flags & ATA_FLAG_PIO_DMA); ata_pio_use_silly() 2608 struct ata_port *ap = qc->ap; atapi_request_sense() local 2616 if (ap->ops->sff_tf_read) atapi_request_sense() 2617 ap->ops->sff_tf_read(ap, &qc->tf); atapi_request_sense() 2638 if (ata_pio_use_silly(ap)) { atapi_request_sense() 2663 if (unlikely(qc->ap->ops->error_handler && atapi_qc_complete() 2834 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) 
ata_find_dev() argument 2836 if (!sata_pmp_attached(ap)) { ata_find_dev() 2837 if (likely(devno < ata_link_max_devices(&ap->link))) ata_find_dev() 2838 return &ap->link.device[devno]; ata_find_dev() 2840 if (likely(devno < ap->nr_pmp_links)) ata_find_dev() 2841 return &ap->pmp_link[devno].device[0]; ata_find_dev() 2847 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, __ata_scsi_find_dev() argument 2853 if (!sata_pmp_attached(ap)) { __ata_scsi_find_dev() 2863 return ata_find_dev(ap, devno); __ata_scsi_find_dev() 2868 * @ap: ATA port to which the device is attached 2883 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) ata_scsi_find_dev() argument 2885 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); ata_scsi_find_dev() 3424 * @ap: ATA port to which the command was being sent 3430 static inline void ata_scsi_dump_cdb(struct ata_port *ap, ata_scsi_dump_cdb() argument 3438 ap->print_id, ata_scsi_dump_cdb() 3515 struct ata_port *ap; ata_scsi_queuecmd() local 3521 ap = ata_shost_to_port(shost); ata_scsi_queuecmd() 3523 spin_lock_irqsave(ap->lock, irq_flags); ata_scsi_queuecmd() 3525 ata_scsi_dump_cdb(ap, cmd); ata_scsi_queuecmd() 3527 dev = ata_scsi_find_dev(ap, scsidev); ata_scsi_queuecmd() 3535 spin_unlock_irqrestore(ap->lock, irq_flags); ata_scsi_queuecmd() 3664 struct ata_port *ap = host->ports[i]; ata_scsi_add_hosts() local 3673 *(struct ata_port **)&shost->hostdata[0] = ap; ata_scsi_add_hosts() 3674 ap->scsi_host = shost; ata_scsi_add_hosts() 3677 shost->unique_id = ap->print_id; ata_scsi_add_hosts() 3694 rc = scsi_add_host_with_dma(ap->scsi_host, ata_scsi_add_hosts() 3695 &ap->tdev, ap->host->dev); ata_scsi_add_hosts() 3714 void ata_scsi_scan_host(struct ata_port *ap, int sync) ata_scsi_scan_host() argument 3722 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 3735 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, ata_for_each_dev() 3750 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 3779 ata_port_err(ap, 3783 queue_delayed_work(system_long_wq, &ap->hotplug_task, 3823 struct ata_port *ap = dev->link->ap; ata_scsi_remove_dev() local 3833 mutex_lock(&ap->scsi_host->scan_mutex); ata_scsi_remove_dev() 3834 spin_lock_irqsave(ap->lock, flags); ata_scsi_remove_dev() 3858 spin_unlock_irqrestore(ap->lock, flags); ata_scsi_remove_dev() 3859 mutex_unlock(&ap->scsi_host->scan_mutex); ata_scsi_remove_dev() 3872 struct ata_port *ap = link->ap; ata_scsi_handle_link_detach() local 3881 spin_lock_irqsave(ap->lock, flags); ata_for_each_dev() 3883 spin_unlock_irqrestore(ap->lock, flags); ata_for_each_dev() 3923 struct ata_port *ap = ata_scsi_hotplug() local 3927 if (ap->pflags & ATA_PFLAG_UNLOADING) { ata_scsi_hotplug() 3954 mutex_lock(&ap->scsi_scan_mutex); ata_scsi_hotplug() 3960 ata_scsi_handle_link_detach(&ap->link); ata_scsi_hotplug() 3961 if (ap->pmp_link) ata_scsi_hotplug() 3963 ata_scsi_handle_link_detach(&ap->pmp_link[i]); ata_scsi_hotplug() 3966 ata_scsi_scan_host(ap, 0); ata_scsi_hotplug() 3968 mutex_unlock(&ap->scsi_scan_mutex); ata_scsi_hotplug() 3991 struct ata_port *ap = ata_shost_to_port(shost); ata_scsi_user_scan() local 3995 if (!ap->ops->error_handler) ata_scsi_user_scan() 4001 if (!sata_pmp_attached(ap)) { ata_scsi_user_scan() 4011 spin_lock_irqsave(ap->lock, flags); ata_scsi_user_scan() 4016 ata_for_each_link(link, ap, EDGE) { ata_for_each_link() 4022 struct ata_device *dev = ata_find_dev(ap, devno); 4033 ata_port_schedule_eh(ap); 4034 spin_unlock_irqrestore(ap->lock, flags); 4035 ata_port_wait_eh(ap); 4037 
spin_unlock_irqrestore(ap->lock, flags); 4054 struct ata_port *ap = ata_scsi_dev_rescan() local 4060 mutex_lock(&ap->scsi_scan_mutex); ata_scsi_dev_rescan() 4061 spin_lock_irqsave(ap->lock, flags); ata_scsi_dev_rescan() 4063 ata_for_each_link(link, ap, EDGE) { ata_for_each_dev() 4072 spin_unlock_irqrestore(ap->lock, flags); ata_for_each_dev() 4075 spin_lock_irqsave(ap->lock, flags); ata_for_each_dev() 4079 spin_unlock_irqrestore(ap->lock, flags); 4080 mutex_unlock(&ap->scsi_scan_mutex); 4100 struct ata_port *ap; ata_sas_port_alloc() local 4102 ap = ata_port_alloc(host); ata_sas_port_alloc() 4103 if (!ap) ata_sas_port_alloc() 4106 ap->port_no = 0; ata_sas_port_alloc() 4107 ap->lock = &host->lock; ata_sas_port_alloc() 4108 ap->pio_mask = port_info->pio_mask; ata_sas_port_alloc() 4109 ap->mwdma_mask = port_info->mwdma_mask; ata_sas_port_alloc() 4110 ap->udma_mask = port_info->udma_mask; ata_sas_port_alloc() 4111 ap->flags |= port_info->flags; ata_sas_port_alloc() 4112 ap->ops = port_info->port_ops; ata_sas_port_alloc() 4113 ap->cbl = ATA_CBL_SATA; ata_sas_port_alloc() 4115 return ap; ata_sas_port_alloc() 4121 * @ap: Port to initialize 4131 int ata_sas_port_start(struct ata_port *ap) ata_sas_port_start() argument 4137 if (!ap->ops->error_handler) ata_sas_port_start() 4138 ap->pflags &= ~ATA_PFLAG_FROZEN; ata_sas_port_start() 4145 * @ap: Port to shut down 4153 void ata_sas_port_stop(struct ata_port *ap) ata_sas_port_stop() argument 4160 * @ap: Port to probe 4165 void ata_sas_async_probe(struct ata_port *ap) ata_sas_async_probe() argument 4167 __ata_port_probe(ap); ata_sas_async_probe() 4171 int ata_sas_sync_probe(struct ata_port *ap) ata_sas_sync_probe() argument 4173 return ata_port_probe(ap); ata_sas_sync_probe() 4180 * @ap: SATA port to initialize 4189 int ata_sas_port_init(struct ata_port *ap) ata_sas_port_init() argument 4191 int rc = ap->ops->port_start(ap); ata_sas_port_init() 4195 ap->print_id = atomic_inc_return(&ata_print_id); ata_sas_port_init() 4202 * @ap: SATA port to destroy 4206 void ata_sas_port_destroy(struct ata_port *ap) ata_sas_port_destroy() argument 4208 if (ap->ops->port_stop) ata_sas_port_destroy() 4209 ap->ops->port_stop(ap); ata_sas_port_destroy() 4210 kfree(ap); ata_sas_port_destroy() 4217 * @ap: ATA port to which SCSI device is attached 4223 int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) ata_sas_slave_configure() argument 4226 ata_scsi_dev_config(sdev, ap->link.device); ata_sas_slave_configure() 4234 * @ap: ATA port to which the command is being sent 4241 int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) ata_sas_queuecmd() argument 4245 ata_scsi_dump_cdb(ap, cmd); ata_sas_queuecmd() 4247 if (likely(ata_dev_enabled(ap->link.device))) ata_sas_queuecmd() 4248 rc = __ata_scsi_queuecmd(cmd, ap->link.device); ata_sas_queuecmd() 4257 int ata_sas_allocate_tag(struct ata_port *ap) ata_sas_allocate_tag() argument 4259 unsigned int max_queue = ap->host->n_tags; ata_sas_allocate_tag() 4262 for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { ata_sas_allocate_tag() 4269 if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) { ata_sas_allocate_tag() 4270 ap->sas_last_tag = tag; ata_sas_allocate_tag() 4277 void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) ata_sas_free_tag() argument 4279 clear_bit(tag, &ap->sas_tag_allocated); ata_sas_free_tag()
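
Note: nearly every sysfs/ioctl path in the libata-scsi excerpts above follows one locking idiom: look up the device under ap->lock (ata_scsi_find_dev() is file-local to libata-scsi.c), flag the work the error handler should do, schedule EH, drop the lock, and call ata_port_wait_eh() only when the caller must be synchronous. Condensed to its skeleton:

    #include <linux/spinlock.h>
    #include <linux/libata.h>

    static void demo_kick_eh(struct ata_port *ap, bool synchronous)
    {
            unsigned long flags;

            spin_lock_irqsave(ap->lock, flags);
            /* ...set the per-device or per-link state EH should act on... */
            ata_port_schedule_eh(ap);
            spin_unlock_irqrestore(ap->lock, flags);

            if (synchronous)
                    ata_port_wait_eh(ap);
    }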
|
H A D | pata_sc1200.c | 79 * @ap: ATA interface 85 static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev) sc1200_set_piomode() argument 98 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sc1200_set_piomode() 100 unsigned int reg = 0x40 + 0x10 * ap->port_no; sc1200_set_piomode() 112 * @ap: ATA interface 119 static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev) sc1200_set_dmamode() argument 134 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sc1200_set_dmamode() 135 unsigned int reg = 0x40 + 0x10 * ap->port_no; sc1200_set_dmamode() 167 struct ata_port *ap = qc->ap; sc1200_qc_issue() local 169 struct ata_device *prev = ap->private_data; sc1200_qc_issue() 177 sc1200_set_dmamode(ap, adev); sc1200_qc_issue() 192 struct ata_host *host = qc->ap->host; sc1200_qc_defer() 193 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; sc1200_qc_defer()
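
Note: sc1200 (like cs5530 and radisys further down) caches the device whose timings are currently programmed in ap->private_data and reprograms lazily from ->qc_issue when a command targets the other device. A sketch of that shape, with a stub standing in for the real timing code:

    #include <linux/libata.h>

    /* Stand-in for the real per-device timing programming. */
    static void demo_set_dmamode(struct ata_port *ap, struct ata_device *adev)
    {
            /* ...bank in adev->dma_mode's timings for this channel... */
    }

    static unsigned int demo_qc_issue(struct ata_queued_cmd *qc)
    {
            struct ata_port *ap = qc->ap;
            struct ata_device *adev = qc->dev;
            struct ata_device *prev = ap->private_data;

            if (adev != prev && ata_dma_enabled(adev)) {
                    demo_set_dmamode(ap, adev);
                    ap->private_data = adev;
            }

            return ata_bmdma_qc_issue(qc);
    }

sc1200_qc_defer() additionally holds off commands while the sibling port (host->ports[1 ^ ap->port_no]) is busy, since the two channels share controller resources.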
|
H A D | pata_triflex.c | 62 struct ata_port *ap = link->ap; triflex_prereset() local 63 struct pci_dev *pdev = to_pci_dev(ap->host->dev); triflex_prereset() 65 if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) triflex_prereset() 75 * @ap: ATA interface 85 static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed) triflex_load_timing() argument 87 struct pci_dev *pdev = to_pci_dev(ap->host->dev); triflex_load_timing() 90 int channel_offset = ap->port_no ? 0x74: 0x70; triflex_load_timing() 131 * @ap: ATA interface 138 static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev) triflex_set_piomode() argument 140 triflex_load_timing(ap, adev, adev->pio_mode); triflex_set_piomode() 156 triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode); triflex_bmdma_start() 162 * @ap: ATA interface 173 triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode); triflex_bmdma_stop()
|
H A D | pata_at91.c | 262 static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev) pata_at91_set_piomode() argument 264 struct at91_ide_info *info = ap->host->private_data; pata_at91_set_piomode() 271 dev_warn(ap->dev, "Failed to compute ATA timing %d, " pata_at91_set_piomode() 275 set_smc_timing(ap->dev, adev, info, &timing); pata_at91_set_piomode() 281 struct at91_ide_info *info = dev->link->ap->host->private_data; pata_at91_data_xfer_noirq() 322 struct ata_port *ap; pata_at91_probe() local 351 ap = host->ports[0]; pata_at91_probe() 352 ap->ops = &pata_at91_port_ops; pata_at91_probe() 353 ap->flags |= ATA_FLAG_SLAVE_POSS; pata_at91_probe() 354 ap->pio_mask = ATA_PIO4; pata_at91_probe() 357 ap->flags |= ATA_FLAG_PIO_POLLING; pata_at91_probe() 358 ata_port_desc(ap, "no IRQ, using PIO polling"); pata_at91_probe() 398 ap->ioaddr.cmd_addr = info->ide_addr; pata_at91_probe() 399 ap->ioaddr.ctl_addr = info->alt_addr + 0x06; pata_at91_probe() 400 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; pata_at91_probe() 402 ata_sff_std_ports(&ap->ioaddr); pata_at91_probe() 404 ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx", pata_at91_probe()
|
H A D | pata_cypress.c | 45 * @ap: ATA interface 51 static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev) cy82c693_set_piomode() argument 53 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cy82c693_set_piomode() 94 * @ap: ATA interface 100 static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev) cy82c693_set_dmamode() argument 102 int reg = CY82_INDEX_CHANNEL0 + ap->port_no; cy82c693_set_dmamode()
|
H A D | pata_cs5520.c | 62 * @ap: ATA port 69 static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio) cs5520_set_timings() argument 71 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5520_set_timings() 77 pci_write_config_byte(pdev, 0x62 + ap->port_no, cs5520_set_timings() 82 pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave, cs5520_set_timings() 86 pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave, cs5520_set_timings() 93 * @ap: ATA port 100 static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev) cs5520_set_piomode() argument 102 cs5520_set_timings(ap, adev, adev->pio_mode); cs5520_set_piomode() 216 struct ata_port *ap = host->ports[i]; cs5520_init_one() local 218 if (ata_port_is_dummy(ap)) cs5520_init_one() 221 rc = devm_request_irq(&pdev->dev, irq[ap->port_no], cs5520_init_one() 226 ata_port_desc(ap, "irq %d", irq[i]); cs5520_init_one()
|
H A D | pata_radisys.c | 30 * @ap: ATA port 39 static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev) radisys_set_piomode() argument 42 struct pci_dev *dev = to_pci_dev(ap->host->dev); radisys_set_piomode() 76 ap->private_data = adev; radisys_set_piomode() 81 * @ap: Port whose timings we are configuring 90 static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) radisys_set_dmamode() argument 92 struct pci_dev *dev = to_pci_dev(ap->host->dev); radisys_set_dmamode() 153 ap->private_data = adev; radisys_set_dmamode() 169 struct ata_port *ap = qc->ap; radisys_qc_issue() local 172 if (adev != ap->private_data) { radisys_qc_issue() 176 radisys_set_dmamode(ap, adev); radisys_qc_issue() 178 radisys_set_piomode(ap, adev); radisys_qc_issue()
|
H A D | pata_sis.c | 87 return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno); sis_old_port_base() 100 struct ata_port *ap = adev->link->ap; sis_port_base() local 101 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_port_base() 110 return port + (8 * ap->port_no) + (4 * adev->devno); sis_port_base() 115 * @ap: Port 122 static int sis_133_cable_detect(struct ata_port *ap) sis_133_cable_detect() argument 124 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_133_cable_detect() 128 pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp); sis_133_cable_detect() 136 * @ap: Port 142 static int sis_66_cable_detect(struct ata_port *ap) sis_66_cable_detect() argument 144 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_66_cable_detect() 149 tmp >>= ap->port_no; sis_66_cable_detect() 171 struct ata_port *ap = link->ap; sis_pre_reset() local 172 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_pre_reset() 174 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) sis_pre_reset() 186 * @ap: Port 194 static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev) sis_set_fifo() argument 196 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_set_fifo() 200 mask <<= (2 * ap->port_no); sis_set_fifo() 215 * @ap: Port whose timings we are configuring 226 static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev) sis_old_set_piomode() argument 228 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_old_set_piomode() 236 sis_set_fifo(ap, adev); sis_old_set_piomode() 253 * @ap: Port whose timings we are configuring 263 static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev) sis_100_set_piomode() argument 265 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_100_set_piomode() 271 sis_set_fifo(ap, adev); sis_100_set_piomode() 278 * @ap: Port whose timings we are configuring 288 static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev) sis_133_set_piomode() argument 290 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_133_set_piomode() 310 sis_set_fifo(ap, adev); sis_133_set_piomode() 325 * @ap: Port whose timings we are configuring 336 static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev) sis_old_set_dmamode() argument 338 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_old_set_dmamode() 364 * @ap: Port whose timings we are configuring 375 static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev) sis_66_set_dmamode() argument 377 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_66_set_dmamode() 404 * @ap: Port whose timings we are configuring 414 static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev) sis_100_set_dmamode() argument 416 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_100_set_dmamode() 438 * @ap: Port whose timings we are configuring 448 static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev) sis_133_early_set_dmamode() argument 450 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_133_early_set_dmamode() 472 * @ap: Port whose timings we are configuring 481 static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev) sis_133_set_dmamode() argument 483 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_133_set_dmamode() 529 struct ata_port *ap = adev->link->ap; sis_133_mode_filter() local 530 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sis_133_mode_filter()
|
H A D | pata_hpt37x.c | 203 * @ap: ATA port 210 static u32 hpt37x_find_mode(struct ata_port *ap, int speed) hpt37x_find_mode() argument 212 struct hpt_clock *clocks = ap->host->private_data; hpt37x_find_mode() 329 * @ap: ATA port to detect on 334 static int hpt37x_cable_detect(struct ata_port *ap) hpt37x_cable_detect() argument 336 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt37x_cable_detect() 349 if (ata66 & (2 >> ap->port_no)) hpt37x_cable_detect() 357 * @ap: ATA port to detect on 362 static int hpt374_fn1_cable_detect(struct ata_port *ap) hpt374_fn1_cable_detect() argument 364 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt374_fn1_cable_detect() 365 unsigned int mcrbase = 0x50 + 4 * ap->port_no; hpt374_fn1_cable_detect() 377 if (ata66 & (2 >> ap->port_no)) hpt374_fn1_cable_detect() 393 struct ata_port *ap = link->ap; hpt37x_pre_reset() local 394 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt37x_pre_reset() 400 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) hpt37x_pre_reset() 404 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); hpt37x_pre_reset() 410 static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev, hpt370_set_mode() argument 413 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt370_set_mode() 418 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); hpt370_set_mode() 419 addr2 = 0x51 + 4 * ap->port_no; hpt370_set_mode() 435 timing = hpt37x_find_mode(ap, mode); hpt370_set_mode() 443 * @ap: ATA interface 449 static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) hpt370_set_piomode() argument 451 hpt370_set_mode(ap, adev, adev->pio_mode); hpt370_set_piomode() 456 * @ap: ATA interface 462 static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) hpt370_set_dmamode() argument 464 hpt370_set_mode(ap, adev, adev->dma_mode); hpt370_set_dmamode() 476 struct ata_port *ap = qc->ap; hpt370_bmdma_stop() local 477 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt370_bmdma_stop() 478 void __iomem *bmdma = ap->ioaddr.bmdma_addr; hpt370_bmdma_stop() 488 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); hpt370_bmdma_stop() 498 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); hpt370_bmdma_stop() 504 static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev, hpt372_set_mode() argument 507 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt372_set_mode() 512 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); hpt372_set_mode() 513 addr2 = 0x51 + 4 * ap->port_no; hpt372_set_mode() 528 timing = hpt37x_find_mode(ap, mode); hpt372_set_mode() 537 * @ap: ATA interface 543 static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) hpt372_set_piomode() argument 545 hpt372_set_mode(ap, adev, adev->pio_mode); hpt372_set_piomode() 550 * @ap: ATA interface 556 static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) hpt372_set_dmamode() argument 558 hpt372_set_mode(ap, adev, adev->dma_mode); hpt372_set_dmamode() 570 struct ata_port *ap = qc->ap; hpt37x_bmdma_stop() local 571 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt37x_bmdma_stop() 572 int mscreg = 0x50 + 4 * ap->port_no; hpt37x_bmdma_stop() 577 if (bwsr_stat & (1 << ap->port_no)) hpt37x_bmdma_stop()
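
Note: hpt37x_find_mode() above is a straight table lookup: probe code points the host's private_data at the clock table matching the detected bus clock, and set_piomode/set_dmamode fetch the register word for their xfer mode from it. A sketch with invented timing values:

    #include <linux/kernel.h>
    #include <linux/libata.h>

    struct demo_clock {
            u8  xfer_speed;
            u32 timing;
    };

    /* Timing words here are invented; each supported bus clock gets its own table. */
    static const struct demo_clock demo_clocks_33mhz[] = {
            { XFER_UDMA_5,   0x12345678 },
            { XFER_MW_DMA_2, 0x23456789 },
            { XFER_PIO_4,    0x3456789a },
            { 0, }
    };

    static void demo_select_clock_table(struct ata_host *host)
    {
            host->private_data = (void *)demo_clocks_33mhz; /* chosen at probe time */
    }

    static u32 demo_find_mode(struct ata_port *ap, int speed)
    {
            const struct demo_clock *clocks = ap->host->private_data;

            for (; clocks->xfer_speed; clocks++)
                    if (clocks->xfer_speed == speed)
                            return clocks->timing;

            WARN_ONCE(1, "no timing entry for xfer mode 0x%x\n", speed);
            return 0;
    }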
|
H A D | pata_sch.c | 53 static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev); 54 static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev); 100 * @ap: Port whose timings we are configuring 109 static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev) sch_set_piomode() argument 112 struct pci_dev *dev = to_pci_dev(ap->host->dev); sch_set_piomode() 129 * @ap: Port whose timings we are configuring 138 static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev) sch_set_dmamode() argument 141 struct pci_dev *dev = to_pci_dev(ap->host->dev); sch_set_dmamode()
|
H A D | pata_cs5530.c | 38 static void __iomem *cs5530_port_base(struct ata_port *ap) cs5530_port_base() argument 40 unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr; cs5530_port_base() 42 return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no); cs5530_port_base() 47 * @ap: ATA interface 54 static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev) cs5530_set_piomode() argument 60 void __iomem *base = cs5530_port_base(ap); cs5530_set_piomode() 77 * @ap: ATA interface 85 static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) cs5530_set_dmamode() argument 87 void __iomem *base = cs5530_port_base(ap); cs5530_set_dmamode() 124 reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); cs5530_set_dmamode() 126 iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); cs5530_set_dmamode() 130 ap->private_data = adev; cs5530_set_dmamode() 145 struct ata_port *ap = qc->ap; cs5530_qc_issue() local 147 struct ata_device *prev = ap->private_data; cs5530_qc_issue() 155 cs5530_set_dmamode(ap, adev); cs5530_qc_issue()
|
H A D | pata_pcmcia.c | 107 struct ata_port *ap = dev->link->ap; ata_data_xfer_8bit() local 110 ioread8_rep(ap->ioaddr.data_addr, buf, buflen); ata_data_xfer_8bit() 112 iowrite8_rep(ap->ioaddr.data_addr, buf, buflen); ata_data_xfer_8bit() 130 struct ata_port *ap; pcmcia_8bit_drain_fifo() local 136 ap = qc->ap; pcmcia_8bit_drain_fifo() 139 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) pcmcia_8bit_drain_fifo() 141 ioread8(ap->ioaddr.data_addr); pcmcia_8bit_drain_fifo() 144 ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count); pcmcia_8bit_drain_fifo() 202 struct ata_port *ap; pcmcia_init_one() local 266 ap = host->ports[p]; pcmcia_init_one() 268 ap->ops = ops; pcmcia_init_one() 269 ap->pio_mask = ATA_PIO0; /* ISA so PIO 0 cycles */ pcmcia_init_one() 270 ap->flags |= ATA_FLAG_SLAVE_POSS; pcmcia_init_one() 271 ap->ioaddr.cmd_addr = io_addr + 0x10 * p; pcmcia_init_one() 272 ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; pcmcia_init_one() 273 ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; pcmcia_init_one() 274 ata_sff_std_ports(&ap->ioaddr); pcmcia_init_one() 276 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); pcmcia_init_one()
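
Note: pcmcia_8bit_drain_fifo() above recovers from failed ATAPI commands on 8-bit cards by discarding data until DRQ drops (with an upper bound), then logging how much was thrown away. Roughly:

    #include <linux/io.h>
    #include <linux/libata.h>

    static void demo_drain_fifo(struct ata_queued_cmd *qc)
    {
            struct ata_port *ap;
            int count;

            if (!qc)
                    return;

            ap = qc->ap;
            /* discard at most 64K so a wedged device cannot trap us here forever */
            for (count = 0;
                 (ap->ops->sff_check_status(ap) & ATA_DRQ) && count < 65536;
                 count++)
                    ioread8(ap->ioaddr.data_addr);

            if (count)
                    ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count);
    }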
|
H A D | acard-ahci.c | 77 static int acard_ahci_port_start(struct ata_port *ap); 262 struct ata_port *ap = qc->ap; acard_ahci_qc_prep() local 263 struct ahci_port_priv *pp = ap->private_data; acard_ahci_qc_prep() 302 struct ahci_port_priv *pp = qc->ap->private_data; acard_ahci_qc_fill_rtf() 324 static int acard_ahci_port_start(struct ata_port *ap) acard_ahci_port_start() argument 326 struct ahci_host_priv *hpriv = ap->host->private_data; acard_ahci_port_start() 327 struct device *dev = ap->host->dev; acard_ahci_port_start() 338 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { acard_ahci_port_start() 339 void __iomem *port_mmio = ahci_port_base(ap); acard_ahci_port_start() 345 ap->port_no); acard_ahci_port_start() 349 ap->port_no); acard_ahci_port_start() 397 ap->private_data = pp; acard_ahci_port_start() 400 return ahci_port_resume(ap); acard_ahci_port_start() 473 struct ata_port *ap = host->ports[i]; acard_ahci_init_one() local 475 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); acard_ahci_init_one() 476 ata_port_pbar_desc(ap, AHCI_PCI_BAR, acard_ahci_init_one() 477 0x100 + ap->port_no * 0x80, "port"); acard_ahci_init_one() 481 ap->pm_policy = NOT_AVAILABLE; acard_ahci_init_one() 485 ap->ops = &ata_dummy_port_ops; acard_ahci_init_one()
|
H A D | pata_ali.c | 84 * @ap: ATA port 89 static int ali_c2_cable_detect(struct ata_port *ap) ali_c2_cable_detect() argument 91 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ali_c2_cable_detect() 103 if (ata66 & (1 << ap->port_no)) ali_c2_cable_detect() 111 * @ap: ALi ATA port 132 * @ap: ALi channel to control 141 static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on) ali_fifo_control() argument 143 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ali_fifo_control() 144 int pio_fifo = 0x54 + ap->port_no; ali_fifo_control() 160 * @ap: ALi channel to load 170 static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra) ali_program_modes() argument 172 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ali_program_modes() 173 int cas = 0x58 + 4 * ap->port_no; /* Command timing */ ali_program_modes() 174 int cbt = 0x59 + 4 * ap->port_no; /* Command timing */ ali_program_modes() 175 int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */ ali_program_modes() 176 int udmat = 0x56 + ap->port_no; /* UDMA timing */ ali_program_modes() 201 * @ap: ATA interface 207 static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev) ali_set_piomode() argument 226 ali_fifo_control(ap, adev, 0x00); ali_set_piomode() 227 ali_program_modes(ap, adev, &t, 0); ali_set_piomode() 229 ali_fifo_control(ap, adev, 0x05); ali_set_piomode() 235 * @ap: ATA interface 241 static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev) ali_set_dmamode() argument 247 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ali_set_dmamode() 251 ali_fifo_control(ap, adev, 0x08); ali_set_dmamode() 254 ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]); ali_set_dmamode() 272 ali_program_modes(ap, adev, &t, 0); ali_set_dmamode() 344 int port_bit = 4 << link->ap->port_no; ali_c2_c3_postreset()
|
H A D | pata_marvell.c | 69 struct ata_port *ap = link->ap; marvell_pre_reset() local 70 struct pci_dev *pdev = to_pci_dev(ap->host->dev); marvell_pre_reset() 72 if (pdev->device == 0x6145 && ap->port_no == 0 && marvell_pre_reset() 79 static int marvell_cable_detect(struct ata_port *ap) marvell_cable_detect() argument 82 switch(ap->port_no) marvell_cable_detect() 85 if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) marvell_cable_detect()
|
H A D | pata_hpt366.c | 115 * @ap: ATA port 122 static u32 hpt36x_find_mode(struct ata_port *ap, int speed) hpt36x_find_mode() argument 124 struct hpt_clock *clocks = ap->host->private_data; hpt36x_find_mode() 216 static int hpt36x_cable_detect(struct ata_port *ap) hpt36x_cable_detect() argument 218 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt36x_cable_detect() 231 static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev, hpt366_set_mode() argument 234 struct pci_dev *pdev = to_pci_dev(ap->host->dev); hpt366_set_mode() 246 t = hpt36x_find_mode(ap, mode); hpt366_set_mode() 260 * @ap: ATA interface 266 static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) hpt366_set_piomode() argument 268 hpt366_set_mode(ap, adev, adev->pio_mode); hpt366_set_piomode() 273 * @ap: ATA interface 280 static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) hpt366_set_dmamode() argument 282 hpt366_set_mode(ap, adev, adev->dma_mode); hpt366_set_dmamode()
|
H A D | ahci_xgene.c | 113 * @ap : ATA port of interest. 119 static int xgene_ahci_poll_reg_val(struct ata_port *ap, xgene_ahci_poll_reg_val() argument 131 ata_msleep(ap, interval); xgene_ahci_poll_reg_val() 140 * @ap : ATA port of interest 145 static int xgene_ahci_restart_engine(struct ata_port *ap) xgene_ahci_restart_engine() argument 147 struct ahci_host_priv *hpriv = ap->host->private_data; xgene_ahci_restart_engine() 148 struct ahci_port_priv *pp = ap->private_data; xgene_ahci_restart_engine() 149 void __iomem *port_mmio = ahci_port_base(ap); xgene_ahci_restart_engine() 158 if (xgene_ahci_poll_reg_val(ap, port_mmio + xgene_ahci_restart_engine() 162 ahci_stop_engine(ap); xgene_ahci_restart_engine() 163 ahci_start_fis_rx(ap); xgene_ahci_restart_engine() 175 hpriv->start_engine(ap); xgene_ahci_restart_engine() 198 struct ata_port *ap = qc->ap; xgene_ahci_qc_issue() local 199 struct ahci_host_priv *hpriv = ap->host->private_data; xgene_ahci_qc_issue() 203 void *port_mmio = ahci_port_base(ap); xgene_ahci_qc_issue() 209 if (ctx->class[ap->port_no] == ATA_DEV_PMP) { xgene_ahci_qc_issue() 216 if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || xgene_ahci_qc_issue() 217 (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) || xgene_ahci_qc_issue() 218 (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART))) xgene_ahci_qc_issue() 219 xgene_ahci_restart_engine(ap); xgene_ahci_qc_issue() 224 ctx->last_cmd[ap->port_no] = qc->tf.command; xgene_ahci_qc_issue() 361 struct ata_port *ap = link->ap; xgene_ahci_do_hardreset() local 362 struct ahci_host_priv *hpriv = ap->host->private_data; xgene_ahci_do_hardreset() 364 struct ahci_port_priv *pp = ap->private_data; xgene_ahci_do_hardreset() 366 void __iomem *port_mmio = ahci_port_base(ap); xgene_ahci_do_hardreset() 400 struct ata_port *ap = link->ap; xgene_ahci_hardreset() local 401 struct ahci_host_priv *hpriv = ap->host->private_data; xgene_ahci_hardreset() 402 void __iomem *port_mmio = ahci_port_base(ap); xgene_ahci_hardreset() 418 ahci_stop_engine(ap); xgene_ahci_hardreset() 429 hpriv->start_engine(ap); xgene_ahci_hardreset() 432 *class = ahci_dev_classify(ap); xgene_ahci_hardreset() 462 struct ata_port *ap = link->ap; xgene_ahci_pmp_softreset() local 464 void *port_mmio = ahci_port_base(ap); xgene_ahci_pmp_softreset() 506 struct ata_port *ap = link->ap; xgene_ahci_softreset() local 507 struct ahci_host_priv *hpriv = ap->host->private_data; xgene_ahci_softreset() 509 void *port_mmio = ahci_port_base(ap); xgene_ahci_softreset() 530 ctx->class[ap->port_no] = *class; xgene_ahci_softreset()
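
Note: xgene_ahci_poll_reg_val() (used by xgene_ahci_restart_engine() above) is a bounded MMIO poll that sleeps between reads via ata_msleep() so other ports are not starved. The real helper's argument list may differ slightly; the pattern is:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>
    #include <linux/libata.h>

    static int demo_poll_reg_val(struct ata_port *ap, void __iomem *reg, u32 want,
                                 unsigned long interval_ms, unsigned long timeout_ms)
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

            while (readl(reg) != want) {
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    ata_msleep(ap, interval_ms);
            }
            return 0;
    }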
|
H A D | pata_macio.c | 368 static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device) pata_macio_apply_timings() argument 370 struct pata_macio_priv *priv = ap->private_data; pata_macio_apply_timings() 371 void __iomem *rbase = ap->ioaddr.cmd_addr; pata_macio_apply_timings() 382 static void pata_macio_dev_select(struct ata_port *ap, unsigned int device) pata_macio_dev_select() argument 384 ata_sff_dev_select(ap, device); pata_macio_dev_select() 387 pata_macio_apply_timings(ap, device); pata_macio_dev_select() 390 static void pata_macio_set_timings(struct ata_port *ap, pata_macio_set_timings() argument 393 struct pata_macio_priv *priv = ap->private_data; pata_macio_set_timings() 435 pata_macio_apply_timings(ap, adev->devno); pata_macio_set_timings() 472 static int pata_macio_cable_detect(struct ata_port *ap) pata_macio_cable_detect() argument 474 struct pata_macio_priv *priv = ap->private_data; pata_macio_cable_detect() 513 struct ata_port *ap = qc->ap; pata_macio_qc_prep() local 514 struct pata_macio_priv *priv = ap->private_data; pata_macio_qc_prep() 571 static void pata_macio_freeze(struct ata_port *ap) pata_macio_freeze() argument 573 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; pata_macio_freeze() 584 ata_sff_freeze(ap); pata_macio_freeze() 590 struct ata_port *ap = qc->ap; pata_macio_bmdma_setup() local 591 struct pata_macio_priv *priv = ap->private_data; pata_macio_bmdma_setup() 592 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; pata_macio_bmdma_setup() 605 void __iomem *rbase = ap->ioaddr.cmd_addr; pata_macio_bmdma_setup() 614 ap->ops->sff_exec_command(ap, &qc->tf); pata_macio_bmdma_setup() 619 struct ata_port *ap = qc->ap; pata_macio_bmdma_start() local 620 struct pata_macio_priv *priv = ap->private_data; pata_macio_bmdma_start() 621 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; pata_macio_bmdma_start() 632 struct ata_port *ap = qc->ap; pata_macio_bmdma_stop() local 633 struct pata_macio_priv *priv = ap->private_data; pata_macio_bmdma_stop() 634 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; pata_macio_bmdma_stop() 645 static u8 pata_macio_bmdma_status(struct ata_port *ap) pata_macio_bmdma_status() argument 647 struct pata_macio_priv *priv = ap->private_data; pata_macio_bmdma_status() 648 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; pata_macio_bmdma_status() 704 static int pata_macio_port_start(struct ata_port *ap) pata_macio_port_start() argument 706 struct pata_macio_priv *priv = ap->private_data; pata_macio_port_start() 708 if (ap->ioaddr.bmdma_addr == NULL) pata_macio_port_start() 722 ap->ioaddr.bmdma_addr = NULL; pata_macio_port_start() 723 ap->mwdma_mask = 0; pata_macio_port_start() 724 ap->udma_mask = 0; pata_macio_port_start() 729 static void pata_macio_irq_clear(struct ata_port *ap) pata_macio_irq_clear() argument 731 struct pata_macio_priv *priv = ap->private_data; pata_macio_irq_clear() 796 struct ata_port *ap = ata_shost_to_port(sdev->host); pata_macio_slave_config() local 797 struct pata_macio_priv *priv = ap->private_data; pata_macio_slave_config() 808 dev = &ap->link.device[sdev->id]; pata_macio_slave_config() 1229 struct ata_port *ap; pata_macio_mb_event() local 1236 ap = host->ports[0]; pata_macio_mb_event() 1237 spin_lock_irqsave(ap->lock, flags); pata_macio_mb_event() 1238 ehi = &ap->link.eh_info; pata_macio_mb_event() 1242 ata_port_freeze(ap); pata_macio_mb_event() 1245 ata_for_each_dev(dev, &ap->link, ALL) pata_macio_mb_event() 1247 ata_port_abort(ap); 
pata_macio_mb_event() 1249 spin_unlock_irqrestore(ap->lock, flags); pata_macio_mb_event()
|
H A D | pata_cs5536.c | 122 struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev); cs5536_program_dtc() 134 * @ap: Port to detect on 141 static int cs5536_cable_detect(struct ata_port *ap) cs5536_cable_detect() argument 143 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5536_cable_detect() 156 * @ap: ATA interface 160 static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev) cs5536_set_piomode() argument 174 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5536_set_piomode() 199 * @ap: ATA interface 204 static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev) cs5536_set_dmamode() argument 214 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5536_set_dmamode()
|
H A D | ata_piix.c | 502 static int piix_port_start(struct ata_port *ap) piix_port_start() argument 504 if (!(ap->flags & PIIX_FLAG_PIO16)) piix_port_start() 505 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; piix_port_start() 507 return ata_bmdma_port_start(ap); piix_port_start() 512 * @ap: Port for which cable detect info is desired 521 static int ich_pata_cable_detect(struct ata_port *ap) ich_pata_cable_detect() argument 523 struct pci_dev *pdev = to_pci_dev(ap->host->dev); ich_pata_cable_detect() 524 struct piix_host_priv *hpriv = ap->host->private_data; ich_pata_cable_detect() 539 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; ich_pata_cable_detect() 555 struct ata_port *ap = link->ap; piix_pata_prereset() local 556 struct pci_dev *pdev = to_pci_dev(ap->host->dev); piix_pata_prereset() 558 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) piix_pata_prereset() 565 static void piix_set_timings(struct ata_port *ap, struct ata_device *adev, piix_set_timings() argument 568 struct pci_dev *dev = to_pci_dev(ap->host->dev); piix_set_timings() 571 unsigned int master_port= ap->port_no ? 0x42 : 0x40; piix_set_timings() 618 slave_data &= (ap->port_no ? 0x0f : 0xf0); piix_set_timings() 621 << (ap->port_no ? 4 : 0); piix_set_timings() 642 if (ap->udma_mask) { piix_set_timings() 644 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); piix_set_timings() 653 * @ap: Port whose timings we are configuring 662 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) piix_set_piomode() argument 664 piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0); piix_set_piomode() 669 * @ap: Port whose timings we are configuring 679 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) do_pata_set_dmamode() argument 681 struct pci_dev *dev = to_pci_dev(ap->host->dev); do_pata_set_dmamode() 684 int devid = adev->devno + 2 * ap->port_no; do_pata_set_dmamode() 742 piix_set_timings(ap, adev, pio); do_pata_set_dmamode() 748 * @ap: Port whose timings we are configuring 757 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) piix_set_dmamode() argument 759 do_pata_set_dmamode(ap, adev, 0); piix_set_dmamode() 764 * @ap: Port whose timings we are configuring 773 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev) ich_set_dmamode() argument 775 do_pata_set_dmamode(ap, adev, 1); ich_set_dmamode() 794 struct ata_port *ap = link->ap; piix_sidpr_sel() local 795 struct piix_host_priv *hpriv = ap->host->private_data; piix_sidpr_sel() 797 iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg], piix_sidpr_sel() 804 struct piix_host_priv *hpriv = link->ap->host->private_data; piix_sidpr_scr_read() 817 struct piix_host_priv *hpriv = link->ap->host->private_data; piix_sidpr_scr_write() 833 static bool piix_irq_check(struct ata_port *ap) piix_irq_check() argument 835 if (unlikely(!ap->ioaddr.bmdma_addr)) piix_irq_check() 838 return ap->ops->bmdma_status(ap) & ATA_DMA_INTR; piix_irq_check() 1072 static u8 piix_vmw_bmdma_status(struct ata_port *ap) piix_vmw_bmdma_status() argument 1074 return ata_bmdma_status(ap) & ~ATA_DMA_ERR; piix_vmw_bmdma_status() 1503 struct ata_port *ap = host->ports[i]; piix_init_sidpr() local 1505 ap->ops = &piix_sidpr_sata_ops; piix_init_sidpr() 1507 if (ap->flags & ATA_FLAG_SLAVE_POSS) { piix_init_sidpr() 1508 rc = ata_slave_link_init(ap); piix_init_sidpr()
|
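The ata_piix.c snippets above derive per-channel PCI config offsets and per-drive bit positions from ap->port_no and adev->devno: the master timing register is 0x40 or 0x42, one nibble of the slave timing byte is selected by the port, and the UDMA enable bit is 2 * port_no + devno. The small sketch below only reproduces that arithmetic and prints the derived values; the helper name and output format are mine.

#include <stdio.h>

struct piix_layout {
	unsigned int master_timing_reg;	/* 0x40 or 0x42 */
	unsigned char slave_keep_mask;	/* nibble of the slave byte to keep */
	unsigned int slave_shift;	/* shift for this port's slave nibble */
	unsigned int udma_bit;		/* bit in UDMA enable for this drive */
};

static struct piix_layout piix_layout_for(unsigned int port_no,
					  unsigned int devno)
{
	struct piix_layout l;

	l.master_timing_reg = port_no ? 0x42 : 0x40;
	l.slave_keep_mask = port_no ? 0x0f : 0xf0;
	l.slave_shift = port_no ? 4 : 0;
	l.udma_bit = 2 * port_no + devno;
	return l;
}

int main(void)
{
	unsigned int port, dev;

	for (port = 0; port < 2; port++)
		for (dev = 0; dev < 2; dev++) {
			struct piix_layout l = piix_layout_for(port, dev);

			printf("port %u dev %u: timing reg 0x%02x, "
			       "slave mask 0x%02x, shift %u, udma bit %u\n",
			       port, dev, l.master_timing_reg,
			       l.slave_keep_mask, l.slave_shift, l.udma_bit);
		}
	return 0;
}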
H A D | pata_piccolo.c | 31 static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev) tosh_set_piomode() argument 36 struct pci_dev *pdev = to_pci_dev(ap->host->dev); tosh_set_piomode() 44 static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev) tosh_set_dmamode() argument 46 struct pci_dev *pdev = to_pci_dev(ap->host->dev); tosh_set_dmamode()
|
H A D | libata-transport.c | 209 struct ata_port *ap = transport_class_to_port(dev); \ 211 return snprintf(buf, 20, format_string, cast ap->field); \ 256 void ata_tport_delete(struct ata_port *ap) ata_tport_delete() argument 258 struct device *dev = &ap->tdev; ata_tport_delete() 260 ata_tlink_delete(&ap->link); ata_tport_delete() 271 * @ap: existing ata_port structure 279 struct ata_port *ap) ata_tport_add() 282 struct device *dev = &ap->tdev; ata_tport_add() 289 dev_set_name(dev, "ata%d", ap->print_id); ata_tport_add() 291 ata_acpi_bind_port(ap); ata_tport_add() 305 error = ata_tlink_add(&ap->link); ata_tport_add() 408 struct ata_port *ap = link->ap; ata_tlink_add() local 413 dev->parent = get_device(&ap->tdev); ata_tlink_add() 416 dev_set_name(dev, "link%d", ap->print_id); ata_tlink_add() 418 dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); ata_tlink_add() 660 struct ata_port *ap = link->ap; ata_tdev_add() local 667 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); ata_tdev_add() 669 dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); ata_tdev_add() 278 ata_tport_add(struct device *parent, struct ata_port *ap) ata_tport_add() argument
|
H A D | sata_highbank.c | 140 static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state, ecx_transmit_led_message() argument 143 struct ahci_host_priv *hpriv = ap->host->private_data; ecx_transmit_led_message() 145 struct ahci_port_priv *pp = ap->private_data; ecx_transmit_led_message() 162 ecx_parse_sgpio(pdata, ap->port_no, state); ecx_transmit_led_message() 403 struct ata_port *ap = link->ap; ahci_highbank_hardreset() local 404 struct ahci_port_priv *pp = ap->private_data; ahci_highbank_hardreset() 405 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_highbank_hardreset() 413 ahci_stop_engine(ap); ahci_highbank_hardreset() 421 highbank_cphy_disable_overrides(link->ap->port_no); ahci_highbank_hardreset() 423 highbank_cphy_override_lane(link->ap->port_no); ahci_highbank_hardreset() 434 hpriv->start_engine(ap); ahci_highbank_hardreset() 437 *class = ahci_dev_classify(ap); ahci_highbank_hardreset() 550 struct ata_port *ap = host->ports[i]; ahci_highbank_probe() local 552 ata_port_desc(ap, "mmio %pR", mem); ahci_highbank_probe() 553 ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); ahci_highbank_probe() 556 if (ap->flags & ATA_FLAG_EM) ahci_highbank_probe() 557 ap->em_message_type = hpriv->em_msg_type; ahci_highbank_probe() 561 ap->ops = &ata_dummy_port_ops; ahci_highbank_probe()
|
H A D | pata_cs5535.c | 71 * @ap: Port to detect on 77 static int cs5535_cable_detect(struct ata_port *ap) cs5535_cable_detect() argument 80 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5535_cable_detect() 91 * @ap: ATA interface 97 static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev) cs5535_set_piomode() argument 131 * @ap: ATA interface 136 static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev) cs5535_set_dmamode() argument
|
H A D | sata_uli.c | 103 static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) get_scr_cfg_addr() argument 105 struct uli_priv *hpriv = ap->host->private_data; get_scr_cfg_addr() 106 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); get_scr_cfg_addr() 111 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); uli_scr_cfg_read() 112 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); uli_scr_cfg_read() 121 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); uli_scr_cfg_write() 122 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr); uli_scr_cfg_write()
|
H A D | ahci.h | 242 /* ap->flags bits */ 349 void (*start_engine)(struct ata_port *ap); 373 unsigned int ahci_dev_classify(struct ata_port *ap); 386 int ahci_stop_engine(struct ata_port *ap); 387 void ahci_start_fis_rx(struct ata_port *ap); 388 void ahci_start_engine(struct ata_port *ap); 390 int ahci_kick_engine(struct ata_port *ap); 391 int ahci_port_resume(struct ata_port *ap); 398 void ahci_error_handler(struct ata_port *ap); 409 static inline void __iomem *ahci_port_base(struct ata_port *ap) ahci_port_base() argument 411 return __ahci_port_base(ap->host, ap->port_no); ahci_port_base()
|
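The ahci.h entry shows ahci_port_base() delegating to __ahci_port_base(ap->host, ap->port_no), and the sata_highbank.c / ahci.c probe paths describe each port's register block as "0x100 + ap->port_no * 0x80" past the ABAR. A minimal sketch of that address arithmetic over a plain byte pointer follows; PORTS_BASE, PORT_STRIDE and the fake ABAR array are stand-ins of mine.

#include <stdint.h>
#include <stdio.h>

#define PORTS_BASE   0x100u	/* first port register block after the ABAR */
#define PORT_STRIDE  0x80u	/* each port occupies 0x80 bytes */

/* Per-port register base: mirrors ahci_port_base()'s offset computation. */
static uint8_t *port_base(uint8_t *abar, unsigned int port_no)
{
	return abar + PORTS_BASE + port_no * PORT_STRIDE;
}

int main(void)
{
	static uint8_t fake_abar[0x1100];	/* pretend mapped ABAR */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("port %u registers at ABAR + 0x%lx\n", i,
		       (unsigned long)(port_base(fake_abar, i) - fake_abar));
	return 0;
}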
H A D | ahci_imx.c | 296 static void ahci_imx_error_handler(struct ata_port *ap) ahci_imx_error_handler() argument 300 struct ata_host *host = dev_get_drvdata(ap->dev); ahci_imx_error_handler() 305 ahci_error_handler(ap); ahci_imx_error_handler() 312 ata_for_each_dev(dev, &ap->link, ENABLED) ahci_imx_error_handler() 324 dev_info(ap->dev, "no device found, disabling link.\n"); ahci_imx_error_handler() 325 dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n"); ahci_imx_error_handler() 331 struct ata_port *ap = link->ap; ahci_imx_softreset() local 332 struct ata_host *host = dev_get_drvdata(ap->dev); ahci_imx_softreset()
|
H A D | ata_generic.c | 53 struct ata_port *ap = link->ap; generic_set_mode() local 54 const struct pci_device_id *id = ap->host->private_data; generic_set_mode() 60 } else if (ap->ioaddr.bmdma_addr) { generic_set_mode() 62 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); generic_set_mode()
|
H A D | ahci.c | 667 struct ata_port *ap = link->ap; ahci_vt8251_hardreset() local 668 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_vt8251_hardreset() 674 ahci_stop_engine(ap); ahci_vt8251_hardreset() 679 hpriv->start_engine(ap); ahci_vt8251_hardreset() 692 struct ata_port *ap = link->ap; ahci_p5wdh_hardreset() local 693 struct ahci_port_priv *pp = ap->private_data; ahci_p5wdh_hardreset() 694 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_p5wdh_hardreset() 700 ahci_stop_engine(ap); ahci_p5wdh_hardreset() 710 hpriv->start_engine(ap); ahci_p5wdh_hardreset() 729 ahci_kick_engine(ap); ahci_p5wdh_hardreset() 753 struct ata_port *ap = link->ap; ahci_avn_hardreset() local 754 struct ahci_port_priv *pp = ap->private_data; ahci_avn_hardreset() 755 struct ahci_host_priv *hpriv = ap->host->private_data; ahci_avn_hardreset() 764 ahci_stop_engine(ap); ahci_avn_hardreset() 769 int port = ap->port_no; ahci_avn_hardreset() 770 struct ata_host *host = ap->host; ahci_avn_hardreset() 791 ata_msleep(ap, 1000); ahci_avn_hardreset() 797 hpriv->start_engine(ap); ahci_avn_hardreset() 800 *class = ahci_dev_classify(ap); ahci_avn_hardreset() 955 struct ata_port *ap = host->ports[1]; ahci_p5wdh_workaround() local 960 ap->ops = &ahci_p5wdh_ops; ahci_p5wdh_workaround() 961 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; ahci_p5wdh_workaround() 1324 struct ata_port *ap = host->ports[i]; ahci_gtf_filter_workaround() local 1328 ata_for_each_link(link, ap, EDGE) ahci_gtf_filter_workaround() 1551 struct ata_port *ap = host->ports[i]; ahci_init_one() local 1553 ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar"); ahci_init_one() 1554 ata_port_pbar_desc(ap, ahci_pci_bar, ahci_init_one() 1555 0x100 + ap->port_no * 0x80, "port"); ahci_init_one() 1558 if (ap->flags & ATA_FLAG_EM) ahci_init_one() 1559 ap->em_message_type = hpriv->em_msg_type; ahci_init_one() 1564 ap->ops = &ata_dummy_port_ops; ahci_init_one()
|
/linux-4.1.27/drivers/net/wireless/hostap/ |
H A D | hostap_ap.c | 55 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); 71 struct ap_data *ap = m->private; ap_debug_proc_show() local 73 seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast); ap_debug_proc_show() 74 seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast); ap_debug_proc_show() 75 seq_printf(m, "max_inactivity=%u\n", ap->max_inactivity / HZ); ap_debug_proc_show() 76 seq_printf(m, "bridge_packets=%u\n", ap->bridge_packets); ap_debug_proc_show() 77 seq_printf(m, "nullfunc_ack=%u\n", ap->nullfunc_ack); ap_debug_proc_show() 78 seq_printf(m, "autom_ap_wds=%u\n", ap->autom_ap_wds); ap_debug_proc_show() 79 seq_printf(m, "auth_algs=%u\n", ap->local->auth_algs); ap_debug_proc_show() 80 seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc); ap_debug_proc_show() 98 static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) ap_sta_hash_add() argument 100 sta->hnext = ap->sta_hash[STA_HASH(sta->addr)]; ap_sta_hash_add() 101 ap->sta_hash[STA_HASH(sta->addr)] = sta; ap_sta_hash_add() 104 static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) ap_sta_hash_del() argument 108 s = ap->sta_hash[STA_HASH(sta->addr)]; ap_sta_hash_del() 111 ap->sta_hash[STA_HASH(sta->addr)] = s->hnext; ap_sta_hash_del() 124 static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) ap_free_sta() argument 126 if (sta->ap && sta->local) ap_free_sta() 129 if (ap->proc != NULL) { ap_free_sta() 132 remove_proc_entry(name, ap->proc); ap_free_sta() 143 ap->num_sta--; ap_free_sta() 146 ap->sta_aid[sta->aid - 1] = NULL; ap_free_sta() 148 if (!sta->ap) ap_free_sta() 191 struct ap_data *ap; ap_handle_timer() local 195 if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) { ap_handle_timer() 201 ap = local->ap; ap_handle_timer() 207 next_time = jiffies + ap->max_inactivity; ap_handle_timer() 209 if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) { ap_handle_timer() 212 next_time = sta->last_rx + ap->max_inactivity; ap_handle_timer() 217 next_time = jiffies + ap->max_inactivity; ap_handle_timer() 226 if (sta->ap) ap_handle_timer() 230 spin_lock(&ap->sta_table_lock); ap_handle_timer() 231 ap_sta_hash_del(ap, sta); ap_handle_timer() 233 spin_unlock(&ap->sta_table_lock); ap_handle_timer() 238 if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap) ap_handle_timer() 247 if (sta->ap) { ap_handle_timer() 248 if (ap->autom_ap_wds) { ap_handle_timer() 263 sta->addr, ap->tx_callback_poll); ap_handle_timer() 288 ap_free_sta(ap, sta); ap_handle_timer() 304 void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap, hostap_deauth_all_stas() argument 325 if (!resend || ap->num_sta <= 0) hostap_deauth_all_stas() 335 struct ap_data *ap = m->private; ap_control_proc_show() local 340 switch (ap->mac_restrictions.policy) { ap_control_proc_show() 355 seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries); ap_control_proc_show() 367 struct ap_data *ap = m->private; ap_control_proc_start() local 368 spin_lock_bh(&ap->mac_restrictions.lock); ap_control_proc_start() 369 return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos); ap_control_proc_start() 374 struct ap_data *ap = m->private; ap_control_proc_next() local 375 return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos); ap_control_proc_next() 380 struct ap_data *ap = m->private; ap_control_proc_stop() local 381 spin_unlock_bh(&ap->mac_restrictions.lock); ap_control_proc_stop() 497 int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 
*mac) ap_control_kick_mac() argument 502 spin_lock_bh(&ap->sta_table_lock); ap_control_kick_mac() 503 sta = ap_get_sta(ap, mac); ap_control_kick_mac() 505 ap_sta_hash_del(ap, sta); ap_control_kick_mac() 508 spin_unlock_bh(&ap->sta_table_lock); ap_control_kick_mac() 517 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) ap_control_kick_mac() 520 ap_free_sta(ap, sta); ap_control_kick_mac() 528 void ap_control_kickall(struct ap_data *ap) ap_control_kickall() argument 533 spin_lock_bh(&ap->sta_table_lock); ap_control_kickall() 534 for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list; ap_control_kickall() 537 ap_sta_hash_del(ap, sta); ap_control_kickall() 539 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) ap_control_kickall() 541 ap_free_sta(ap, sta); ap_control_kickall() 543 spin_unlock_bh(&ap->sta_table_lock); ap_control_kickall() 559 if (!sta->ap) prism2_ap_proc_show() 564 sta->u.ap.channel, sta->last_rx_signal, prism2_ap_proc_show() 567 for (i = 0; i < sta->u.ap.ssid_len; i++) { prism2_ap_proc_show() 568 if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127) prism2_ap_proc_show() 569 seq_putc(m, sta->u.ap.ssid[i]); prism2_ap_proc_show() 571 seq_printf(m, "<%02x>", sta->u.ap.ssid[i]); prism2_ap_proc_show() 587 struct ap_data *ap = m->private; prism2_ap_proc_start() local 588 spin_lock_bh(&ap->sta_table_lock); prism2_ap_proc_start() 589 return seq_list_start_head(&ap->sta_list, *_pos); prism2_ap_proc_start() 594 struct ap_data *ap = m->private; prism2_ap_proc_next() local 595 return seq_list_next(v, &ap->sta_list, _pos); prism2_ap_proc_next() 600 struct ap_data *ap = m->private; prism2_ap_proc_stop() local 601 spin_unlock_bh(&ap->sta_table_lock); prism2_ap_proc_stop() 630 void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver) hostap_check_sta_fw_version() argument 632 if (!ap) hostap_check_sta_fw_version() 638 ap->nullfunc_ack = 1; hostap_check_sta_fw_version() 640 ap->nullfunc_ack = 0; hostap_check_sta_fw_version() 645 ap->local->dev->name); hostap_check_sta_fw_version() 653 struct ap_data *ap = data; hostap_ap_tx_cb() local 656 if (!ap->local->hostapd || !ap->local->apdev) { hostap_ap_tx_cb() 668 skb->dev = ap->local->apdev; hostap_ap_tx_cb() 681 struct ap_data *ap = data; hostap_ap_tx_cb_auth() local 682 struct net_device *dev = ap->local->dev; hostap_ap_tx_cb_auth() 689 if (ap->local->hostapd) { hostap_ap_tx_cb_auth() 713 spin_lock(&ap->sta_table_lock); hostap_ap_tx_cb_auth() 714 sta = ap_get_sta(ap, hdr->addr1); hostap_ap_tx_cb_auth() 717 spin_unlock(&ap->sta_table_lock); hostap_ap_tx_cb_auth() 749 struct ap_data *ap = data; hostap_ap_tx_cb_assoc() local 750 struct net_device *dev = ap->local->dev; hostap_ap_tx_cb_assoc() 757 if (ap->local->hostapd) { hostap_ap_tx_cb_assoc() 777 spin_lock(&ap->sta_table_lock); hostap_ap_tx_cb_assoc() 778 sta = ap_get_sta(ap, hdr->addr1); hostap_ap_tx_cb_assoc() 781 spin_unlock(&ap->sta_table_lock); hostap_ap_tx_cb_assoc() 814 struct ap_data *ap = data; hostap_ap_tx_cb_poll() local 822 spin_lock(&ap->sta_table_lock); hostap_ap_tx_cb_poll() 823 sta = ap_get_sta(ap, hdr->addr1); hostap_ap_tx_cb_poll() 826 spin_unlock(&ap->sta_table_lock); hostap_ap_tx_cb_poll() 830 ap->local->dev->name, hdr->addr1); hostap_ap_tx_cb_poll() 841 struct ap_data *ap = local->ap; hostap_init_data() local 843 if (ap == NULL) { hostap_init_data() 844 printk(KERN_WARNING "hostap_init_data: ap == NULL\n"); hostap_init_data() 847 memset(ap, 0, sizeof(struct ap_data)); hostap_init_data() 848 ap->local = local; hostap_init_data() 850 ap->ap_policy = 
GET_INT_PARM(other_ap_policy, local->card_idx); hostap_init_data() 851 ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx); hostap_init_data() 852 ap->max_inactivity = hostap_init_data() 854 ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx); hostap_init_data() 856 spin_lock_init(&ap->sta_table_lock); hostap_init_data() 857 INIT_LIST_HEAD(&ap->sta_list); hostap_init_data() 860 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); hostap_init_data() 862 ap->tx_callback_idx = hostap_init_data() 863 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); hostap_init_data() 864 if (ap->tx_callback_idx == 0) hostap_init_data() 868 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); hostap_init_data() 870 ap->tx_callback_auth = hostap_init_data() 871 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); hostap_init_data() 872 ap->tx_callback_assoc = hostap_init_data() 873 hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap); hostap_init_data() 874 ap->tx_callback_poll = hostap_init_data() 875 hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap); hostap_init_data() 876 if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 || hostap_init_data() 877 ap->tx_callback_poll == 0) hostap_init_data() 881 spin_lock_init(&ap->mac_restrictions.lock); hostap_init_data() 882 INIT_LIST_HEAD(&ap->mac_restrictions.mac_list); hostap_init_data() 885 ap->initialized = 1; hostap_init_data() 891 struct ap_data *ap = local->ap; hostap_init_ap_proc() local 893 ap->proc = local->proc; hostap_init_ap_proc() 894 if (ap->proc == NULL) hostap_init_ap_proc() 898 proc_create_data("ap_debug", 0, ap->proc, &ap_debug_proc_fops, ap); hostap_init_ap_proc() 902 proc_create_data("ap_control", 0, ap->proc, &ap_control_proc_fops, ap); hostap_init_ap_proc() 903 proc_create_data("ap", 0, ap->proc, &prism2_ap_proc_fops, ap); hostap_init_ap_proc() 909 void hostap_free_data(struct ap_data *ap) hostap_free_data() argument 913 if (ap == NULL || !ap->initialized) { hostap_free_data() 914 printk(KERN_DEBUG "hostap_free_data: ap has not yet been " hostap_free_data() 919 flush_work(&ap->add_sta_proc_queue); hostap_free_data() 922 flush_work(&ap->wds_oper_queue); hostap_free_data() 923 if (ap->crypt) hostap_free_data() 924 ap->crypt->deinit(ap->crypt_priv); hostap_free_data() 925 ap->crypt = ap->crypt_priv = NULL; hostap_free_data() 928 list_for_each_entry_safe(sta, n, &ap->sta_list, list) { hostap_free_data() 929 ap_sta_hash_del(ap, sta); hostap_free_data() 931 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) hostap_free_data() 933 ap_free_sta(ap, sta); hostap_free_data() 937 if (ap->proc != NULL) { hostap_free_data() 938 remove_proc_entry("ap_debug", ap->proc); hostap_free_data() 943 if (ap->proc != NULL) { hostap_free_data() 944 remove_proc_entry("ap", ap->proc); hostap_free_data() 945 remove_proc_entry("ap_control", ap->proc); hostap_free_data() 947 ap_control_flush_macs(&ap->mac_restrictions); hostap_free_data() 950 ap->initialized = 0; hostap_free_data() 955 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta) ap_get_sta() argument 959 s = ap->sta_hash[STA_HASH(sta)]; ap_get_sta() 1054 sta->ap ? 
"AP" : "STA", prism2_sta_proc_show() 1093 if (sta->ap) { prism2_sta_proc_show() 1094 if (sta->u.ap.channel >= 0) prism2_sta_proc_show() 1095 seq_printf(m, "channel=%d\n", sta->u.ap.channel); prism2_sta_proc_show() 1097 for (i = 0; i < sta->u.ap.ssid_len; i++) { prism2_sta_proc_show() 1098 if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127) prism2_sta_proc_show() 1099 seq_putc(m, sta->u.ap.ssid[i]); prism2_sta_proc_show() 1101 seq_printf(m, "<%02x>", sta->u.ap.ssid[i]); prism2_sta_proc_show() 1124 struct ap_data *ap = container_of(work, struct ap_data, handle_add_proc_queue() local 1130 entry = ap->add_sta_proc_entries; handle_add_proc_queue() 1131 ap->add_sta_proc_entries = NULL; handle_add_proc_queue() 1134 spin_lock_bh(&ap->sta_table_lock); handle_add_proc_queue() 1135 sta = ap_get_sta(ap, entry->addr); handle_add_proc_queue() 1138 spin_unlock_bh(&ap->sta_table_lock); handle_add_proc_queue() 1143 name, 0, ap->proc, handle_add_proc_queue() 1156 static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr) ap_add_sta() argument 1167 sta->local = ap->local; ap_add_sta() 1172 spin_lock_bh(&ap->sta_table_lock); ap_add_sta() 1173 list_add(&sta->list, &ap->sta_list); ap_add_sta() 1174 ap->num_sta++; ap_add_sta() 1175 ap_sta_hash_add(ap, sta); ap_add_sta() 1176 spin_unlock_bh(&ap->sta_table_lock); ap_add_sta() 1178 if (ap->proc) { ap_add_sta() 1185 entry->next = ap->add_sta_proc_entries; ap_add_sta() 1186 ap->add_sta_proc_entries = entry; ap_add_sta() 1187 schedule_work(&ap->add_sta_proc_queue); ap_add_sta() 1194 sta->timer.expires = jiffies + ap->max_inactivity; ap_add_sta() 1197 if (!ap->local->hostapd) ap_add_sta() 1269 static void ap_crypt_init(struct ap_data *ap) ap_crypt_init() argument 1271 ap->crypt = lib80211_get_crypto_ops("WEP"); ap_crypt_init() 1273 if (ap->crypt) { ap_crypt_init() 1274 if (ap->crypt->init) { ap_crypt_init() 1275 ap->crypt_priv = ap->crypt->init(0); ap_crypt_init() 1276 if (ap->crypt_priv == NULL) ap_crypt_init() 1277 ap->crypt = NULL; ap_crypt_init() 1281 ap->crypt->set_key(key, WEP_KEY_LEN, NULL, ap_crypt_init() 1282 ap->crypt_priv); ap_crypt_init() 1287 if (ap->crypt == NULL) { ap_crypt_init() 1302 static char * ap_auth_make_challenge(struct ap_data *ap) ap_auth_make_challenge() argument 1307 if (ap->crypt == NULL) { ap_auth_make_challenge() 1308 ap_crypt_init(ap); ap_auth_make_challenge() 1309 if (ap->crypt == NULL) ap_auth_make_challenge() 1320 ap->crypt->extra_mpdu_prefix_len + ap_auth_make_challenge() 1321 ap->crypt->extra_mpdu_postfix_len); ap_auth_make_challenge() 1327 skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len); ap_auth_make_challenge() 1330 if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) { ap_auth_make_challenge() 1336 skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len, ap_auth_make_challenge() 1351 struct ap_data *ap = local->ap; handle_authen() local 1371 spin_lock_bh(&local->ap->sta_table_lock); handle_authen() 1372 sta = ap_get_sta(local->ap, hdr->addr2); handle_authen() 1375 spin_unlock_bh(&local->ap->sta_table_lock); handle_authen() 1395 ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) { handle_authen() 1428 if (sta && sta->ap) { handle_authen() 1429 if (time_after(jiffies, sta->u.ap.last_beacon + handle_authen() 1434 sta->ap = 0; handle_authen() 1458 if (local->ap->num_sta >= MAX_STA_COUNT) { handle_authen() 1465 sta = ap_add_sta(local->ap, hdr->addr2); handle_authen() 1488 ap_auth_make_challenge(local->ap); handle_authen() 1540 body, olen, hdr->addr2, ap->tx_callback_auth); handle_authen() 1581 
spin_lock_bh(&local->ap->sta_table_lock); handle_assoc() 1582 sta = ap_get_sta(local->ap, hdr->addr2); handle_assoc() 1584 spin_unlock_bh(&local->ap->sta_table_lock); handle_assoc() 1592 spin_unlock_bh(&local->ap->sta_table_lock); handle_assoc() 1673 spin_lock_bh(&local->ap->sta_table_lock); handle_assoc() 1675 if (local->ap->sta_aid[sta->aid - 1] == NULL) handle_assoc() 1679 spin_unlock_bh(&local->ap->sta_table_lock); handle_assoc() 1683 local->ap->sta_aid[sta->aid - 1] = sta; handle_assoc() 1684 spin_unlock_bh(&local->ap->sta_table_lock); handle_assoc() 1743 send_deauth ? 0 : local->ap->tx_callback_assoc); handle_assoc() 1792 spin_lock_bh(&local->ap->sta_table_lock); handle_deauth() 1793 sta = ap_get_sta(local->ap, hdr->addr2); handle_deauth() 1795 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) handle_deauth() 1799 spin_unlock_bh(&local->ap->sta_table_lock); handle_deauth() 1834 spin_lock_bh(&local->ap->sta_table_lock); handle_disassoc() 1835 sta = ap_get_sta(local->ap, hdr->addr2); handle_disassoc() 1837 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) handle_disassoc() 1841 spin_unlock_bh(&local->ap->sta_table_lock); handle_disassoc() 1875 spin_lock_bh(&local->ap->sta_table_lock); ap_handle_dropped_data() 1876 sta = ap_get_sta(local->ap, hdr->addr2); ap_handle_dropped_data() 1879 spin_unlock_bh(&local->ap->sta_table_lock); ap_handle_dropped_data() 1957 spin_lock_bh(&local->ap->sta_table_lock); handle_pspoll() 1958 sta = ap_get_sta(local->ap, hdr->addr2); handle_pspoll() 1961 spin_unlock_bh(&local->ap->sta_table_lock); handle_pspoll() 2015 struct ap_data *ap = container_of(work, struct ap_data, handle_wds_oper_queue() local 2017 local_info_t *local = ap->local; handle_wds_oper_queue() 2021 entry = local->ap->wds_oper_entries; handle_wds_oper_queue() 2022 local->ap->wds_oper_entries = NULL; handle_wds_oper_queue() 2078 if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS && handle_beacon() 2096 if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID && handle_beacon() 2144 spin_lock_bh(&local->ap->sta_table_lock); handle_beacon() 2145 sta = ap_get_sta(local->ap, hdr->addr2); handle_beacon() 2148 spin_unlock_bh(&local->ap->sta_table_lock); handle_beacon() 2153 sta = ap_add_sta(local->ap, hdr->addr2); handle_beacon() 2165 if (local->ap->autom_ap_wds) { handle_beacon() 2170 sta->ap = 1; handle_beacon() 2172 sta->u.ap.ssid_len = ssid_len; handle_beacon() 2173 memcpy(sta->u.ap.ssid, ssid, ssid_len); handle_beacon() 2174 sta->u.ap.ssid[ssid_len] = '\0'; handle_beacon() 2176 sta->u.ap.ssid_len = 0; handle_beacon() 2177 sta->u.ap.ssid[0] = '\0'; handle_beacon() 2179 sta->u.ap.channel = channel; handle_beacon() 2182 sta->u.ap.last_beacon = sta->last_rx = jiffies; handle_beacon() 2239 if (local->ap->nullfunc_ack && handle_ap_item() 2337 if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL && hostap_rx() 2391 struct ap_data *ap = local->ap; prism2_ap_get_sta_qual() local 2395 spin_lock_bh(&ap->sta_table_lock); prism2_ap_get_sta_qual() 2397 for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list; prism2_ap_get_sta_qual() 2401 if (aplist && !sta->ap) prism2_ap_get_sta_qual() 2421 spin_unlock_bh(&ap->sta_table_lock); prism2_ap_get_sta_qual() 2434 struct ap_data *ap; prism2_ap_translate_scan() local 2445 ap = local->ap; prism2_ap_translate_scan() 2447 spin_lock_bh(&ap->sta_table_lock); prism2_ap_translate_scan() 2449 for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list; prism2_ap_translate_scan() 2466 if (sta->ap) prism2_ap_translate_scan() 2491 if (sta->ap) { prism2_ap_translate_scan() 2494 
iwe.u.data.length = sta->u.ap.ssid_len; prism2_ap_translate_scan() 2498 sta->u.ap.ssid); prism2_ap_translate_scan() 2509 sta->u.ap.ssid); prism2_ap_translate_scan() 2511 if (sta->u.ap.channel > 0 && prism2_ap_translate_scan() 2512 sta->u.ap.channel <= FREQ_COUNT) { prism2_ap_translate_scan() 2515 iwe.u.freq.m = freq_list[sta->u.ap.channel - 1] prism2_ap_translate_scan() 2538 spin_unlock_bh(&ap->sta_table_lock); prism2_ap_translate_scan() 2544 static int prism2_hostapd_add_sta(struct ap_data *ap, prism2_hostapd_add_sta() argument 2549 spin_lock_bh(&ap->sta_table_lock); prism2_hostapd_add_sta() 2550 sta = ap_get_sta(ap, param->sta_addr); prism2_hostapd_add_sta() 2553 spin_unlock_bh(&ap->sta_table_lock); prism2_hostapd_add_sta() 2556 sta = ap_add_sta(ap, param->sta_addr); prism2_hostapd_add_sta() 2561 if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) prism2_hostapd_add_sta() 2583 static int prism2_hostapd_remove_sta(struct ap_data *ap, prism2_hostapd_remove_sta() argument 2588 spin_lock_bh(&ap->sta_table_lock); prism2_hostapd_remove_sta() 2589 sta = ap_get_sta(ap, param->sta_addr); prism2_hostapd_remove_sta() 2591 ap_sta_hash_del(ap, sta); prism2_hostapd_remove_sta() 2594 spin_unlock_bh(&ap->sta_table_lock); prism2_hostapd_remove_sta() 2599 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) prism2_hostapd_remove_sta() 2601 ap_free_sta(ap, sta); prism2_hostapd_remove_sta() 2607 static int prism2_hostapd_get_info_sta(struct ap_data *ap, prism2_hostapd_get_info_sta() argument 2612 spin_lock_bh(&ap->sta_table_lock); prism2_hostapd_get_info_sta() 2613 sta = ap_get_sta(ap, param->sta_addr); prism2_hostapd_get_info_sta() 2616 spin_unlock_bh(&ap->sta_table_lock); prism2_hostapd_get_info_sta() 2629 static int prism2_hostapd_set_flags_sta(struct ap_data *ap, prism2_hostapd_set_flags_sta() argument 2634 spin_lock_bh(&ap->sta_table_lock); prism2_hostapd_set_flags_sta() 2635 sta = ap_get_sta(ap, param->sta_addr); prism2_hostapd_set_flags_sta() 2640 spin_unlock_bh(&ap->sta_table_lock); prism2_hostapd_set_flags_sta() 2649 static int prism2_hostapd_sta_clear_stats(struct ap_data *ap, prism2_hostapd_sta_clear_stats() argument 2655 spin_lock_bh(&ap->sta_table_lock); prism2_hostapd_sta_clear_stats() 2656 sta = ap_get_sta(ap, param->sta_addr); prism2_hostapd_sta_clear_stats() 2665 spin_unlock_bh(&ap->sta_table_lock); prism2_hostapd_sta_clear_stats() 2674 int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param) prism2_hostapd() argument 2678 ap_control_kickall(ap); prism2_hostapd() 2681 return prism2_hostapd_add_sta(ap, param); prism2_hostapd() 2683 return prism2_hostapd_remove_sta(ap, param); prism2_hostapd() 2685 return prism2_hostapd_get_info_sta(ap, param); prism2_hostapd() 2687 return prism2_hostapd_set_flags_sta(ap, param); prism2_hostapd() 2689 return prism2_hostapd_sta_clear_stats(ap, param); prism2_hostapd() 2754 if (local->ap == NULL || skb->len < 10 || hostap_handle_sta_tx() 2762 if (local->ap->num_sta <= 0) hostap_handle_sta_tx() 2768 spin_lock(&local->ap->sta_table_lock); hostap_handle_sta_tx() 2769 sta = ap_get_sta(local->ap, hdr->addr1); hostap_handle_sta_tx() 2772 spin_unlock(&local->ap->sta_table_lock); hostap_handle_sta_tx() 2789 local->ap->tx_drop_nonassoc++; hostap_handle_sta_tx() 2802 local->ap->last_tx_rate = meta->rate = hostap_handle_sta_tx() 2894 spin_lock(&local->ap->sta_table_lock); hostap_handle_sta_tx_exc() 2895 sta = ap_get_sta(local->ap, hdr->addr1); hostap_handle_sta_tx_exc() 2897 spin_unlock(&local->ap->sta_table_lock); 
hostap_handle_sta_tx_exc() 2933 spin_unlock(&local->ap->sta_table_lock); hostap_handle_sta_tx_exc() 2964 spin_lock(&local->ap->sta_table_lock); hostap_update_sta_ps() 2965 sta = ap_get_sta(local->ap, hdr->addr2); hostap_update_sta_ps() 2968 spin_unlock(&local->ap->sta_table_lock); hostap_update_sta_ps() 2995 if (local->ap == NULL) hostap_handle_sta_rx() 3004 spin_lock(&local->ap->sta_table_lock); hostap_handle_sta_rx() 3005 sta = ap_get_sta(local->ap, hdr->addr2); hostap_handle_sta_rx() 3008 spin_unlock(&local->ap->sta_table_lock); hostap_handle_sta_rx() 3096 if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC && hostap_handle_sta_rx() 3131 spin_lock(&local->ap->sta_table_lock); hostap_handle_sta_crypto() 3132 sta = ap_get_sta(local->ap, hdr->addr2); hostap_handle_sta_crypto() 3135 spin_unlock(&local->ap->sta_table_lock); hostap_handle_sta_crypto() 3153 int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr) hostap_is_sta_assoc() argument 3158 spin_lock(&ap->sta_table_lock); hostap_is_sta_assoc() 3159 sta = ap_get_sta(ap, sta_addr); hostap_is_sta_assoc() 3160 if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap) hostap_is_sta_assoc() 3162 spin_unlock(&ap->sta_table_lock); hostap_is_sta_assoc() 3169 int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr) hostap_is_sta_authorized() argument 3174 spin_lock(&ap->sta_table_lock); hostap_is_sta_authorized() 3175 sta = ap_get_sta(ap, sta_addr); hostap_is_sta_authorized() 3176 if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap && hostap_is_sta_authorized() 3178 ap->local->ieee_802_1x == 0)) hostap_is_sta_authorized() 3180 spin_unlock(&ap->sta_table_lock); hostap_is_sta_authorized() 3187 int hostap_add_sta(struct ap_data *ap, u8 *sta_addr) hostap_add_sta() argument 3192 if (!ap) hostap_add_sta() 3195 spin_lock(&ap->sta_table_lock); hostap_add_sta() 3196 sta = ap_get_sta(ap, sta_addr); hostap_add_sta() 3199 spin_unlock(&ap->sta_table_lock); hostap_add_sta() 3202 sta = ap_add_sta(ap, sta_addr); hostap_add_sta() 3206 sta->ap = 1; hostap_add_sta() 3226 int hostap_update_rx_stats(struct ap_data *ap, hostap_update_rx_stats() argument 3232 if (!ap) hostap_update_rx_stats() 3235 spin_lock(&ap->sta_table_lock); hostap_update_rx_stats() 3236 sta = ap_get_sta(ap, hdr->addr2); hostap_update_rx_stats() 3251 spin_unlock(&ap->sta_table_lock); hostap_update_rx_stats() 3260 struct ap_data *ap = local->ap; hostap_update_rates() local 3262 if (!ap) hostap_update_rates() 3265 spin_lock_bh(&ap->sta_table_lock); hostap_update_rates() 3266 list_for_each_entry(sta, &ap->sta_list, list) { hostap_update_rates() 3269 spin_unlock_bh(&ap->sta_table_lock); hostap_update_rates() 3273 void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, ap_crypt_get_ptrs() argument 3278 spin_lock_bh(&ap->sta_table_lock); ap_crypt_get_ptrs() 3279 sta = ap_get_sta(ap, addr); ap_crypt_get_ptrs() 3282 spin_unlock_bh(&ap->sta_table_lock); ap_crypt_get_ptrs() 3285 sta = ap_add_sta(ap, addr); ap_crypt_get_ptrs() 3301 struct ap_data *ap = local->ap; hostap_add_wds_links() local 3304 spin_lock_bh(&ap->sta_table_lock); hostap_add_wds_links() 3305 list_for_each_entry(sta, &ap->sta_list, list) { hostap_add_wds_links() 3306 if (sta->ap) hostap_add_wds_links() 3309 spin_unlock_bh(&ap->sta_table_lock); hostap_add_wds_links() 3311 schedule_work(&local->ap->wds_oper_queue); hostap_add_wds_links() 3325 entry->next = local->ap->wds_oper_entries; hostap_wds_link_oper() 3326 local->ap->wds_oper_entries = entry; hostap_wds_link_oper() 3329 
schedule_work(&local->ap->wds_oper_queue); hostap_wds_link_oper()
|
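The hostap_ap.c entry revolves around a station table: ap_sta_hash_add() and ap_sta_hash_del() chain entries through sta->hnext into ap->sta_hash[STA_HASH(addr)], and ap_get_sta() walks one bucket under sta_table_lock. Below is a self-contained sketch of that chained-bucket layout for MAC addresses; the bucket function (last octet), the structure names and the absence of locking are simplifications of mine, not the hostap code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HASH_SIZE 256
#define MAC_LEN   6

struct sta {
	unsigned char addr[MAC_LEN];
	struct sta *hnext;		/* next entry in the same bucket */
};

static struct sta *sta_hash[HASH_SIZE];

/* Bucket index; hashing on the last octet is an assumption here. */
static unsigned int sta_bucket(const unsigned char *addr)
{
	return addr[MAC_LEN - 1];
}

static void sta_hash_add(struct sta *sta)
{
	unsigned int b = sta_bucket(sta->addr);

	sta->hnext = sta_hash[b];	/* push onto the bucket's chain */
	sta_hash[b] = sta;
}

static struct sta *sta_get(const unsigned char *addr)
{
	struct sta *s = sta_hash[sta_bucket(addr)];

	while (s && memcmp(s->addr, addr, MAC_LEN) != 0)
		s = s->hnext;
	return s;
}

static void sta_hash_del(const unsigned char *addr)
{
	struct sta **pp = &sta_hash[sta_bucket(addr)];

	while (*pp && memcmp((*pp)->addr, addr, MAC_LEN) != 0)
		pp = &(*pp)->hnext;
	if (*pp)
		*pp = (*pp)->hnext;	/* unlink; the caller frees the entry */
}

int main(void)
{
	unsigned char a[MAC_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct sta *sta = calloc(1, sizeof(*sta));

	if (!sta)
		return 1;
	memcpy(sta->addr, a, MAC_LEN);
	sta_hash_add(sta);
	printf("lookup %s\n", sta_get(a) ? "hit" : "miss");
	sta_hash_del(a);
	printf("after delete: %s\n", sta_get(a) ? "hit" : "miss");
	free(sta);
	return 0;
}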
H A D | hostap.h | 63 int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac); 64 void ap_control_kickall(struct ap_data *ap); 65 void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, 72 int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
|
H A D | hostap_ap.h | 79 int ap; /* whether this station is an AP */ member in struct:sta_info 94 } ap; member in union:sta_info::__anon7840 222 void hostap_free_data(struct ap_data *ap); 223 void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver); 249 int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr); 250 int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr); 251 int hostap_add_sta(struct ap_data *ap, u8 *sta_addr); 252 int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr, 259 void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
|
H A D | hostap_ioctl.c | 413 if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL && prism2_ioctl_giwrate() 418 rrq->value = local->ap->last_tx_rate > 0 ? prism2_ioctl_giwrate() 419 local->ap->last_tx_rate * 100000 : 11000000; prism2_ioctl_giwrate() 2388 if (local->ap != NULL) prism2_ioctl_priv_prism2_param() 2389 local->ap->ap_policy = value; prism2_ioctl_priv_prism2_param() 2397 if (local->ap != NULL) prism2_ioctl_priv_prism2_param() 2398 local->ap->max_inactivity = value * HZ; prism2_ioctl_priv_prism2_param() 2402 if (local->ap != NULL) prism2_ioctl_priv_prism2_param() 2403 local->ap->bridge_packets = value; prism2_ioctl_priv_prism2_param() 2419 if (local->ap != NULL) prism2_ioctl_priv_prism2_param() 2420 local->ap->nullfunc_ack = value; prism2_ioctl_priv_prism2_param() 2428 if (local->ap != NULL) { prism2_ioctl_priv_prism2_param() 2429 if (!local->ap->autom_ap_wds && value) { prism2_ioctl_priv_prism2_param() 2433 local->ap->autom_ap_wds = value; prism2_ioctl_priv_prism2_param() 2700 if (local->ap != NULL) prism2_ioctl_priv_get_prism2_param() 2701 *param = local->ap->ap_policy; prism2_ioctl_priv_get_prism2_param() 2707 if (local->ap != NULL) prism2_ioctl_priv_get_prism2_param() 2708 *param = local->ap->max_inactivity / HZ; prism2_ioctl_priv_get_prism2_param() 2714 if (local->ap != NULL) prism2_ioctl_priv_get_prism2_param() 2715 *param = local->ap->bridge_packets; prism2_ioctl_priv_get_prism2_param() 2725 if (local->ap != NULL) prism2_ioctl_priv_get_prism2_param() 2726 *param = local->ap->nullfunc_ack; prism2_ioctl_priv_get_prism2_param() 2736 if (local->ap != NULL) prism2_ioctl_priv_get_prism2_param() 2737 *param = local->ap->autom_ap_wds; prism2_ioctl_priv_get_prism2_param() 3009 local->ap->mac_restrictions.policy = MAC_POLICY_OPEN; ap_mac_cmd_ioctl() 3012 local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW; ap_mac_cmd_ioctl() 3015 local->ap->mac_restrictions.policy = MAC_POLICY_DENY; ap_mac_cmd_ioctl() 3018 ap_control_flush_macs(&local->ap->mac_restrictions); ap_mac_cmd_ioctl() 3021 ap_control_kickall(local->ap); ap_mac_cmd_ioctl() 3022 hostap_deauth_all_stas(local->dev, local->ap, 0); ap_mac_cmd_ioctl() 3231 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt); prism2_ioctl_siwencodeext() 3402 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt); prism2_ioctl_giwencodeext() 3469 local->ap, param->sta_addr, prism2_ioctl_set_encryption() 3600 sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0, prism2_ioctl_get_encryption() 3842 ret = prism2_hostapd(local->ap, param); prism2_ioctl_priv_hostapd() 4017 else ret = ap_control_add_mac(&local->ap->mac_restrictions, hostap_ioctl() 4022 else ret = ap_control_del_mac(&local->ap->mac_restrictions, hostap_ioctl() 4027 else ret = ap_control_kick_mac(local->ap, local->dev, hostap_ioctl()
|
/linux-4.1.27/drivers/net/wireless/orinoco/ |
H A D | fw.h | 14 void orinoco_cache_fw(struct orinoco_private *priv, int ap); 17 #define orinoco_cache_fw(priv, ap) do { } while (0)
|
/linux-4.1.27/drivers/isdn/hardware/eicon/ |
H A D | debuglib.c | 39 { va_list ap; \ 41 { va_start(ap, format); \ 43 (myDriverDebugHandle.id, DLI_##name, format, ap); \ 44 va_end(ap); \ 54 { va_list ap; DBG_FUNC() local 56 { va_start(ap, msgID); DBG_FUNC() 58 (myDriverDebugHandle.id, (unsigned long)msgID, ap); DBG_FUNC() 59 va_end(ap); DBG_FUNC() 141 va_list ap; xdi_dbg_xlog() local 142 va_start(ap, x); xdi_dbg_xlog() 148 (x[0] != 0) ? DLI_TRC : DLI_XLOG, x, ap); xdi_dbg_xlog() 150 (*(myDriverDebugHandle.dbg_old))(myDriverDebugHandle.id, x, ap); xdi_dbg_xlog() 153 va_end(ap); xdi_dbg_xlog()
|
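The debuglib.c entry forwards variadic debug calls to a registered handler: the DBG_* wrappers do va_start()/va_end() and pass the va_list, together with a driver id and a message class, to a function pointer in myDriverDebugHandle. A hedged sketch of that forwarding shape follows; the handler signature, the dbg_register() helper and the vfprintf-based default are illustrative choices, not the eicon driver's API.

#include <stdarg.h>
#include <stdio.h>

/* Handler receives the driver id, a message class and the raw va_list. */
typedef void (*dbg_handler_t)(int id, int msg_class,
			      const char *fmt, va_list ap);

static struct {
	int id;
	dbg_handler_t fn;
} dbg_handle = { 0, NULL };

static void dbg_register(int id, dbg_handler_t fn)
{
	dbg_handle.id = id;
	dbg_handle.fn = fn;
}

/* Variadic front-end: capture the arguments and forward the va_list. */
static void dbg_log(int msg_class, const char *fmt, ...)
{
	va_list ap;

	if (!dbg_handle.fn)
		return;
	va_start(ap, fmt);
	dbg_handle.fn(dbg_handle.id, msg_class, fmt, ap);
	va_end(ap);
}

/* Default handler: prefix with id/class and print to stderr. */
static void stderr_handler(int id, int msg_class, const char *fmt, va_list ap)
{
	fprintf(stderr, "[drv %d class %d] ", id, msg_class);
	vfprintf(stderr, fmt, ap);
	fputc('\n', stderr);
}

int main(void)
{
	dbg_register(1, stderr_handler);
	dbg_log(2, "queue depth now %d", 32);
	return 0;
}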
/linux-4.1.27/net/bridge/netfilter/ |
H A D | ebt_arpreply.c | 24 const struct arphdr *ap; ebt_arpreply_tg() local 29 ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah); ebt_arpreply_tg() 30 if (ap == NULL) ebt_arpreply_tg() 33 if (ap->ar_op != htons(ARPOP_REQUEST) || ebt_arpreply_tg() 34 ap->ar_hln != ETH_ALEN || ebt_arpreply_tg() 35 ap->ar_pro != htons(ETH_P_IP) || ebt_arpreply_tg() 36 ap->ar_pln != 4) ebt_arpreply_tg()
|
H A D | ebt_snat.c | 30 const struct arphdr *ap; ebt_snat_tg() local 33 ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah); ebt_snat_tg() 34 if (ap == NULL) ebt_snat_tg() 36 if (ap->ar_hln != ETH_ALEN) ebt_snat_tg()
|
H A D | ebt_log.c | 159 const struct arppayload *ap; ebt_log_packet() local 162 ap = skb_header_pointer(skb, sizeof(_arph), ebt_log_packet() 164 if (ap == NULL) { ebt_log_packet() 169 ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); ebt_log_packet()
|
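The ebt_arpreply.c and ebt_snat.c entries fetch the ARP header with skb_header_pointer() and then sanity-check ar_op, ar_hln, ar_pro and ar_pln before acting on the packet. Below is a standalone sketch of the same field checks against a raw buffer; the local arp_hdr struct and the is_ipv4_arp_request() name are mine, and real kernel code would use struct arphdr and the sk_buff helpers instead.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define ARPOP_REQUEST	1
#define ETH_P_IP	0x0800

/* Fixed part of an ARP header, multi-byte fields in network byte order. */
struct arp_hdr {
	uint16_t ar_hrd;	/* hardware type */
	uint16_t ar_pro;	/* protocol type */
	uint8_t  ar_hln;	/* hardware address length */
	uint8_t  ar_pln;	/* protocol address length */
	uint16_t ar_op;		/* opcode */
};

/* Same predicate the ebtables targets apply before replying/rewriting. */
static int is_ipv4_arp_request(const void *buf, size_t len)
{
	struct arp_hdr ah;

	if (len < sizeof(ah))
		return 0;		/* too short, like a NULL header pointer */
	memcpy(&ah, buf, sizeof(ah));	/* bounded copy, as skb_header_pointer does */

	return ah.ar_op  == htons(ARPOP_REQUEST) &&
	       ah.ar_hln == ETH_ALEN &&
	       ah.ar_pro == htons(ETH_P_IP) &&
	       ah.ar_pln == 4;
}

int main(void)
{
	struct arp_hdr req = {
		.ar_hrd = htons(1),		/* Ethernet */
		.ar_pro = htons(ETH_P_IP),
		.ar_hln = ETH_ALEN,
		.ar_pln = 4,
		.ar_op  = htons(ARPOP_REQUEST),
	};

	printf("valid IPv4 ARP request: %s\n",
	       is_ipv4_arp_request(&req, sizeof(req)) ? "yes" : "no");
	return 0;
}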
/linux-4.1.27/drivers/input/misc/ |
H A D | apanel.c | 91 struct apanel *ap = ipdev->private; apanel_poll() local 97 data = i2c_smbus_read_word_data(ap->client, cmd); apanel_poll() 102 i2c_smbus_write_word_data(ap->client, cmd, 0); apanel_poll() 110 report_key(idev, ap->keymap[i]); apanel_poll() 116 struct apanel *ap = container_of(work, struct apanel, led_work); led_update() local 118 i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); led_update() 124 struct apanel *ap = container_of(led, struct apanel, mail_led); mail_led_set() local 127 ap->led_bits |= 0x8000; mail_led_set() 129 ap->led_bits &= ~0x8000; mail_led_set() 131 schedule_work(&ap->led_work); mail_led_set() 136 struct apanel *ap = i2c_get_clientdata(client); apanel_remove() local 139 led_classdev_unregister(&ap->mail_led); apanel_remove() 141 input_unregister_polled_device(ap->ipdev); apanel_remove() 142 input_free_polled_device(ap->ipdev); apanel_remove() 191 struct apanel *ap; apanel_probe() local 197 ap = &apanel; apanel_probe() 203 ap->ipdev = ipdev; apanel_probe() 204 ap->client = client; apanel_probe() 206 i2c_set_clientdata(client, ap); apanel_probe() 217 ipdev->private = ap; apanel_probe() 227 idev->keycode = ap->keymap; apanel_probe() 228 idev->keycodesize = sizeof(ap->keymap[0]); apanel_probe() 232 if (ap->keymap[i]) apanel_probe() 233 set_bit(ap->keymap[i], idev->keybit); apanel_probe() 239 INIT_WORK(&ap->led_work, led_update); apanel_probe() 241 err = led_classdev_register(&client->dev, &ap->mail_led); apanel_probe()
|
/linux-4.1.27/drivers/isdn/gigaset/ |
H A D | capi.c | 197 struct gigaset_capi_appl *ap; get_appl() local 199 list_for_each_entry(ap, &iif->appls, ctrlist) get_appl() 200 if (ap->id == appl) get_appl() 201 return ap; get_appl() 374 struct gigaset_capi_appl *ap = bcs->ap; gigaset_skb_sent() local 381 if (!ap) { gigaset_skb_sent() 398 send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req), gigaset_skb_sent() 419 struct gigaset_capi_appl *ap = bcs->ap; gigaset_skb_rcvd() local 425 if (!ap) { gigaset_skb_rcvd() 445 CAPIMSG_SETAPPID(skb->data, ap->id); gigaset_skb_rcvd() 448 CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++); gigaset_skb_rcvd() 460 capi_ctr_handle_message(&iif->ctr, ap->id, skb); gigaset_skb_rcvd() 500 struct gigaset_capi_appl *ap; gigaset_isdn_icall() local 630 if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) { gigaset_isdn_icall() 632 __func__, bcs->ap, bcs->apconnstate); gigaset_isdn_icall() 633 bcs->ap = NULL; gigaset_isdn_icall() 638 list_for_each_entry(ap, &iif->appls, ctrlist) gigaset_isdn_icall() 639 if (actCIPmask & ap->listenCIPmask) { gigaset_isdn_icall() 641 iif->hcmsg.ApplId = ap->id; gigaset_isdn_icall() 642 iif->hcmsg.Messagenumber = ap->nextMessageNumber++; gigaset_isdn_icall() 661 ap->bcnext = bcs->ap; gigaset_isdn_icall() 662 bcs->ap = ap; gigaset_isdn_icall() 668 capi_ctr_handle_message(&iif->ctr, ap->id, skb); gigaset_isdn_icall() 676 return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE; gigaset_isdn_icall() 684 struct gigaset_capi_appl *ap, u16 reason) send_disconnect_ind() 693 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND, send_disconnect_ind() 694 ap->nextMessageNumber++, send_disconnect_ind() 709 capi_ctr_handle_message(&iif->ctr, ap->id, skb); send_disconnect_ind() 718 struct gigaset_capi_appl *ap) send_disconnect_b3_ind() 729 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND, send_disconnect_b3_ind() 730 ap->nextMessageNumber++, send_disconnect_b3_ind() 744 capi_ctr_handle_message(&iif->ctr, ap->id, skb); send_disconnect_b3_ind() 758 struct gigaset_capi_appl *ap; gigaset_isdn_connD() local 764 ap = bcs->ap; gigaset_isdn_connD() 765 if (!ap) { gigaset_isdn_connD() 773 __func__, ap->id); gigaset_isdn_connD() 777 while (ap->bcnext) { gigaset_isdn_connD() 780 __func__, ap->bcnext->id); gigaset_isdn_connD() 781 send_disconnect_ind(bcs, ap->bcnext, gigaset_isdn_connD() 783 ap->bcnext = ap->bcnext->bcnext; gigaset_isdn_connD() 789 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND, gigaset_isdn_connD() 790 ap->nextMessageNumber++, gigaset_isdn_connD() 813 capi_ctr_handle_message(&iif->ctr, ap->id, skb); gigaset_isdn_connD() 825 struct gigaset_capi_appl *ap; gigaset_isdn_hupD() local 834 while (bcs->ap != NULL) { gigaset_isdn_hupD() 835 ap = bcs->ap; gigaset_isdn_hupD() 836 bcs->ap = ap->bcnext; gigaset_isdn_hupD() 838 send_disconnect_b3_ind(bcs, ap); gigaset_isdn_hupD() 839 send_disconnect_ind(bcs, ap, 0); gigaset_isdn_hupD() 857 struct gigaset_capi_appl *ap; gigaset_isdn_connB() local 864 ap = bcs->ap; gigaset_isdn_connB() 865 if (!ap) { gigaset_isdn_connB() 873 __func__, ap->id); gigaset_isdn_connB() 894 while (ap->bcnext) { gigaset_isdn_connB() 897 __func__, ap->bcnext->id); gigaset_isdn_connB() 898 send_disconnect_ind(bcs, ap->bcnext, gigaset_isdn_connB() 900 ap->bcnext = ap->bcnext->bcnext; gigaset_isdn_connB() 903 capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND, gigaset_isdn_connB() 904 ap->nextMessageNumber++, gigaset_isdn_connB() 917 capi_ctr_handle_message(&iif->ctr, ap->id, skb); gigaset_isdn_connB() 929 struct 
gigaset_capi_appl *ap = bcs->ap; gigaset_isdn_hupB() local 933 if (!ap) { gigaset_isdn_hupB() 938 send_disconnect_b3_ind(bcs, ap); gigaset_isdn_hupB() 1003 struct gigaset_capi_appl *ap; gigaset_register_appl() local 1008 list_for_each_entry(ap, &iif->appls, ctrlist) gigaset_register_appl() 1009 if (ap->id == appl) { gigaset_register_appl() 1015 ap = kzalloc(sizeof(*ap), GFP_KERNEL); gigaset_register_appl() 1016 if (!ap) { gigaset_register_appl() 1020 ap->id = appl; gigaset_register_appl() 1021 ap->rp = *rp; gigaset_register_appl() 1023 list_add(&ap->ctrlist, &iif->appls); gigaset_register_appl() 1024 dev_info(cs->dev, "application %u registered\n", ap->id); gigaset_register_appl() 1033 struct gigaset_capi_appl *ap) remove_appl_from_channel() 1041 bcap = bcs->ap; remove_appl_from_channel() 1048 if (bcap == ap) { remove_appl_from_channel() 1049 bcs->ap = ap->bcnext; remove_appl_from_channel() 1050 if (bcs->ap != NULL) { remove_appl_from_channel() 1072 if (bcap->bcnext == ap) { remove_appl_from_channel() 1090 struct gigaset_capi_appl *ap, *tmp; gigaset_release_appl() local 1095 list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist) gigaset_release_appl() 1096 if (ap->id == appl) { gigaset_release_appl() 1099 remove_appl_from_channel(&cs->bcs[ch], ap); gigaset_release_appl() 1102 list_del(&ap->ctrlist); gigaset_release_appl() 1103 kfree(ap); gigaset_release_appl() 1118 struct gigaset_capi_appl *ap, send_conf() 1137 capi_ctr_handle_message(&iif->ctr, ap->id, skb); send_conf() 1144 struct gigaset_capi_appl *ap, do_facility_req() 1188 send_conf(iif, ap, skb, CapiIllMessageParmCoding); do_facility_req() 1206 send_conf(iif, ap, skb, do_facility_req() 1275 capi_ctr_handle_message(&iif->ctr, ap->id, cskb); do_facility_req() 1284 struct gigaset_capi_appl *ap, do_listen_req() 1298 ap->listenInfoMask = iif->acmsg.InfoMask; do_listen_req() 1299 ap->listenCIPmask = iif->acmsg.CIPmask; do_listen_req() 1300 send_conf(iif, ap, skb, CapiSuccess); do_listen_req() 1308 struct gigaset_capi_appl *ap, do_alert_req() 1320 send_conf(iif, ap, skb, CapiAlertAlreadySent); do_alert_req() 1329 struct gigaset_capi_appl *ap, do_connect_req() 1355 send_conf(iif, ap, skb, CapiNoPlciAvailable); do_connect_req() 1359 if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) do_connect_req() 1361 __func__, bcs->ap, bcs->apconnstate); do_connect_req() 1362 ap->bcnext = NULL; do_connect_req() 1363 bcs->ap = ap; do_connect_req() 1367 bcs->rx_bufsize = ap->rp.datablklen; do_connect_req() 1603 send_conf(iif, ap, skb, CapiSuccess); do_connect_req() 1615 send_conf(iif, ap, skb, info); do_connect_req() 1623 struct gigaset_capi_appl *ap, do_connect_resp() 1655 while (bcs->ap != NULL) { do_connect_resp() 1656 oap = bcs->ap; do_connect_resp() 1657 bcs->ap = oap->bcnext; do_connect_resp() 1658 if (oap != ap) { do_connect_resp() 1665 ap->bcnext = NULL; do_connect_resp() 1666 bcs->ap = ap; do_connect_resp() 1669 bcs->rx_bufsize = ap->rp.datablklen; do_connect_resp() 1736 send_disconnect_ind(bcs, ap, 0); do_connect_resp() 1740 if (bcs->ap == ap) { do_connect_resp() 1741 bcs->ap = ap->bcnext; do_connect_resp() 1742 if (bcs->ap == NULL) { do_connect_resp() 1750 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) { do_connect_resp() 1751 if (oap->bcnext == ap) { do_connect_resp() 1759 __func__, ap->id); do_connect_resp() 1765 while (bcs->ap != NULL) { do_connect_resp() 1766 oap = bcs->ap; do_connect_resp() 1767 bcs->ap = oap->bcnext; do_connect_resp() 1768 if (oap != ap) { do_connect_resp() 1775 ap->bcnext = NULL; do_connect_resp() 1776 bcs->ap = 
ap; do_connect_resp() 1795 struct gigaset_capi_appl *ap, do_connect_b3_req() 1816 send_conf(iif, ap, skb, CapiIllContrPlciNcci); do_connect_b3_req() 1829 send_conf(iif, ap, skb, do_connect_b3_req() 1842 struct gigaset_capi_appl *ap, do_connect_b3_resp() 1894 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND, do_connect_b3_resp() 1895 ap->nextMessageNumber++, cmsg->adr.adrNCCI); do_connect_b3_resp() 1903 capi_ctr_handle_message(&iif->ctr, ap->id, skb); do_connect_b3_resp() 1912 struct gigaset_capi_appl *ap, do_disconnect_req() 1935 send_conf(iif, ap, skb, CapiIllContrPlciNcci); do_disconnect_req() 1969 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); do_disconnect_req() 1972 capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND, do_disconnect_req() 1973 ap->nextMessageNumber++, do_disconnect_req() 1979 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); do_disconnect_req() 1993 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); do_disconnect_req() 1998 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); do_disconnect_req() 2004 send_conf(iif, ap, skb, CapiSuccess); do_disconnect_req() 2012 struct gigaset_capi_appl *ap, do_disconnect_b3_req() 2034 send_conf(iif, ap, skb, CapiIllContrPlciNcci); do_disconnect_b3_req() 2041 send_conf(iif, ap, skb, do_disconnect_b3_req() 2048 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); do_disconnect_b3_req() 2056 send_conf(iif, ap, skb, do_disconnect_b3_req() 2065 struct gigaset_capi_appl *ap, do_data_b3_req() 2085 send_conf(iif, ap, skb, CapiIllContrPlciNcci); do_data_b3_req() 2097 send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */ do_data_b3_req() 2103 send_conf(iif, ap, skb, CapiIllMessageParmCoding); do_data_b3_req() 2109 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); do_data_b3_req() 2120 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); do_data_b3_req() 2129 send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle, do_data_b3_req() 2139 struct gigaset_capi_appl *ap, do_reset_b3_req() 2151 send_conf(iif, ap, skb, do_reset_b3_req() 2159 struct gigaset_capi_appl *ap, do_unsupported() 2171 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); do_unsupported() 2178 struct gigaset_capi_appl *ap, do_nothing() 2194 struct gigaset_capi_appl *ap, do_data_b3_resp() 2273 struct gigaset_capi_appl *ap; gigaset_send_message() local 2283 ap = get_appl(iif, CAPIMSG_APPID(skb->data)); gigaset_send_message() 2284 if (!ap) { gigaset_send_message() 2308 handler(iif, ap, skb); gigaset_send_message() 2318 ap = get_appl(iif, CAPIMSG_APPID(skb->data)); gigaset_send_message() 2319 if (!ap) { gigaset_send_message() 2332 handler(iif, ap, skb); gigaset_send_message() 683 send_disconnect_ind(struct bc_state *bcs, struct gigaset_capi_appl *ap, u16 reason) send_disconnect_ind() argument 717 send_disconnect_b3_ind(struct bc_state *bcs, struct gigaset_capi_appl *ap) send_disconnect_b3_ind() argument 1032 remove_appl_from_channel(struct bc_state *bcs, struct gigaset_capi_appl *ap) remove_appl_from_channel() argument 1117 send_conf(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb, u16 info) send_conf() argument 1143 do_facility_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_facility_req() argument 1283 do_listen_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_listen_req() argument 1307 do_alert_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_alert_req() argument 1328 do_connect_req(struct 
gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_connect_req() argument 1622 do_connect_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_connect_resp() argument 1794 do_connect_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_connect_b3_req() argument 1841 do_connect_b3_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_connect_b3_resp() argument 1911 do_disconnect_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_disconnect_req() argument 2011 do_disconnect_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_disconnect_b3_req() argument 2064 do_data_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_data_b3_req() argument 2138 do_reset_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_reset_b3_req() argument 2158 do_unsupported(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_unsupported() argument 2177 do_nothing(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_nothing() argument 2193 do_data_b3_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) do_data_b3_resp() argument
|
/linux-4.1.27/drivers/scsi/libsas/ |
H A D | sas_ata.c | 105 struct ata_port *ap; sas_ata_task_done() local 121 ap = qc->ap; sas_ata_task_done() 122 link = &ap->link; sas_ata_task_done() 124 spin_lock_irqsave(ap->lock, flags); sas_ata_task_done() 126 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) { sas_ata_task_done() 127 spin_unlock_irqrestore(ap->lock, flags); sas_ata_task_done() 171 spin_unlock_irqrestore(ap->lock, flags); sas_ata_task_done() 184 struct ata_port *ap = qc->ap; sas_ata_qc_issue() local 185 struct domain_device *dev = ap->private_data; sas_ata_qc_issue() 194 spin_unlock(ap->lock); sas_ata_qc_issue() 257 spin_lock(ap->lock); sas_ata_qc_issue() 264 struct domain_device *dev = qc->ap->private_data; sas_ata_qc_fill_rtf() 326 struct ata_port *ap = link->ap; smp_ata_check_ready() local 327 struct domain_device *dev = ap->private_data; smp_ata_check_ready() 356 struct ata_port *ap = link->ap; local_ata_check_ready() local 357 struct domain_device *dev = ap->private_data; local_ata_check_ready() 374 struct ata_port *ap = ddev->sata_dev.ap; sas_ata_printk() local 386 level, ap->print_id, dev_name(dev), &vaf); sas_ata_printk() 398 struct ata_port *ap = link->ap; sas_ata_hard_reset() local 400 struct domain_device *dev = ap->private_data; sas_ata_hard_reset() 423 ap->cbl = ATA_CBL_SATA; sas_ata_hard_reset() 498 static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev) sas_ata_set_dmamode() argument 500 struct domain_device *dev = ap->private_data; sas_ata_set_dmamode() 507 static void sas_ata_sched_eh(struct ata_port *ap) sas_ata_sched_eh() argument 509 struct domain_device *dev = ap->private_data; sas_ata_sched_eh() 516 ata_std_sched_eh(ap); sas_ata_sched_eh() 520 void sas_ata_end_eh(struct ata_port *ap) sas_ata_end_eh() argument 522 struct domain_device *dev = ap->private_data; sas_ata_end_eh() 562 struct ata_port *ap; sas_ata_init() local 566 ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host, sas_ata_init() 569 if (!ap) { sas_ata_init() 574 ap->private_data = found_dev; sas_ata_init() 575 ap->cbl = ATA_CBL_SATA; sas_ata_init() 576 ap->scsi_host = shost; sas_ata_init() 577 rc = ata_sas_port_init(ap); sas_ata_init() 579 ata_sas_port_destroy(ap); sas_ata_init() 582 found_dev->sata_dev.ap = ap; sas_ata_init() 634 ata_sas_async_probe(dev->sata_dev.ap); sas_probe_sata() 681 if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND) sas_suspend_sata() 684 ata_sas_port_suspend(sata->ap); sas_suspend_sata() 703 if (sata->ap->pm_mesg.event == PM_EVENT_ON) sas_resume_sata() 706 ata_sas_port_resume(sata->ap); sas_resume_sata() 742 struct ata_port *ap = dev->sata_dev.ap; async_sas_ata_eh() local 746 ata_scsi_port_error_handler(ha->core.shost, ap); async_sas_ata_eh() 815 struct ata_port *ap = eh_dev->sata_dev.ap; local 818 ata_scsi_cmd_error_handler(shost, ap, &sata_q); 839 struct ata_port *ap; sas_ata_schedule_reset() local 845 ap = dev->sata_dev.ap; sas_ata_schedule_reset() 846 ehi = &ap->link.eh_info; sas_ata_schedule_reset() 848 spin_lock_irqsave(ap->lock, flags); sas_ata_schedule_reset() 851 ata_port_schedule_eh(ap); sas_ata_schedule_reset() 852 spin_unlock_irqrestore(ap->lock, flags); sas_ata_schedule_reset() 858 struct ata_port *ap; sas_ata_wait_eh() local 863 ap = dev->sata_dev.ap; sas_ata_wait_eh() 864 ata_port_wait_eh(ap); sas_ata_wait_eh()
|
/linux-4.1.27/arch/x86/tools/ |
H A D | relocs_common.c | 5 va_list ap; die() local 6 va_start(ap, fmt); die() 7 vfprintf(stderr, fmt, ap); die() 8 va_end(ap); die()
|
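The relocs_common.c die() entry above is the standard va_list forwarding idiom: the variadic entry point starts the list, hands it to a v-variant (here vfprintf()), and ends it. A minimal standalone sketch of that idiom, with hypothetical function names rather than anything from relocs_common.c:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Inner worker takes an already-started va_list. */
static void vreport(FILE *out, const char *fmt, va_list ap)
{
        vfprintf(out, fmt, ap);
}

/* Public variadic wrapper: va_start, forward, va_end. */
static void die(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vreport(stderr, fmt, ap);
        va_end(ap);
        exit(1);
}

int main(void)
{
        die("unexpected relocation count: %d\n", 3);
}

The same split between a public variadic wrapper and a va_list-taking worker is what the helpline, strbuf and trace_printk entries below rely on as well.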
/linux-4.1.27/net/9p/ |
H A D | protocol.c | 103 va_list ap) p9pdu_vreadf() 111 int8_t *val = va_arg(ap, int8_t *); p9pdu_vreadf() 119 int16_t *val = va_arg(ap, int16_t *); p9pdu_vreadf() 129 int32_t *val = va_arg(ap, int32_t *); p9pdu_vreadf() 139 int64_t *val = va_arg(ap, int64_t *); p9pdu_vreadf() 149 char **sptr = va_arg(ap, char **); p9pdu_vreadf() 171 kuid_t *uid = va_arg(ap, kuid_t *); p9pdu_vreadf() 181 kgid_t *gid = va_arg(ap, kgid_t *); p9pdu_vreadf() 192 va_arg(ap, struct p9_qid *); p9pdu_vreadf() 201 va_arg(ap, struct p9_wstat *); p9pdu_vreadf() 224 uint32_t *count = va_arg(ap, uint32_t *); p9pdu_vreadf() 225 void **data = va_arg(ap, void **); p9pdu_vreadf() 238 uint16_t *nwname = va_arg(ap, uint16_t *); p9pdu_vreadf() 239 char ***wnames = va_arg(ap, char ***); p9pdu_vreadf() 278 uint16_t *nwqid = va_arg(ap, uint16_t *); p9pdu_vreadf() 280 va_arg(ap, struct p9_qid **); p9pdu_vreadf() 317 va_arg(ap, struct p9_stat_dotl *); p9pdu_vreadf() 361 va_list ap) p9pdu_vwritef() 369 int8_t val = va_arg(ap, int); p9pdu_vwritef() 375 __le16 val = cpu_to_le16(va_arg(ap, int)); p9pdu_vwritef() 381 __le32 val = cpu_to_le32(va_arg(ap, int32_t)); p9pdu_vwritef() 387 __le64 val = cpu_to_le64(va_arg(ap, int64_t)); p9pdu_vwritef() 393 const char *sptr = va_arg(ap, const char *); p9pdu_vwritef() 406 kuid_t uid = va_arg(ap, kuid_t); p9pdu_vwritef() 413 kgid_t gid = va_arg(ap, kgid_t); p9pdu_vwritef() 421 va_arg(ap, const struct p9_qid *); p9pdu_vwritef() 429 va_arg(ap, const struct p9_wstat *); p9pdu_vwritef() 443 uint32_t count = va_arg(ap, uint32_t); p9pdu_vwritef() 445 va_arg(ap, struct iov_iter *); p9pdu_vwritef() 453 uint16_t nwname = va_arg(ap, int); p9pdu_vwritef() 454 const char **wnames = va_arg(ap, const char **); p9pdu_vwritef() 474 uint16_t nwqid = va_arg(ap, int); p9pdu_vwritef() 476 va_arg(ap, struct p9_qid *); p9pdu_vwritef() 496 struct p9_iattr_dotl *p9attr = va_arg(ap, p9pdu_vwritef() 531 va_list ap; p9pdu_readf() local 534 va_start(ap, fmt); p9pdu_readf() 535 ret = p9pdu_vreadf(pdu, proto_version, fmt, ap); p9pdu_readf() 536 va_end(ap); p9pdu_readf() 544 va_list ap; p9pdu_writef() local 547 va_start(ap, fmt); p9pdu_writef() 548 ret = p9pdu_vwritef(pdu, proto_version, fmt, ap); p9pdu_writef() 549 va_end(ap); p9pdu_writef() 102 p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, va_list ap) p9pdu_vreadf() argument 360 p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, va_list ap) p9pdu_vwritef() argument
|
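p9pdu_vreadf()/p9pdu_vwritef() above dispatch on single-character format codes and pull each operand with va_arg(), so one pair of functions can serialize every 9P message type. A cut-down encoder in the same style; the format letters and little-endian layout below are illustrative only, not the real 9P wire protocol:

#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>

/* Encode values into buf per fmt: 'b' = u8, 'w' = u16, 'd' = u32,
 * all little-endian. Returns bytes written or -1 on overflow. */
static int pack(uint8_t *buf, size_t len, const char *fmt, ...)
{
        va_list ap;
        size_t off = 0;
        int ret = -1;

        va_start(ap, fmt);
        for (; *fmt; fmt++) {
                switch (*fmt) {
                case 'b': {
                        uint8_t v = (uint8_t)va_arg(ap, int); /* char promotes to int */
                        if (off + 1 > len)
                                goto out;
                        buf[off++] = v;
                        break;
                }
                case 'w': {
                        uint16_t v = (uint16_t)va_arg(ap, int);
                        if (off + 2 > len)
                                goto out;
                        buf[off++] = v & 0xff;
                        buf[off++] = v >> 8;
                        break;
                }
                case 'd': {
                        uint32_t v = va_arg(ap, uint32_t);
                        if (off + 4 > len)
                                goto out;
                        buf[off++] = v & 0xff;
                        buf[off++] = (v >> 8) & 0xff;
                        buf[off++] = (v >> 16) & 0xff;
                        buf[off++] = (v >> 24) & 0xff;
                        break;
                }
                default:
                        goto out;
                }
        }
        ret = (int)off;
out:
        va_end(ap);
        return ret;
}

p9pdu_readf()/p9pdu_writef() in the listing are then just the usual va_start/va_end wrappers around the v-variants.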
/linux-4.1.27/drivers/isdn/hardware/mISDN/ |
H A D | iohelper.h | 38 #define IOFUNC_IO(name, hws, ap) \ 41 return inb(hw->ap.port + off); \ 45 outb(val, hw->ap.port + off); \ 49 insb(hw->ap.port + off, dp, size); \ 53 outsb(hw->ap.port + off, dp, size); \ 56 #define IOFUNC_IND(name, hws, ap) \ 59 outb(off, hw->ap.ale); \ 60 return inb(hw->ap.port); \ 64 outb(off, hw->ap.ale); \ 65 outb(val, hw->ap.port); \ 69 outb(off, hw->ap.ale); \ 70 insb(hw->ap.port, dp, size); \ 74 outb(off, hw->ap.ale); \ 75 outsb(hw->ap.port, dp, size); \
|
/linux-4.1.27/drivers/gpu/drm/mgag200/ |
H A D | mgag200_drv.c | 45 struct apertures_struct *ap; mgag200_kick_out_firmware_fb() local 48 ap = alloc_apertures(1); mgag200_kick_out_firmware_fb() 49 if (!ap) mgag200_kick_out_firmware_fb() 52 ap->ranges[0].base = pci_resource_start(pdev, 0); mgag200_kick_out_firmware_fb() 53 ap->ranges[0].size = pci_resource_len(pdev, 0); mgag200_kick_out_firmware_fb() 58 remove_conflicting_framebuffers(ap, "mgag200drmfb", primary); mgag200_kick_out_firmware_fb() 59 kfree(ap); mgag200_kick_out_firmware_fb()
|
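The mgag200 entry above, and the bochs and cirrus entries further down, repeat one pattern: allocate a one-slot apertures list, fill it from PCI BAR 0, and call remove_conflicting_framebuffers() so the generic firmware framebuffer releases the memory before the DRM driver claims it. A condensed kernel-style sketch of that sequence, assuming the 4.1-era fbdev API shown in the listing (it only builds inside the kernel tree):

static int kick_out_firmware_fb(struct pci_dev *pdev, const char *drv_name)
{
        struct apertures_struct *ap;
        bool primary = false;   /* drivers derive this from the VGA/ROM-shadow state on x86 */

        ap = alloc_apertures(1);                 /* one address range */
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);

        remove_conflicting_framebuffers(ap, drv_name, primary);
        kfree(ap);
        return 0;
}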
/linux-4.1.27/include/linux/ |
H A D | libata.h | 75 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) 237 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 622 struct ata_port *ap; member in struct:ata_queued_cmd 780 struct ata_port *ap; member in struct:ata_link 814 /* Flags that change dynamically, protected by ap->lock */ 903 int (*cable_detect)(struct ata_port *ap); 905 void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); 906 void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); 912 void (*freeze)(struct ata_port *ap); 913 void (*thaw)(struct ata_port *ap); 922 void (*error_handler)(struct ata_port *ap); 923 void (*lost_interrupt)(struct ata_port *ap); 925 void (*sched_eh)(struct ata_port *ap); 926 void (*end_eh)(struct ata_port *ap); 933 void (*pmp_attach)(struct ata_port *ap); 934 void (*pmp_detach)(struct ata_port *ap); 941 int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); 942 int (*port_resume)(struct ata_port *ap); 943 int (*port_start)(struct ata_port *ap); 944 void (*port_stop)(struct ata_port *ap); 951 void (*sff_dev_select)(struct ata_port *ap, unsigned int device); 952 void (*sff_set_devctl)(struct ata_port *ap, u8 ctl); 953 u8 (*sff_check_status)(struct ata_port *ap); 954 u8 (*sff_check_altstatus)(struct ata_port *ap); 955 void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); 956 void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); 957 void (*sff_exec_command)(struct ata_port *ap, 970 u8 (*bmdma_status)(struct ata_port *ap); 974 ssize_t (*em_show)(struct ata_port *ap, char *buf); 975 ssize_t (*em_store)(struct ata_port *ap, const char *message, 980 ssize_t (*transmit_led_message)(struct ata_port *ap, u32 state, 986 void (*phy_reset)(struct ata_port *ap); 987 void (*eng_timeout)(struct ata_port *ap); 1101 static inline int ata_port_is_dummy(struct ata_port *ap) ata_port_is_dummy() argument 1103 return ap->ops == &ata_dummy_port_ops; ata_port_is_dummy() 1126 extern int ata_slave_link_init(struct ata_port *ap); 1138 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, 1143 extern void ata_sas_async_probe(struct ata_port *ap); 1144 extern int ata_sas_sync_probe(struct ata_port *ap); 1146 extern int ata_sas_port_start(struct ata_port *ap); 1147 extern void ata_sas_port_stop(struct ata_port *ap); 1149 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); 1159 extern void ata_sas_port_suspend(struct ata_port *ap); 1160 extern void ata_sas_port_resume(struct ata_port *ap); 1162 static inline void ata_sas_port_suspend(struct ata_port *ap) ata_sas_port_suspend() argument 1165 static inline void ata_sas_port_resume(struct ata_port *ap) ata_sas_port_resume() argument 1170 extern void ata_msleep(struct ata_port *ap, unsigned int msecs); 1171 extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, 1200 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); 1210 extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, 1214 extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); 1215 extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); 1218 extern int ata_cable_40wire(struct ata_port *ap); 1219 extern int ata_cable_80wire(struct ata_port *ap); 1220 extern int ata_cable_sata(struct ata_port *ap); 1221 extern int ata_cable_ignore(struct ata_port *ap); 1222 extern int 
ata_cable_unknown(struct ata_port *ap); 1264 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) ata_acpi_init_gtm() argument 1266 if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID) ata_acpi_init_gtm() 1267 return &ap->__acpi_init_gtm; ata_acpi_init_gtm() 1270 int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); 1271 int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); 1274 int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); 1276 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) ata_acpi_init_gtm() argument 1281 static inline int ata_acpi_stm(const struct ata_port *ap, ata_acpi_stm() argument 1287 static inline int ata_acpi_gtm(const struct ata_port *ap, ata_acpi_gtm() argument 1299 static inline int ata_acpi_cbl_80wire(struct ata_port *ap, ata_acpi_cbl_80wire() argument 1309 extern void ata_port_schedule_eh(struct ata_port *ap); 1310 extern void ata_port_wait_eh(struct ata_port *ap); 1312 extern int ata_port_abort(struct ata_port *ap); 1313 extern int ata_port_freeze(struct ata_port *ap); 1314 extern int sata_async_notification(struct ata_port *ap); 1316 extern void ata_eh_freeze_port(struct ata_port *ap); 1317 extern void ata_eh_thaw_port(struct ata_port *ap); 1323 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 1326 extern void ata_std_error_handler(struct ata_port *ap); 1327 extern void ata_std_sched_eh(struct ata_port *ap); 1328 extern void ata_std_end_eh(struct ata_port *ap); 1388 static inline bool sata_pmp_supported(struct ata_port *ap) sata_pmp_supported() argument 1390 return ap->flags & ATA_FLAG_PMP; sata_pmp_supported() 1393 static inline bool sata_pmp_attached(struct ata_port *ap) sata_pmp_attached() argument 1395 return ap->nr_pmp_links != 0; sata_pmp_attached() 1400 return link == &link->ap->link || link == link->ap->slave_link; ata_is_host_link() 1403 static inline bool sata_pmp_supported(struct ata_port *ap) sata_pmp_supported() argument 1408 static inline bool sata_pmp_attached(struct ata_port *ap) sata_pmp_attached() argument 1421 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) sata_srst_pmp() 1430 void ata_port_printk(const struct ata_port *ap, const char *level, 1439 #define ata_port_err(ap, fmt, ...) \ 1440 ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) 1441 #define ata_port_warn(ap, fmt, ...) \ 1442 ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) 1443 #define ata_port_notice(ap, fmt, ...) \ 1444 ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) 1445 #define ata_port_info(ap, fmt, ...) \ 1446 ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) 1447 #define ata_port_dbg(ap, fmt, ...) 
\ 1448 ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) 1495 void ata_port_desc(struct ata_port *ap, const char *fmt, ...); 1497 extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 1552 if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) ata_link_max_devices() 1590 struct ata_port *ap, 1600 * ata_for_each_link() iterates over each link of @ap according to 1611 #define ata_for_each_link(link, ap, mode) \ 1612 for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ 1613 (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) 1647 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, __ata_qc_from_tag() argument 1651 return &ap->qcmd[tag]; __ata_qc_from_tag() 1655 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, ata_qc_from_tag() argument 1658 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_qc_from_tag() 1660 if (unlikely(!qc) || !ap->ops->error_handler) ata_qc_from_tag() 1680 tf->ctl = dev->link->ap->ctl; ata_tf_init() 1787 extern void sata_pmp_error_handler(struct ata_port *ap); 1812 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); 1813 extern u8 ata_sff_check_status(struct ata_port *ap); 1814 extern void ata_sff_pause(struct ata_port *ap); 1815 extern void ata_sff_dma_pause(struct ata_port *ap); 1816 extern int ata_sff_busy_sleep(struct ata_port *ap, 1819 extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); 1820 extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 1821 extern void ata_sff_exec_command(struct ata_port *ap, 1829 extern void ata_sff_irq_on(struct ata_port *ap); 1830 extern void ata_sff_irq_clear(struct ata_port *ap); 1831 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1839 extern unsigned int ata_sff_port_intr(struct ata_port *ap, 1842 extern void ata_sff_lost_interrupt(struct ata_port *ap); 1843 extern void ata_sff_freeze(struct ata_port *ap); 1844 extern void ata_sff_thaw(struct ata_port *ap); 1856 extern void ata_sff_error_handler(struct ata_port *ap); 1883 extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, 1886 extern void ata_bmdma_error_handler(struct ata_port *ap); 1888 extern void ata_bmdma_irq_clear(struct ata_port *ap); 1892 extern u8 ata_bmdma_status(struct ata_port *ap); 1893 extern int ata_bmdma_port_start(struct ata_port *ap); 1894 extern int ata_bmdma_port_start32(struct ata_port *ap); 1911 * @ap: Port to wait for. 1922 static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, ata_sff_busy_wait() argument 1929 status = ap->ops->sff_check_status(ap); ata_sff_busy_wait() 1938 * @ap: Port to wait for. 1946 static inline u8 ata_wait_idle(struct ata_port *ap) ata_wait_idle() argument 1948 u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); ata_wait_idle() 1952 ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", ata_wait_idle()
|
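The libata excerpt ends with ata_sff_busy_wait() and ata_wait_idle(): poll the SFF status register until the given bits clear, the device reads back 0xff, or the retry budget runs out. A standalone analog of that loop with the register read stubbed out by a callback; the names and delay policy are illustrative, not libata's:

#include <stdint.h>

#define ST_BUSY 0x80
#define ST_DRQ  0x08

/* Poll until none of 'bits' are set, the device reads back 0xff,
 * or 'max' polls have been used. Returns the last status seen. */
static uint8_t busy_wait(uint8_t (*read_status)(void *ctx), void *ctx,
                         uint8_t bits, unsigned int max)
{
        uint8_t status;

        do {
                status = read_status(ctx);   /* the real helper udelays ~10us per poll */
                max--;
        } while (status != 0xff && (status & bits) && max > 0);

        return status;
}

ata_wait_idle() in the listing is this loop with ATA_BUSY | ATA_DRQ and a budget of 1000, plus a debug print when the result still looks busy.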
H A D | kernelcapi.h | 22 void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb); 37 u16 capi20_register(struct capi20_appl *ap); 38 u16 capi20_release(struct capi20_appl *ap); 39 u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | io_no.h | 56 volatile unsigned char *ap = (volatile unsigned char *) addr; io_outsb() local 59 *ap = *bp++; io_outsb() 64 volatile unsigned short *ap = (volatile unsigned short *) addr; io_outsw() local 67 *ap = _swapw(*bp++); io_outsw() 72 volatile unsigned int *ap = (volatile unsigned int *) addr; io_outsl() local 75 *ap = _swapl(*bp++); io_outsl() 80 volatile unsigned char *ap = (volatile unsigned char *) addr; io_insb() local 83 *bp++ = *ap; io_insb() 88 volatile unsigned short *ap = (volatile unsigned short *) addr; io_insw() local 91 *bp++ = _swapw(*ap); io_insw() 96 volatile unsigned int *ap = (volatile unsigned int *) addr; io_insl() local 99 *bp++ = _swapl(*ap); io_insl()
|
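The io_no.h string-I/O helpers copy a buffer to or from a single fixed device register, byte-swapping each 16- or 32-bit word with _swapw()/_swapl() on the way. A plain userspace analog of the io_outsw() loop; the volatile pointer keeps the repeated stores to one address from being folded away, and the register address itself is of course hypothetical:

#include <stddef.h>
#include <stdint.h>

static inline uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

/* Write 'count' 16-bit words from buf to one device register,
 * swapping bytes the way io_outsw() does with _swapw(). */
static void outsw_like(volatile uint16_t *reg, const void *buf, size_t count)
{
        const uint16_t *bp = buf;

        while (count--)
                *reg = swap16(*bp++);
}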
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/ |
H A D | tp.c | 17 static void tp_init(adapter_t * ap, const struct tp_params *p, tp_init() argument 22 if (!t1_is_asic(ap)) tp_init() 31 writel(val, ap->regs + A_TP_IN_CONFIG); tp_init() 35 F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); tp_init() 39 V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); tp_init() 43 if (is_T2(ap) && ap->params.nports > 1) { tp_init() 49 ap->regs + A_TP_TX_DROP_CONFIG); tp_init()
|
H A D | cxgb2.c | 60 static inline void schedule_mac_stats_update(struct adapter *ap, int secs) schedule_mac_stats_update() argument 62 schedule_delayed_work(&ap->stats_update_task, secs * HZ); schedule_mac_stats_update() 65 static inline void cancel_mac_stats_update(struct adapter *ap) cancel_mac_stats_update() argument 67 cancel_delayed_work(&ap->stats_update_task); cancel_mac_stats_update() 539 static inline void reg_block_dump(struct adapter *ap, void *buf, reg_block_dump() argument 545 *p++ = readl(ap->regs + start); reg_block_dump() 551 struct adapter *ap = dev->ml_priv; get_regs() local 559 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); get_regs() 560 reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); get_regs() 561 reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); get_regs() 562 reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); get_regs() 563 reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); get_regs() 564 reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); get_regs() 565 reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); get_regs() 566 reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); get_regs() 567 reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); get_regs() 568 reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); get_regs() 773 #define EEPROM_MAGIC(ap) \ 774 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
|
/linux-4.1.27/tools/perf/ui/gtk/ |
H A D | helpline.c | 27 static int gtk_helpline_show(const char *fmt, va_list ap) gtk_helpline_show() argument 34 sizeof(ui_helpline__current) - backlog, fmt, ap); gtk_helpline_show()
|
/linux-4.1.27/tools/perf/ui/tui/ |
H A D | helpline.c | 29 static int tui_helpline__show(const char *format, va_list ap) tui_helpline__show() argument 36 sizeof(ui_helpline__last_msg) - backlog, format, ap); tui_helpline__show()
|
/linux-4.1.27/arch/mips/mm/ |
H A D | uasm-micromips.c | 154 va_list ap; build_insn() local 167 va_start(ap, opc); build_insn() 170 op |= build_rt(va_arg(ap, u32)); build_insn() 172 op |= build_rs(va_arg(ap, u32)); build_insn() 176 op |= build_rs(va_arg(ap, u32)); build_insn() 178 op |= build_rt(va_arg(ap, u32)); build_insn() 181 op |= build_rd(va_arg(ap, u32)); build_insn() 183 op |= build_re(va_arg(ap, u32)); build_insn() 185 op |= build_simm(va_arg(ap, s32)); build_insn() 187 op |= build_uimm(va_arg(ap, u32)); build_insn() 189 op |= build_bimm(va_arg(ap, s32)); build_insn() 191 op |= build_jimm(va_arg(ap, u32)); build_insn() 193 op |= build_func(va_arg(ap, u32)); build_insn() 195 op |= build_set(va_arg(ap, u32)); build_insn() 197 op |= build_scimm(va_arg(ap, u32)); build_insn() 198 va_end(ap); build_insn()
|
H A D | uasm-mips.c | 187 va_list ap; build_insn() local 200 va_start(ap, opc); build_insn() 202 op |= build_rs(va_arg(ap, u32)); build_insn() 204 op |= build_rt(va_arg(ap, u32)); build_insn() 206 op |= build_rd(va_arg(ap, u32)); build_insn() 208 op |= build_re(va_arg(ap, u32)); build_insn() 210 op |= build_simm(va_arg(ap, s32)); build_insn() 212 op |= build_uimm(va_arg(ap, u32)); build_insn() 214 op |= build_bimm(va_arg(ap, s32)); build_insn() 216 op |= build_jimm(va_arg(ap, u32)); build_insn() 218 op |= build_func(va_arg(ap, u32)); build_insn() 220 op |= build_set(va_arg(ap, u32)); build_insn() 222 op |= build_scimm(va_arg(ap, u32)); build_insn() 224 op |= build_scimm9(va_arg(ap, u32)); build_insn() 225 va_end(ap); build_insn()
|
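Both uasm build_insn() variants walk a per-opcode field list and OR one va_arg()-supplied operand into the instruction word for each field present. A toy encoder in the same spirit, using the classic MIPS field positions for illustration but none of uasm's real opcode tables:

#include <stdarg.h>
#include <stdint.h>

/* Which operand fields an opcode takes (illustrative flags). */
#define OP_RS   (1u << 0)    /* bits 25..21 */
#define OP_RT   (1u << 1)    /* bits 20..16 */
#define OP_RD   (1u << 2)    /* bits 15..11 */
#define OP_IMM  (1u << 3)    /* bits 15..0  */

/* OR one operand per requested field into 'base', consuming the
 * variadic arguments in RS, RT, RD, IMM order. */
static uint32_t build_insn(uint32_t base, uint32_t fields, ...)
{
        va_list ap;
        uint32_t op = base;

        va_start(ap, fields);
        if (fields & OP_RS)
                op |= (va_arg(ap, uint32_t) & 0x1f) << 21;
        if (fields & OP_RT)
                op |= (va_arg(ap, uint32_t) & 0x1f) << 16;
        if (fields & OP_RD)
                op |= (va_arg(ap, uint32_t) & 0x1f) << 11;
        if (fields & OP_IMM)
                op |= va_arg(ap, uint32_t) & 0xffff;
        va_end(ap);

        return op;
}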
/linux-4.1.27/arch/m68k/emu/ |
H A D | natfeat.c | 57 va_list ap; nfprint() local 60 va_start(ap, fmt); nfprint() 61 n = vsnprintf(buf, 256, fmt, ap); nfprint() 63 va_end(ap); nfprint()
|
/linux-4.1.27/drivers/gpu/drm/bochs/ |
H A D | bochs_drv.c | 148 struct apertures_struct *ap; bochs_kick_out_firmware_fb() local 150 ap = alloc_apertures(1); bochs_kick_out_firmware_fb() 151 if (!ap) bochs_kick_out_firmware_fb() 154 ap->ranges[0].base = pci_resource_start(pdev, 0); bochs_kick_out_firmware_fb() 155 ap->ranges[0].size = pci_resource_len(pdev, 0); bochs_kick_out_firmware_fb() 156 remove_conflicting_framebuffers(ap, "bochsdrmfb", false); bochs_kick_out_firmware_fb() 157 kfree(ap); bochs_kick_out_firmware_fb()
|
/linux-4.1.27/drivers/gpu/drm/cirrus/ |
H A D | cirrus_drv.c | 46 struct apertures_struct *ap; cirrus_kick_out_firmware_fb() local 49 ap = alloc_apertures(1); cirrus_kick_out_firmware_fb() 50 if (!ap) cirrus_kick_out_firmware_fb() 53 ap->ranges[0].base = pci_resource_start(pdev, 0); cirrus_kick_out_firmware_fb() 54 ap->ranges[0].size = pci_resource_len(pdev, 0); cirrus_kick_out_firmware_fb() 59 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); cirrus_kick_out_firmware_fb() 60 kfree(ap); cirrus_kick_out_firmware_fb()
|
/linux-4.1.27/net/ipv4/netfilter/ |
H A D | nf_log_arp.c | 51 const struct arppayload *ap; dump_arp_packet() local 70 ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp); dump_arp_packet() 71 if (ap == NULL) { dump_arp_packet() 77 ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); dump_arp_packet()
|
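dump_arp_packet() leans on skb_header_pointer(): ask for a header at an offset and length and either get a usable pointer back or NULL when the packet is too short, which is exactly the check guarding the "ap == NULL" branch above. A userspace analog over a flat buffer shows the bounds check; a real skb may be fragmented, which is why the kernel helper can also copy into a caller-supplied buffer:

#include <stddef.h>
#include <stdint.h>

/* Return a pointer to 'len' bytes at 'offset', or NULL if the
 * packet is too short to contain them. */
static const void *header_pointer(const uint8_t *pkt, size_t pktlen,
                                  size_t offset, size_t len)
{
        if (offset > pktlen || len > pktlen - offset)
                return NULL;    /* truncated packet: caller must bail out */
        return pkt + offset;
}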
/linux-4.1.27/tools/perf/util/ |
H A D | strbuf.c | 88 va_list ap; strbuf_addf() local 92 va_start(ap, fmt); strbuf_addf() 93 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); strbuf_addf() 94 va_end(ap); strbuf_addf() 99 va_start(ap, fmt); strbuf_addf() 100 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); strbuf_addf() 101 va_end(ap); strbuf_addf()
|
H A D | build-id.c | 96 va_list ap; asnprintf() local 102 va_start(ap, fmt); asnprintf() 104 ret = vsnprintf(*strp, size, fmt, ap); asnprintf() 106 ret = vasprintf(strp, fmt, ap); asnprintf() 107 va_end(ap); asnprintf()
|
/linux-4.1.27/tools/testing/selftests/mount/ |
H A D | unprivileged-remount-test.c | 49 va_list ap; die() local 50 va_start(ap, fmt); die() 51 vfprintf(stderr, fmt, ap); die() 52 va_end(ap); die() 56 static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap) vmaybe_write_file() argument 63 buf_len = vsnprintf(buf, sizeof(buf), fmt, ap); vmaybe_write_file() 96 va_list ap; maybe_write_file() local 98 va_start(ap, fmt); maybe_write_file() 99 vmaybe_write_file(true, filename, fmt, ap); maybe_write_file() 100 va_end(ap); maybe_write_file() 106 va_list ap; write_file() local 108 va_start(ap, fmt); write_file() 109 vmaybe_write_file(false, filename, fmt, ap); write_file() 110 va_end(ap); write_file()
|
/linux-4.1.27/drivers/isdn/capi/ |
H A D | kcapi.c | 182 struct capi20_appl *ap; notify_up() local 199 ap = __get_capi_appl_by_nr(applid); notify_up() 200 if (ap) notify_up() 201 register_appl(ctr, applid, &ap->rparam); notify_up() 214 struct capi20_appl *ap; ctr_down() local 228 ap = __get_capi_appl_by_nr(applid); ctr_down() 229 if (ap) ctr_down() 316 struct capi20_appl *ap = recv_handler() local 319 if ((!ap) || (ap->release_in_progress)) recv_handler() 322 mutex_lock(&ap->recv_mtx); recv_handler() 323 while ((skb = skb_dequeue(&ap->recv_queue))) { recv_handler() 325 ap->nrecvdatapkt++; recv_handler() 327 ap->nrecvctlpkt++; recv_handler() 329 ap->recv_message(ap, skb); recv_handler() 331 mutex_unlock(&ap->recv_mtx); recv_handler() 346 struct capi20_appl *ap; capi_ctr_handle_message() local 397 ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data)); capi_ctr_handle_message() 398 if (!ap) { capi_ctr_handle_message() 411 skb_queue_tail(&ap->recv_queue, skb); capi_ctr_handle_message() 412 queue_work(kcapi_wq, &ap->recv_work); capi_ctr_handle_message() 654 * @ap: CAPI application descriptor structure. 657 * A unique application ID is assigned and stored in @ap->applid. 659 * callback function @ap->recv_message() may be called at any time 660 * until capi20_release() has been called for the same @ap. 664 u16 capi20_register(struct capi20_appl *ap) capi20_register() argument 671 if (ap->rparam.datablklen < 128) capi20_register() 674 ap->nrecvctlpkt = 0; capi20_register() 675 ap->nrecvdatapkt = 0; capi20_register() 676 ap->nsentctlpkt = 0; capi20_register() 677 ap->nsentdatapkt = 0; capi20_register() 678 mutex_init(&ap->recv_mtx); capi20_register() 679 skb_queue_head_init(&ap->recv_queue); capi20_register() 680 INIT_WORK(&ap->recv_work, recv_handler); capi20_register() 681 ap->release_in_progress = 0; capi20_register() 694 ap->applid = applid; capi20_register() 695 capi_applications[applid - 1] = ap; capi20_register() 701 register_appl(capi_controller[i], applid, &ap->rparam); capi20_register() 717 * @ap: CAPI application descriptor structure. 721 * callback function @ap->recv_message() will no longer be called. 725 u16 capi20_release(struct capi20_appl *ap) capi20_release() argument 729 DBG("applid %#x", ap->applid); capi20_release() 733 ap->release_in_progress = 1; capi20_release() 734 capi_applications[ap->applid - 1] = NULL; capi20_release() 742 release_appl(capi_controller[i], ap->applid); capi20_release() 748 skb_queue_purge(&ap->recv_queue); capi20_release() 751 printk(KERN_DEBUG "kcapi: appl %d down\n", ap->applid); capi20_release() 761 * @ap: CAPI application descriptor structure. 768 u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) capi20_put_message() argument 774 DBG("applid %#x", ap->applid); capi20_put_message() 778 if ((ap->applid == 0) || ap->release_in_progress) capi20_put_message() 801 ap->nsentdatapkt++; capi20_put_message() 806 ap->nsentctlpkt++; capi20_put_message()
|
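Together with the kernelcapi.h prototypes above, the kcapi.c excerpt gives the application lifecycle: fill a struct capi20_appl with receive parameters and a recv_message callback, capi20_register() it (which assigns ap->applid), send with capi20_put_message(), and capi20_release() when done. A kernel-style sketch of that ordering using only the fields visible in this listing; treat it as a shape, not a drop-in driver:

static void my_recv(struct capi20_appl *ap, struct sk_buff *skb)
{
        /* consume the CAPI message, then drop the skb */
        kfree_skb(skb);
}

static struct capi20_appl my_appl;

static u16 my_capi_attach(void)
{
        my_appl.rparam.level3cnt  = 1;     /* simultaneous logical connections */
        my_appl.rparam.datablkcnt = 16;
        my_appl.rparam.datablklen = 2048;  /* capi20_register() rejects < 128 */
        my_appl.recv_message      = my_recv;

        return capi20_register(&my_appl);  /* sets my_appl.applid on success */
}

static void my_capi_detach(void)
{
        capi20_release(&my_appl);
}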
H A D | kcapi_proc.c | 173 struct capi20_appl *ap = *(struct capi20_appl **) v; applications_show() local 175 if (!ap) applications_show() 179 ap->applid, applications_show() 180 ap->rparam.level3cnt, applications_show() 181 ap->rparam.datablkcnt, applications_show() 182 ap->rparam.datablklen); applications_show() 190 struct capi20_appl *ap = *(struct capi20_appl **) v; applstats_show() local 192 if (!ap) applstats_show() 196 ap->applid, applstats_show() 197 ap->nrecvctlpkt, applstats_show() 198 ap->nrecvdatapkt, applstats_show() 199 ap->nsentctlpkt, applstats_show() 200 ap->nsentdatapkt); applstats_show()
|
H A D | capi.c | 82 struct capi20_appl *ap; member in struct:capiminor 115 struct capi20_appl ap; member in struct:capidev 206 static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci) capiminor_alloc() argument 218 mp->ap = ap; capiminor_alloc() 297 np->minorp = capiminor_alloc(&cdev->ap, np->ncci); capincci_alloc_minor() 391 capimsg_setu16(s, 2, mp->ap->applid); gen_data_b3_resp_for() 447 errcode = capi20_put_message(mp->ap, nskb); handle_recv_skb() 519 capimsg_setu16(skb->data, 2, mp->ap->applid); handle_minor_send() 539 errcode = capi20_put_message(mp->ap, skb); handle_minor_send() 568 static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) capi_recv_message() argument 570 struct capidev *cdev = ap->private; capi_recv_message() 653 if (!cdev->ap.applid) capi_read() 687 if (!cdev->ap.applid) capi_write() 710 CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); capi_write() 718 cdev->errcode = capi20_put_message(&cdev->ap, skb); capi_write() 733 if (!cdev->ap.applid) capi_poll() 755 if (cdev->ap.applid) { capi_ioctl() 759 if (copy_from_user(&cdev->ap.rparam, argp, capi_ioctl() 764 cdev->ap.private = cdev; capi_ioctl() 765 cdev->ap.recv_message = capi_recv_message; capi_ioctl() 766 cdev->errcode = capi20_register(&cdev->ap); capi_ioctl() 767 retval = (int)cdev->ap.applid; capi_ioctl() 769 cdev->ap.applid = 0; capi_ioctl() 973 if (cdev->ap.applid) capi_release() 974 capi20_release(&cdev->ap); capi_release() 1333 cdev->ap.applid, capi20_proc_show() 1334 cdev->ap.nrecvctlpkt, capi20_proc_show() 1335 cdev->ap.nrecvdatapkt, capi20_proc_show() 1336 cdev->ap.nsentctlpkt, capi20_proc_show() 1337 cdev->ap.nsentdatapkt); capi20_proc_show() 1369 seq_printf(m, "%d 0x%x\n", cdev->ap.applid, np->ncci); capi20ncci_proc_show()
|
H A D | capidrv.c | 128 struct capi20_appl ap; member in struct:capidrv_data 520 if (capi20_put_message(&global.ap, skb) != CAPI_NOERROR) send_message() 668 global.ap.applid, n0() 1151 global.ap.applid, handle_incoming_call() 1292 global.ap.applid, handle_plci() 1417 global.ap.applid, handle_ncci() 1435 global.ap.applid, handle_ncci() 1582 static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb) capidrv_recv_message() argument 1586 ap->applid); capidrv_recv_message() 1595 ap->applid, cdb->buf); capidrv_recv_message() 1599 __func__, ap->applid, capidrv_recv_message() 1850 global.ap.applid, capidrv_command() 1896 global.ap.applid, capidrv_command() 1947 global.ap.applid, capidrv_command() 1968 global.ap.applid, capidrv_command() 2094 capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++, if_sendbuf() 2122 errcode = capi20_put_message(&global.ap, nskb); if_sendbuf() 2136 errcode = capi20_put_message(&global.ap, skb); if_sendbuf() 2204 capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid, enable_dchannel_trace() 2213 capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid, enable_dchannel_trace() 2227 capi_fill_LISTEN_REQ(&cmdcmsg, global.ap.applid, send_listen() 2458 global.ap.nrecvctlpkt, capidrv_proc_show() 2459 global.ap.nrecvdatapkt, capidrv_proc_show() 2460 global.ap.nsentctlpkt, capidrv_proc_show() 2461 global.ap.nsentdatapkt); capidrv_proc_show() 2498 global.ap.rparam.level3cnt = -2; /* number of bchannels twice */ capidrv_init() 2499 global.ap.rparam.datablkcnt = 16; capidrv_init() 2500 global.ap.rparam.datablklen = 2048; capidrv_init() 2502 global.ap.recv_message = capidrv_recv_message; capidrv_init() 2503 errcode = capi20_register(&global.ap); capidrv_init() 2513 capi20_release(&global.ap); capidrv_init() 2532 capi20_release(&global.ap); capidrv_exit()
|
/linux-4.1.27/net/wireless/ |
H A D | Makefile | 13 cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
|
/linux-4.1.27/kernel/ |
H A D | kprobes.c | 337 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) copy_kprobe() argument 339 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); copy_kprobe() 340 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); copy_kprobe() 668 static void reuse_unused_kprobe(struct kprobe *ap) reuse_unused_kprobe() argument 672 BUG_ON(!kprobe_unused(ap)); reuse_unused_kprobe() 677 op = container_of(ap, struct optimized_kprobe, kp); reuse_unused_kprobe() 680 "aggrprobe@%p\n", ap->addr); reuse_unused_kprobe() 682 ap->flags &= ~KPROBE_FLAG_DISABLED; reuse_unused_kprobe() 684 BUG_ON(!kprobe_optready(ap)); reuse_unused_kprobe() 685 optimize_kprobe(ap); reuse_unused_kprobe() 739 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 747 struct kprobe *ap; try_to_optimize_kprobe() local 758 ap = alloc_aggr_kprobe(p); try_to_optimize_kprobe() 759 if (!ap) try_to_optimize_kprobe() 762 op = container_of(ap, struct optimized_kprobe, kp); try_to_optimize_kprobe() 770 init_aggr_kprobe(ap, p); try_to_optimize_kprobe() 771 optimize_kprobe(ap); /* This just kicks optimizer thread */ try_to_optimize_kprobe() 898 static void reuse_unused_kprobe(struct kprobe *ap) reuse_unused_kprobe() argument 901 BUG_ON(kprobe_unused(ap)); reuse_unused_kprobe() 1204 * Add the new probe to ap->list. Fail if this is the 1207 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) add_new_kprobe() argument 1209 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); add_new_kprobe() 1212 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ add_new_kprobe() 1215 if (ap->break_handler) add_new_kprobe() 1217 list_add_tail_rcu(&p->list, &ap->list); add_new_kprobe() 1218 ap->break_handler = aggr_break_handler; add_new_kprobe() 1220 list_add_rcu(&p->list, &ap->list); add_new_kprobe() 1221 if (p->post_handler && !ap->post_handler) add_new_kprobe() 1222 ap->post_handler = aggr_post_handler; add_new_kprobe() 1231 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) init_aggr_kprobe() argument 1233 /* Copy p's insn slot to ap */ init_aggr_kprobe() 1234 copy_kprobe(p, ap); init_aggr_kprobe() 1235 flush_insn_slot(ap); init_aggr_kprobe() 1236 ap->addr = p->addr; init_aggr_kprobe() 1237 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; init_aggr_kprobe() 1238 ap->pre_handler = aggr_pre_handler; init_aggr_kprobe() 1239 ap->fault_handler = aggr_fault_handler; init_aggr_kprobe() 1242 ap->post_handler = aggr_post_handler; init_aggr_kprobe() 1244 ap->break_handler = aggr_break_handler; init_aggr_kprobe() 1246 INIT_LIST_HEAD(&ap->list); init_aggr_kprobe() 1247 INIT_HLIST_NODE(&ap->hlist); init_aggr_kprobe() 1249 list_add_rcu(&p->list, &ap->list); init_aggr_kprobe() 1250 hlist_replace_rcu(&p->hlist, &ap->hlist); init_aggr_kprobe() 1260 struct kprobe *ap = orig_p; register_aggr_kprobe() local 1273 ap = alloc_aggr_kprobe(orig_p); register_aggr_kprobe() 1274 if (!ap) { register_aggr_kprobe() 1278 init_aggr_kprobe(ap, orig_p); register_aggr_kprobe() 1279 } else if (kprobe_unused(ap)) register_aggr_kprobe() 1281 reuse_unused_kprobe(ap); register_aggr_kprobe() 1283 if (kprobe_gone(ap)) { register_aggr_kprobe() 1290 ret = arch_prepare_kprobe(ap); register_aggr_kprobe() 1300 prepare_optimized_kprobe(ap); register_aggr_kprobe() 1306 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) register_aggr_kprobe() 1310 /* Copy ap's insn slot to p */ register_aggr_kprobe() 1311 copy_kprobe(ap, p); register_aggr_kprobe() 1312 ret = add_new_kprobe(ap, p); register_aggr_kprobe() 1319 if (ret == 0 && 
kprobe_disabled(ap) && !kprobe_disabled(p)) { register_aggr_kprobe() 1320 ap->flags &= ~KPROBE_FLAG_DISABLED; register_aggr_kprobe() 1323 arm_kprobe(ap); register_aggr_kprobe() 1384 struct kprobe *ap, *list_p; __get_valid_kprobe() local 1386 ap = get_kprobe(p->addr); __get_valid_kprobe() 1387 if (unlikely(!ap)) __get_valid_kprobe() 1390 if (p != ap) { __get_valid_kprobe() 1391 list_for_each_entry_rcu(list_p, &ap->list, list) __get_valid_kprobe() 1398 return ap; __get_valid_kprobe() 1543 static int aggr_kprobe_disabled(struct kprobe *ap) aggr_kprobe_disabled() argument 1547 list_for_each_entry_rcu(kp, &ap->list, list) aggr_kprobe_disabled() 1551 * We can't disable this ap. aggr_kprobe_disabled() 1594 struct kprobe *ap, *list_p; __unregister_kprobe_top() local 1597 ap = __disable_kprobe(p); __unregister_kprobe_top() 1598 if (ap == NULL) __unregister_kprobe_top() 1601 if (ap == p) __unregister_kprobe_top() 1609 WARN_ON(!kprobe_aggrprobe(ap)); __unregister_kprobe_top() 1611 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) __unregister_kprobe_top() 1620 ap->break_handler = NULL; __unregister_kprobe_top() 1622 list_for_each_entry_rcu(list_p, &ap->list, list) { __unregister_kprobe_top() 1626 ap->post_handler = NULL; __unregister_kprobe_top() 1634 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) __unregister_kprobe_top() 1639 optimize_kprobe(ap); __unregister_kprobe_top() 1644 BUG_ON(!kprobe_disarmed(ap)); __unregister_kprobe_top() 1645 hlist_del_rcu(&ap->hlist); __unregister_kprobe_top() 1651 struct kprobe *ap; __unregister_kprobe_bottom() local 1658 ap = list_entry(p->list.next, struct kprobe, list); __unregister_kprobe_bottom() 1660 free_aggr_kprobe(ap); __unregister_kprobe_bottom()
|
/linux-4.1.27/fs/gfs2/ |
H A D | quota.h | 28 struct gfs2_alloc_parms *ap); 42 struct gfs2_alloc_parms *ap) gfs2_quota_lock_check() 53 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap); gfs2_quota_lock_check() 41 gfs2_quota_lock_check(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) gfs2_quota_lock_check() argument
|
H A D | quota.c | 795 struct gfs2_alloc_parms ap = { .aflags = 0, }; do_sync() local 848 ap.target = reserved; do_sync() 849 error = gfs2_inplace_reserve(ip, &ap); do_sync() 1102 * @ap: The allocation parameters. ap->target contains the requested 1103 * blocks. ap->min_target, if set, contains the minimum blks 1107 * min_req = ap->min_target ? ap->min_target : ap->target; 1109 * ap->allowed is set to the number of blocks allowed 1111 * -EDQUOT otherwise, quota violation. ap->allowed is set to number 1115 struct gfs2_alloc_parms *ap) gfs2_quota_check() 1123 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ gfs2_quota_check() 1144 if (limit > 0 && (limit - value) < ap->allowed) gfs2_quota_check() 1145 ap->allowed = limit - value; gfs2_quota_check() 1147 if (limit && limit < (value + (s64)ap->target)) { gfs2_quota_check() 1150 if (!ap->min_target || ap->min_target > ap->allowed) { gfs2_quota_check() 1632 struct gfs2_alloc_parms ap = { .aflags = 0, }; gfs2_set_dqblk() local 1636 ap.target = blocks; gfs2_set_dqblk() 1637 error = gfs2_inplace_reserve(ip, &ap); gfs2_set_dqblk() 1114 gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, struct gfs2_alloc_parms *ap) gfs2_quota_check() argument
|
H A D | file.c | 387 struct gfs2_alloc_parms ap = { .aflags = 0, }; gfs2_page_mkwrite() local 432 ap.target = data_blocks + ind_blocks; gfs2_page_mkwrite() 433 ret = gfs2_quota_lock_check(ip, &ap); gfs2_page_mkwrite() 436 ret = gfs2_inplace_reserve(ip, &ap); gfs2_page_mkwrite() 805 struct gfs2_alloc_parms ap = { .aflags = 0, }; __gfs2_fallocate() local 830 ap.min_target = data_blocks + ind_blocks; __gfs2_fallocate() 852 ap.target = data_blocks + ind_blocks; __gfs2_fallocate() 854 error = gfs2_quota_lock_check(ip, &ap); __gfs2_fallocate() 857 /* ap.allowed tells us how many blocks quota will allow __gfs2_fallocate() 859 if (ap.allowed && ap.allowed < max_blks) __gfs2_fallocate() 860 max_blks = ap.allowed; __gfs2_fallocate() 862 error = gfs2_inplace_reserve(ip, &ap); __gfs2_fallocate() 867 if (ap.allowed && ap.allowed < max_blks) __gfs2_fallocate() 868 max_blks = ap.allowed; __gfs2_fallocate()
|
H A D | inode.c | 382 struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, }; alloc_dinode() local 385 error = gfs2_quota_lock_check(ip, &ap); alloc_dinode() 389 error = gfs2_inplace_reserve(ip, &ap); alloc_dinode() 524 struct gfs2_alloc_parms ap = { .target = da->nr_blocks, }; link_dinode() local 528 error = gfs2_quota_lock_check(dip, &ap); link_dinode() 532 error = gfs2_inplace_reserve(dip, &ap); link_dinode() 955 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, }; gfs2_link() local 956 error = gfs2_quota_lock_check(dip, &ap); gfs2_link() 960 error = gfs2_inplace_reserve(dip, &ap); gfs2_link() 1472 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, }; gfs2_rename() local 1473 error = gfs2_quota_lock_check(ndip, &ap); gfs2_rename() 1477 error = gfs2_inplace_reserve(ndip, &ap); gfs2_rename() 1672 struct gfs2_alloc_parms ap; setattr_chown() local 1700 ap.target = gfs2_get_inode_blocks(&ip->i_inode); setattr_chown() 1704 error = gfs2_quota_check(ip, nuid, ngid, &ap); setattr_chown() 1719 gfs2_quota_change(ip, -ap.target, ouid, ogid); setattr_chown() 1720 gfs2_quota_change(ip, ap.target, nuid, ngid); setattr_chown()
|
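Every gfs2 caller above drives an allocation the same way: fill a struct gfs2_alloc_parms with the block target, run it through gfs2_quota_lock_check(), which may cap ap.allowed, then hand the same structure to gfs2_inplace_reserve(). A condensed kernel-style sketch of that ordering as it appears in link_dinode() and __gfs2_fallocate(), with the error unwinding reduced to the essentials:

static int reserve_blocks(struct gfs2_inode *ip, unsigned int blocks)
{
        struct gfs2_alloc_parms ap = { .target = blocks, .aflags = 0, };
        int error;

        error = gfs2_quota_lock_check(ip, &ap);  /* may lower ap.allowed */
        if (error)
                return error;

        error = gfs2_inplace_reserve(ip, &ap);   /* claim resource-group space */
        if (error)
                gfs2_quota_unlock(ip);           /* undo the quota lock on failure */

        return error;
}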
/linux-4.1.27/kernel/trace/ |
H A D | trace_printk.c | 190 va_list ap; __trace_bprintk() local 198 va_start(ap, fmt); __trace_bprintk() 199 ret = trace_vbprintk(ip, fmt, ap); __trace_bprintk() 200 va_end(ap); __trace_bprintk() 205 int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap) __ftrace_vbprintk() argument 213 return trace_vbprintk(ip, fmt, ap); __ftrace_vbprintk() 220 va_list ap; __trace_printk() local 225 va_start(ap, fmt); __trace_printk() 226 ret = trace_vprintk(ip, fmt, ap); __trace_printk() 227 va_end(ap); __trace_printk() 232 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) __ftrace_vprintk() argument 237 return trace_vprintk(ip, fmt, ap); __ftrace_vprintk()
|
/linux-4.1.27/net/ipv6/netfilter/ |
H A D | ip6t_rt.c | 49 const struct in6_addr *ap; rt_mt6() local 133 ap = skb_header_pointer(skb, rt_mt6() 140 BUG_ON(ap == NULL); rt_mt6() 142 if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { rt_mt6() 163 ap = skb_header_pointer(skb, rt_mt6() 169 BUG_ON(ap == NULL); rt_mt6() 171 if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp])) rt_mt6()
|
H A D | nf_conntrack_l3proto_ipv6.c | 40 const u_int32_t *ap; ipv6_pkt_to_tuple() local 43 ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), ipv6_pkt_to_tuple() 45 if (ap == NULL) ipv6_pkt_to_tuple() 48 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); ipv6_pkt_to_tuple() 49 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); ipv6_pkt_to_tuple()
|
/linux-4.1.27/crypto/ |
H A D | gf128mul.c | 362 u8 *ap = (u8 *)a; gf128mul_64k_lle() local 366 *r = t->t[0]->t[ap[0]]; gf128mul_64k_lle() 368 be128_xor(r, r, &t->t[i]->t[ap[i]]); gf128mul_64k_lle() 375 u8 *ap = (u8 *)a; gf128mul_64k_bbe() local 379 *r = t->t[0]->t[ap[15]]; gf128mul_64k_bbe() 381 be128_xor(r, r, &t->t[i]->t[ap[15 - i]]); gf128mul_64k_bbe() 448 u8 *ap = (u8 *)a; gf128mul_4k_lle() local 452 *r = t->t[ap[15]]; gf128mul_4k_lle() 455 be128_xor(r, r, &t->t[ap[i]]); gf128mul_4k_lle() 463 u8 *ap = (u8 *)a; gf128mul_4k_bbe() local 467 *r = t->t[ap[0]]; gf128mul_4k_bbe() 470 be128_xor(r, r, &t->t[ap[i]]); gf128mul_4k_bbe()
|
/linux-4.1.27/drivers/scsi/arm/ |
H A D | msgqueue.c | 121 va_list ap; msgqueue_addmsg() local 127 va_start(ap, length); msgqueue_addmsg() 129 mq->msg.msg[i] = va_arg(ap, unsigned int); msgqueue_addmsg() 130 va_end(ap); msgqueue_addmsg()
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | uprobes.h | 39 void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
|
H A D | nmi.h | 54 __u32 ap : 1; /* 44 ancillary report */ member in struct:mci
|
/linux-4.1.27/scripts/kconfig/ |
H A D | util.c | 117 va_list ap; str_printf() local 119 va_start(ap, fmt); str_printf() 120 vsnprintf(s, sizeof(s), fmt, ap); str_printf() 122 va_end(ap); str_printf()
|
H A D | zconf.y | 556 va_list ap; 559 va_start(ap, err); 560 vfprintf(stderr, err, ap); 561 va_end(ap); 567 va_list ap; 571 va_start(ap, err); 572 vfprintf(stderr, err, ap); 573 va_end(ap);
|
/linux-4.1.27/drivers/hwmon/ |
H A D | vt1211.c | 90 /* Auto points numbered 0-3 (ap) */ 94 #define VT1211_REG_PWM_AUTO_TEMP(ap) (0x55 - (ap)) 95 #define VT1211_REG_PWM_AUTO_PWM(ix, ap) (0x58 + 2 * (ix) - (ap)) 736 * ap = [0-3] 740 * pwm[ix+1]_auto_point[ap+1]_temp mapping table: 745 * ix ap : description 765 int ap = sensor_attr_2->nr; show_pwm_auto_point_temp() local 768 data->pwm_auto_temp[ap])); show_pwm_auto_point_temp() 779 int ap = sensor_attr_2->nr; set_pwm_auto_point_temp() local 796 data->pwm_auto_temp[ap] = TEMP_TO_REG(data->pwm_ctl[ix] & 7, val); set_pwm_auto_point_temp() 797 vt1211_write8(data, VT1211_REG_PWM_AUTO_TEMP(ap), set_pwm_auto_point_temp() 798 data->pwm_auto_temp[ap]); set_pwm_auto_point_temp() 805 * pwm[ix+1]_auto_point[ap+1]_pwm mapping table: 809 * ix ap : description 829 int ap = sensor_attr_2->nr; show_pwm_auto_point_pwm() local 831 return sprintf(buf, "%d\n", data->pwm_auto_pwm[ix][ap]); show_pwm_auto_point_pwm() 842 int ap = sensor_attr_2->nr; set_pwm_auto_point_pwm() local 851 data->pwm_auto_pwm[ix][ap] = clamp_val(val, 0, 255); set_pwm_auto_point_pwm() 852 vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap), set_pwm_auto_point_pwm() 853 data->pwm_auto_pwm[ix][ap]); set_pwm_auto_point_pwm() 1039 #define SENSOR_ATTR_PWM_AUTO_POINT_TEMP(ix, ap) \ 1040 SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO | S_IWUSR, \ 1042 ap-1, ix-1) 1044 #define SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(ix, ap) \ 1045 SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO, \ 1047 ap-1, ix-1) 1049 #define SENSOR_ATTR_PWM_AUTO_POINT_PWM(ix, ap) \ 1050 SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO | S_IWUSR, \ 1052 ap-1, ix-1) 1054 #define SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(ix, ap) \ 1055 SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO, \ 1057 ap-1, ix-1)
|
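The vt1211 macros map a (pwm index, auto point) pair onto registers by plain offset arithmetic: temperature points count down from 0x55 and PWM points interleave downward from 0x58 + 2*ix. The few lines below only evaluate the macros from the listing to make the addressing concrete:

#include <stdio.h>

#define VT1211_REG_PWM_AUTO_TEMP(ap)     (0x55 - (ap))
#define VT1211_REG_PWM_AUTO_PWM(ix, ap)  (0x58 + 2 * (ix) - (ap))

int main(void)
{
        int ix, ap;

        for (ap = 0; ap <= 3; ap++)
                printf("temp point %d -> reg 0x%02x\n",
                       ap + 1, VT1211_REG_PWM_AUTO_TEMP(ap));

        for (ix = 0; ix <= 1; ix++)
                for (ap = 0; ap <= 3; ap++)
                        printf("pwm%d point %d -> reg 0x%02x\n",
                               ix + 1, ap + 1, VT1211_REG_PWM_AUTO_PWM(ix, ap));
        return 0;
}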
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
H A D | cmd.c | 380 *hlid == wlvif->ap.bcast_hlid) { wl12xx_free_link() 630 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); wl12xx_cmd_role_start_ap() 645 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid); wl12xx_cmd_role_start_ap() 649 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid); wl12xx_cmd_role_start_ap() 654 wl->links[wlvif->ap.bcast_hlid].total_freed_pkts = wl12xx_cmd_role_start_ap() 658 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); wl12xx_cmd_role_start_ap() 659 cmd->ap.bss_index = WL1271_AP_BSS_INDEX; wl12xx_cmd_role_start_ap() 660 cmd->ap.global_hlid = wlvif->ap.global_hlid; wl12xx_cmd_role_start_ap() 661 cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid; wl12xx_cmd_role_start_ap() 662 cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid]; wl12xx_cmd_role_start_ap() 663 cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid]; wl12xx_cmd_role_start_ap() 664 cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); wl12xx_cmd_role_start_ap() 665 cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int); wl12xx_cmd_role_start_ap() 666 cmd->ap.dtim_interval = bss_conf->dtim_period; wl12xx_cmd_role_start_ap() 667 cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; wl12xx_cmd_role_start_ap() 669 cmd->ap.reset_tsf = 1; /* By default reset AP TSF */ wl12xx_cmd_role_start_ap() 670 cmd->ap.wmm = wlvif->wmm_enabled; wl12xx_cmd_role_start_ap() 676 cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC; wl12xx_cmd_role_start_ap() 677 cmd->ap.ssid_len = wlvif->ssid_len; wl12xx_cmd_role_start_ap() 678 memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len); wl12xx_cmd_role_start_ap() 680 cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN; wl12xx_cmd_role_start_ap() 681 cmd->ap.ssid_len = bss_conf->ssid_len; wl12xx_cmd_role_start_ap() 682 memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len); wl12xx_cmd_role_start_ap() 690 wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x", wl12xx_cmd_role_start_ap() 693 cmd->ap.local_rates = cpu_to_le32(supported_rates); wl12xx_cmd_role_start_ap() 703 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); wl12xx_cmd_role_start_ap() 710 wl1271_error("failed to initiate cmd role start ap"); wl12xx_cmd_role_start_ap() 717 wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); wl12xx_cmd_role_start_ap() 720 wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); wl12xx_cmd_role_start_ap() 740 wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id); wl12xx_cmd_role_stop_ap() 746 wl1271_error("failed to initiate cmd role stop ap"); wl12xx_cmd_role_stop_ap() 750 wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); wl12xx_cmd_role_stop_ap() 751 wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); wl12xx_cmd_role_stop_ap() 1195 wl1271_debug(DEBUG_SCAN, "set ap probe request template"); wl1271_cmd_build_ap_probe_req() 1208 wl1271_error("Unable to set ap probe request template."); wl1271_cmd_build_ap_probe_req() 1443 if (hlid == wlvif->ap.bcast_hlid) { wl1271_cmd_set_ap_key() 1452 wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d" wl1271_cmd_set_ap_key() 1483 wl1271_warning("could not set ap keys"); wl1271_cmd_set_ap_key()
|
/linux-4.1.27/drivers/crypto/nx/ |
H A D | nx-aes-xcbc.c | 49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; nx_xcbc_set_key() 94 nx_ctx->ap->sglen); nx_xcbc_empty() 100 nx_ctx->ap->sglen); nx_xcbc_empty() 122 nx_ctx->ap->sglen); nx_xcbc_empty() 129 nx_ctx->ap->sglen); nx_xcbc_empty() 211 nx_ctx->ap->sglen); nx_xcbc_update() 213 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_xcbc_update() 217 &len, nx_ctx->ap->sglen); nx_xcbc_update() 340 &len, nx_ctx->ap->sglen); nx_xcbc_final() 349 nx_ctx->ap->sglen); nx_xcbc_final()
|
H A D | nx-aes-cbc.c | 45 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; cbc_aes_nx_set_key() 49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; cbc_aes_nx_set_key() 53 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; cbc_aes_nx_set_key()
|
H A D | nx-aes-ecb.c | 45 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; ecb_aes_nx_set_key() 49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; ecb_aes_nx_set_key() 53 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; ecb_aes_nx_set_key()
|
H A D | nx-aes-gcm.c | 49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; gcm_aes_nx_set_key() 54 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; gcm_aes_nx_set_key() 59 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; gcm_aes_nx_set_key() 147 nx_ctx->ap->sglen); nx_gca() 149 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_gca() 157 nx_ctx->ap->databytelen); nx_gca() 210 nx_ctx->ap->sglen); gmac() 212 nx_ctx->ap->databytelen/NX_PAGE_SIZE); gmac() 223 nx_ctx->ap->databytelen); gmac() 293 &len, nx_ctx->ap->sglen); gcm_empty() 300 nx_ctx->ap->sglen); gcm_empty()
|
H A D | nx.c | 285 max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_build_sg_lists() 288 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_build_sg_lists() 293 *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); nx_build_sg_lists() 405 props->ap[msc->fc][msc->mode][0].databytelen = nx_of_update_msc() 407 props->ap[msc->fc][msc->mode][0].sglen = nx_of_update_msc() 411 props->ap[msc->fc][msc->mode][1].databytelen = nx_of_update_msc() 413 props->ap[msc->fc][msc->mode][1].sglen = nx_of_update_msc() 418 props->ap[msc->fc][msc->mode][2]. nx_of_update_msc() 420 props->ap[msc->fc][msc->mode][2].sglen = nx_of_update_msc() 424 props->ap[msc->fc][msc->mode][1]. nx_of_update_msc() 426 props->ap[msc->fc][msc->mode][1].sglen = nx_of_update_msc() 435 props->ap[msc->fc][msc->mode][2].databytelen = nx_of_update_msc() 437 props->ap[msc->fc][msc->mode][2].sglen = nx_of_update_msc() 629 memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode], nx_crypto_ctx_init()
|
H A D | nx-sha256.c | 43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; nx_crypto_ctx_sha256_init() 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_sha256_update() 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_sha256_update() 202 max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_sha256_final() 205 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_sha256_final()
|
H A D | nx-sha512.c | 42 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; nx_crypto_ctx_sha512_init() 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_sha512_update() 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_sha512_update() 206 max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_sha512_final() 209 nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_sha512_final()
|
H A D | nx-aes-ctr.c | 46 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; ctr_aes_nx_set_key() 50 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; ctr_aes_nx_set_key() 54 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; ctr_aes_nx_set_key()
|
H A D | nx-aes-ccm.c | 49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; ccm_aes_nx_set_key() 254 nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); generate_pat() 260 nx_ctx->ap->sglen); generate_pat() 291 max_sg_len = min_t(u64, nx_ctx->ap->sglen, generate_pat() 294 nx_ctx->ap->databytelen/NX_PAGE_SIZE); generate_pat() 298 nx_ctx->ap->databytelen); generate_pat() 301 nx_ctx->ap->sglen, generate_pat()
|
H A D | nx.h | 60 struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3]; member in struct:nx_of 139 struct alg_props *ap; /* pointer into props based on our key size */ member in struct:nx_crypto_ctx
|
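Across the nx-aes-* files, each set_key() points nx_ctx->ap at one row of a per-key-size properties table, and everything downstream just reads ap->sglen and ap->databytelen without caring which AES variant is active. A minimal standalone sketch of that selection; the struct shape follows the nx.h excerpt, but the limit values are made up:

#include <stddef.h>
#include <stdint.h>

struct alg_props {
        uint32_t databytelen;   /* max bytes per operation */
        uint32_t sglen;         /* max scatter/gather entries */
};

/* One row per key size: AES-128, AES-192, AES-256. */
static const struct alg_props aes_props[3] = {
        { 64 * 1024, 128 },
        { 64 * 1024, 128 },
        { 32 * 1024,  64 },
};

struct ctx {
        const struct alg_props *ap;   /* points into aes_props */
};

static int set_key(struct ctx *c, size_t key_len)
{
        switch (key_len) {
        case 16: c->ap = &aes_props[0]; break;
        case 24: c->ap = &aes_props[1]; break;
        case 32: c->ap = &aes_props[2]; break;
        default: return -1;
        }
        return 0;
}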
/linux-4.1.27/drivers/clk/ |
H A D | clkdev.c | 247 va_list ap) vclkdev_alloc() 262 vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); vclkdev_alloc() 273 va_list ap; clkdev_alloc() local 275 va_start(ap, dev_fmt); clkdev_alloc() 276 cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); clkdev_alloc() 277 va_end(ap); clkdev_alloc() 331 va_list ap; clk_register_clkdev() local 336 va_start(ap, dev_fmt); clk_register_clkdev() 337 cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); clk_register_clkdev() 338 va_end(ap); clk_register_clkdev() 246 vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, va_list ap) vclkdev_alloc() argument
|
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_bmap.c | 3354 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ xfs_bmap_adjacent() 3357 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_bmap_adjacent() 3359 int nullfb; /* true if ap->firstblock isn't set */ xfs_bmap_adjacent() 3369 mp = ap->ip->i_mount; xfs_bmap_adjacent() 3370 nullfb = *ap->firstblock == NULLFSBLOCK; xfs_bmap_adjacent() 3371 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; xfs_bmap_adjacent() 3372 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); xfs_bmap_adjacent() 3377 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && xfs_bmap_adjacent() 3378 !isnullstartblock(ap->prev.br_startblock) && xfs_bmap_adjacent() 3379 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, xfs_bmap_adjacent() 3380 ap->prev.br_startblock)) { xfs_bmap_adjacent() 3381 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; xfs_bmap_adjacent() 3385 adjust = ap->offset - xfs_bmap_adjacent() 3386 (ap->prev.br_startoff + ap->prev.br_blockcount); xfs_bmap_adjacent() 3388 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) xfs_bmap_adjacent() 3389 ap->blkno += adjust; xfs_bmap_adjacent() 3396 else if (!ap->eof) { xfs_bmap_adjacent() 3406 if (ap->prev.br_startoff != NULLFILEOFF && xfs_bmap_adjacent() 3407 !isnullstartblock(ap->prev.br_startblock) && xfs_bmap_adjacent() 3408 (prevbno = ap->prev.br_startblock + xfs_bmap_adjacent() 3409 ap->prev.br_blockcount) && xfs_bmap_adjacent() 3410 ISVALID(prevbno, ap->prev.br_startblock)) { xfs_bmap_adjacent() 3414 adjust = prevdiff = ap->offset - xfs_bmap_adjacent() 3415 (ap->prev.br_startoff + xfs_bmap_adjacent() 3416 ap->prev.br_blockcount); xfs_bmap_adjacent() 3425 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && xfs_bmap_adjacent() 3427 ap->prev.br_startblock)) xfs_bmap_adjacent() 3448 if (!isnullstartblock(ap->got.br_startblock)) { xfs_bmap_adjacent() 3452 adjust = gotdiff = ap->got.br_startoff - ap->offset; xfs_bmap_adjacent() 3457 gotbno = ap->got.br_startblock; xfs_bmap_adjacent() 3465 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && xfs_bmap_adjacent() 3468 else if (ISVALID(gotbno - ap->length, gotbno)) { xfs_bmap_adjacent() 3469 gotbno -= ap->length; xfs_bmap_adjacent() 3470 gotdiff += adjust - ap->length; xfs_bmap_adjacent() 3488 * one, else ap->blkno is already set (to 0 or the inode block). xfs_bmap_adjacent() 3491 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; xfs_bmap_adjacent() 3493 ap->blkno = prevbno; xfs_bmap_adjacent() 3495 ap->blkno = gotbno; xfs_bmap_adjacent() 3535 struct xfs_bmalloca *ap, xfs_bmap_select_minlen() 3540 if (notinit || *blen < ap->minlen) { xfs_bmap_select_minlen() 3545 args->minlen = ap->minlen; xfs_bmap_select_minlen() 3563 struct xfs_bmalloca *ap, xfs_bmap_btalloc_nullfb() 3567 struct xfs_mount *mp = ap->ip->i_mount; xfs_bmap_btalloc_nullfb() 3573 args->total = ap->total; xfs_bmap_btalloc_nullfb() 3591 xfs_bmap_select_minlen(ap, args, blen, notinit); xfs_bmap_btalloc_nullfb() 3597 struct xfs_bmalloca *ap, xfs_bmap_btalloc_filestreams() 3601 struct xfs_mount *mp = ap->ip->i_mount; xfs_bmap_btalloc_filestreams() 3607 args->total = ap->total; xfs_bmap_btalloc_filestreams() 3618 error = xfs_filestream_new_ag(ap, &ag); xfs_bmap_btalloc_filestreams() 3629 xfs_bmap_select_minlen(ap, args, blen, notinit); xfs_bmap_btalloc_filestreams() 3635 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); xfs_bmap_btalloc_filestreams() 3641 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ xfs_bmap_btalloc() 3646 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_bmap_btalloc() 3651 int nullfb; /* true if ap->firstblock isn't set */ xfs_bmap_btalloc() 3657 ASSERT(ap->length); xfs_bmap_btalloc() 3659 mp = ap->ip->i_mount; xfs_bmap_btalloc() 3668 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; xfs_bmap_btalloc() 3670 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, xfs_bmap_btalloc() 3671 align, 0, ap->eof, 0, ap->conv, xfs_bmap_btalloc() 3672 &ap->offset, &ap->length); xfs_bmap_btalloc() 3674 ASSERT(ap->length); xfs_bmap_btalloc() 3678 nullfb = *ap->firstblock == NULLFSBLOCK; xfs_bmap_btalloc() 3679 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); xfs_bmap_btalloc() 3681 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) { xfs_bmap_btalloc() 3682 ag = xfs_filestream_lookup_ag(ap->ip); xfs_bmap_btalloc() 3684 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); xfs_bmap_btalloc() 3686 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); xfs_bmap_btalloc() 3689 ap->blkno = *ap->firstblock; xfs_bmap_btalloc() 3691 xfs_bmap_adjacent(ap); xfs_bmap_btalloc() 3694 * If allowed, use ap->blkno; otherwise must use firstblock since xfs_bmap_btalloc() 3697 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) xfs_bmap_btalloc() 3700 ap->blkno = *ap->firstblock; xfs_bmap_btalloc() 3706 args.tp = ap->tp; xfs_bmap_btalloc() 3708 args.fsbno = ap->blkno; xfs_bmap_btalloc() 3711 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp)); xfs_bmap_btalloc() 3712 args.firstblock = *ap->firstblock; xfs_bmap_btalloc() 3720 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) xfs_bmap_btalloc() 3721 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); xfs_bmap_btalloc() 3723 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); xfs_bmap_btalloc() 3726 } else if (ap->flist->xbf_low) { xfs_bmap_btalloc() 3727 if (xfs_inode_is_filestream(ap->ip)) xfs_bmap_btalloc() 3731 args.total = args.minlen = ap->minlen; xfs_bmap_btalloc() 3734 args.total = ap->total; xfs_bmap_btalloc() 3735 args.minlen = ap->minlen; xfs_bmap_btalloc() 3740 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) xfs_bmap_btalloc() 3747 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) xfs_bmap_btalloc() 3755 * NOTE: ap->aeof is only set if the allocation length xfs_bmap_btalloc() 3759 if (!ap->flist->xbf_low && ap->aeof) { xfs_bmap_btalloc() 3760 if (!ap->offset) { xfs_bmap_btalloc() 3801 args.minleft = ap->minleft; 
xfs_bmap_btalloc() 3802 args.wasdel = ap->wasdel; xfs_bmap_btalloc() 3804 args.userdata = ap->userdata; xfs_bmap_btalloc() 3813 args.fsbno = ap->blkno; xfs_bmap_btalloc() 3827 args.fsbno = ap->blkno; xfs_bmap_btalloc() 3833 args.minlen > ap->minlen) { xfs_bmap_btalloc() 3834 args.minlen = ap->minlen; xfs_bmap_btalloc() 3836 args.fsbno = ap->blkno; xfs_bmap_btalloc() 3843 args.total = ap->minlen; xfs_bmap_btalloc() 3847 ap->flist->xbf_low = 1; xfs_bmap_btalloc() 3854 ASSERT(*ap->firstblock == NULLFSBLOCK || xfs_bmap_btalloc() 3855 XFS_FSB_TO_AGNO(mp, *ap->firstblock) == xfs_bmap_btalloc() 3857 (ap->flist->xbf_low && xfs_bmap_btalloc() 3858 XFS_FSB_TO_AGNO(mp, *ap->firstblock) < xfs_bmap_btalloc() 3861 ap->blkno = args.fsbno; xfs_bmap_btalloc() 3862 if (*ap->firstblock == NULLFSBLOCK) xfs_bmap_btalloc() 3863 *ap->firstblock = args.fsbno; xfs_bmap_btalloc() 3865 (ap->flist->xbf_low && fb_agno < args.agno)); xfs_bmap_btalloc() 3866 ap->length = args.len; xfs_bmap_btalloc() 3867 ap->ip->i_d.di_nblocks += args.len; xfs_bmap_btalloc() 3868 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); xfs_bmap_btalloc() 3869 if (ap->wasdel) xfs_bmap_btalloc() 3870 ap->ip->i_delayed_blks -= args.len; xfs_bmap_btalloc() 3875 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, xfs_bmap_btalloc() 3876 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : xfs_bmap_btalloc() 3880 ap->blkno = NULLFSBLOCK; xfs_bmap_btalloc() 3881 ap->length = 0; xfs_bmap_btalloc() 3892 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ xfs_bmap_alloc() 3894 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) xfs_bmap_alloc() 3895 return xfs_bmap_rtalloc(ap); xfs_bmap_alloc() 3896 return xfs_bmap_btalloc(ap); xfs_bmap_alloc() 3353 xfs_bmap_adjacent( struct xfs_bmalloca *ap) xfs_bmap_adjacent() argument 3534 xfs_bmap_select_minlen( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t *blen, int notinit) xfs_bmap_select_minlen() argument 3562 xfs_bmap_btalloc_nullfb( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t *blen) xfs_bmap_btalloc_nullfb() argument 3596 xfs_bmap_btalloc_filestreams( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t *blen) xfs_bmap_btalloc_filestreams() argument 3640 xfs_bmap_btalloc( struct xfs_bmalloca *ap) xfs_bmap_btalloc() argument 3891 xfs_bmap_alloc( struct xfs_bmalloca *ap) xfs_bmap_alloc() argument
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_bmap_util.c | 147 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ xfs_bmap_rtalloc() 157 mp = ap->ip->i_mount; xfs_bmap_rtalloc() 158 align = xfs_get_extsz_hint(ap->ip); xfs_bmap_rtalloc() 160 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, xfs_bmap_rtalloc() 161 align, 1, ap->eof, 0, xfs_bmap_rtalloc() 162 ap->conv, &ap->offset, &ap->length); xfs_bmap_rtalloc() 165 ASSERT(ap->length); xfs_bmap_rtalloc() 166 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0); xfs_bmap_rtalloc() 172 if (do_mod(ap->offset, align) || ap->length % align) xfs_bmap_rtalloc() 177 ralen = ap->length / mp->m_sb.sb_rextsize; xfs_bmap_rtalloc() 192 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL); xfs_bmap_rtalloc() 198 if (ap->eof && ap->offset == 0) { xfs_bmap_rtalloc() 201 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); xfs_bmap_rtalloc() 204 ap->blkno = rtx * mp->m_sb.sb_rextsize; xfs_bmap_rtalloc() 206 ap->blkno = 0; xfs_bmap_rtalloc() 209 xfs_bmap_adjacent(ap); xfs_bmap_rtalloc() 214 atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; xfs_bmap_rtalloc() 215 do_div(ap->blkno, mp->m_sb.sb_rextsize); xfs_bmap_rtalloc() 216 rtb = ap->blkno; xfs_bmap_rtalloc() 217 ap->length = ralen; xfs_bmap_rtalloc() 218 if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length, xfs_bmap_rtalloc() 219 &ralen, atype, ap->wasdel, prod, &rtb))) xfs_bmap_rtalloc() 222 (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, xfs_bmap_rtalloc() 223 ap->length, &ralen, atype, xfs_bmap_rtalloc() 224 ap->wasdel, 1, &rtb))) xfs_bmap_rtalloc() 226 ap->blkno = rtb; xfs_bmap_rtalloc() 227 if (ap->blkno != NULLFSBLOCK) { xfs_bmap_rtalloc() 228 ap->blkno *= mp->m_sb.sb_rextsize; xfs_bmap_rtalloc() 230 ap->length = ralen; xfs_bmap_rtalloc() 231 ap->ip->i_d.di_nblocks += ralen; xfs_bmap_rtalloc() 232 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); xfs_bmap_rtalloc() 233 if (ap->wasdel) xfs_bmap_rtalloc() 234 ap->ip->i_delayed_blks -= ralen; xfs_bmap_rtalloc() 239 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, xfs_bmap_rtalloc() 240 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : xfs_bmap_rtalloc() 243 ap->length = 0; xfs_bmap_rtalloc() 146 xfs_bmap_rtalloc( struct xfs_bmalloca *ap) xfs_bmap_rtalloc() argument
|
H A D | xfs_bmap_util.h | 31 int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 52 void xfs_bmap_adjacent(struct xfs_bmalloca *ap);
|
H A D | xfs_filestream.h | 29 int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp);
|
/linux-4.1.27/arch/um/drivers/ |
H A D | net_user.c | 253 va_list ap; split_if_spec() local 255 va_start(ap, str); split_if_spec() 256 while ((arg = va_arg(ap, char **)) != NULL) { split_if_spec() 267 va_end(ap); split_if_spec()
|
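The split_if_spec() excerpt above walks its variadic tail as a NULL-terminated list of char ** slots. The following is a minimal, standalone sketch of that sentinel-terminated va_arg loop; the helper name split_fields() and the comma delimiter are invented for the example and are not the kernel helper's behaviour.

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static int split_fields(char *str, ...)
{
	char **arg;
	va_list ap;

	va_start(ap, str);
	while ((arg = va_arg(ap, char **)) != NULL) {
		if (str == NULL) {		/* ran out of input fields */
			va_end(ap);
			return -1;
		}
		*arg = str;			/* hand this field to the caller */
		str = strchr(str, ',');
		if (str != NULL)
			*str++ = '\0';		/* terminate field, advance */
	}
	va_end(ap);
	return 0;
}

int main(void)
{
	char spec[] = "eth0,10.0.0.1,255.255.255.0";
	char *dev, *addr, *mask;

	if (split_fields(spec, &dev, &addr, &mask, NULL) == 0)
		printf("dev=%s addr=%s mask=%s\n", dev, addr, mask);
	return 0;
}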
/linux-4.1.27/drivers/media/pci/ivtv/ |
H A D | ivtv-mailbox.c | 346 va_list ap; ivtv_vapi_result() local 349 va_start(ap, args); ivtv_vapi_result() 351 data[i] = va_arg(ap, u32); ivtv_vapi_result() 353 va_end(ap); ivtv_vapi_result() 360 va_list ap; ivtv_vapi() local 363 va_start(ap, args); ivtv_vapi() 365 data[i] = va_arg(ap, u32); ivtv_vapi() 367 va_end(ap); ivtv_vapi()
|
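ivtv_vapi()/ivtv_vapi_result() above pull a caller-supplied count of u32 arguments into a flat array before issuing the mailbox command. A small userspace sketch of that shape follows; mbox_write() and the command number in main() are hypothetical stand-ins, not the ivtv API.

#include <inttypes.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_MB_ARGS 8

/* Hypothetical stand-in for the real mailbox write. */
static void mbox_write(int cmd, const uint32_t *data, int n)
{
	printf("cmd %d:", cmd);
	for (int i = 0; i < n; i++)
		printf(" 0x%" PRIx32, data[i]);
	printf("\n");
}

/* Pack 'args' 32-bit values from the variadic tail into data[]. */
static int mbox_vapi(int cmd, int args, ...)
{
	uint32_t data[MAX_MB_ARGS] = { 0 };
	va_list ap;
	int i;

	if (args > MAX_MB_ARGS)
		return -1;
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, uint32_t);	/* same u32 convention as the driver */
	va_end(ap);
	mbox_write(cmd, data, args);
	return 0;
}

int main(void)
{
	return mbox_vapi(0x29, 3, 0u, 720u, 576u);
}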
/linux-4.1.27/include/scsi/ |
H A D | sas_ata.h | 50 void sas_ata_end_eh(struct ata_port *ap); 100 static inline void sas_ata_end_eh(struct ata_port *ap) sas_ata_end_eh() argument
|
/linux-4.1.27/lib/mpi/ |
H A D | mpiutil.c | 75 void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) mpi_assign_limb_space() argument 78 a->d = ap; mpi_assign_limb_space()
|
/linux-4.1.27/arch/nios2/lib/ |
H A D | memcpy.c | 115 op_t ap; _wordcopy_fwd_dest_aligned() local 127 ap = ((op_t *) srcp)[0]; _wordcopy_fwd_dest_aligned() 137 ((op_t *) dstp)[0] = MERGE(ap, sh_1, a0, sh_2); _wordcopy_fwd_dest_aligned() 142 ap = a3; _wordcopy_fwd_dest_aligned() 151 ((op_t *) dstp)[0] = MERGE(ap, sh_1, a0, sh_2); _wordcopy_fwd_dest_aligned() 153 ap = a0; _wordcopy_fwd_dest_aligned()
|
/linux-4.1.27/sound/mips/ |
H A D | ad1843.c | 230 va_list ap; ad1843_read_multi() local 234 va_start(ap, argcount); ad1843_read_multi() 236 fp = va_arg(ap, const struct ad1843_bitfield *); ad1843_read_multi() 237 value = va_arg(ap, int *); ad1843_read_multi() 246 va_end(ap); ad1843_read_multi() 262 va_list ap; ad1843_write_multi() local 272 va_start(ap, argcount); ad1843_write_multi() 274 fp = va_arg(ap, const struct ad1843_bitfield *); ad1843_write_multi() 275 value = va_arg(ap, int); ad1843_write_multi() 284 va_end(ap); ad1843_write_multi()
|
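ad1843_read_multi()/ad1843_write_multi() above consume their variadic tail as argcount pairs of (bitfield descriptor, value). The sketch below shows only that pairing idiom; struct field, field_write() and the fake register file are invented for illustration and do not reflect the AD1843 register layout.

#include <stdarg.h>
#include <stdio.h>

struct field {
	const char *name;
	unsigned shift, mask;
};

static unsigned regs[4];	/* fake register file */

static void field_write(const struct field *f, int value)
{
	regs[0] = (regs[0] & ~(f->mask << f->shift)) |
		  (((unsigned)value & f->mask) << f->shift);
}

/* Write several fields in one call: write_multi(n, &f1, v1, &f2, v2, ...). */
static void write_multi(int argcount, ...)
{
	va_list ap;
	const struct field *fp;
	int value;

	va_start(ap, argcount);
	for (int i = 0; i < argcount; i++) {
		fp = va_arg(ap, const struct field *);
		value = va_arg(ap, int);
		field_write(fp, value);
	}
	va_end(ap);
}

int main(void)
{
	static const struct field gain = { "gain", 0, 0x3f };
	static const struct field mute = { "mute", 15, 0x1 };

	write_multi(2, &gain, 42, &mute, 1);
	printf("reg0 = 0x%x\n", regs[0]);
	return 0;
}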
/linux-4.1.27/drivers/xen/xenbus/ |
H A D | xenbus_xs.c | 548 va_list ap; xenbus_scanf() local 556 va_start(ap, fmt); xenbus_scanf() 557 ret = vsscanf(val, fmt, ap); xenbus_scanf() 558 va_end(ap); xenbus_scanf() 571 va_list ap; xenbus_printf() local 575 va_start(ap, fmt); xenbus_printf() 576 buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); xenbus_printf() 577 va_end(ap); xenbus_printf() 593 va_list ap; xenbus_gather() local 597 va_start(ap, dir); xenbus_gather() 598 while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { xenbus_gather() 599 const char *fmt = va_arg(ap, char *); xenbus_gather() 600 void *result = va_arg(ap, void *); xenbus_gather() 615 va_end(ap); xenbus_gather()
|
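xenbus_scanf() and xenbus_printf() above are thin printf/scanf-style wrappers: one formats the value with kvasprintf() before writing it, the other parses the stored string with vsscanf(). A userspace sketch of the same idea, assuming a toy single-key store — kv_get()/kv_set() are hypothetical stand-ins for the xenstore read/write calls, and vasprintf() plays the role of kvasprintf():

#define _GNU_SOURCE		/* for vasprintf() */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char store[64];			/* one fake key's value */

static const char *kv_get(const char *key) { (void)key; return store; }
static void kv_set(const char *key, const char *val)
{
	(void)key;
	snprintf(store, sizeof(store), "%s", val);
}

static int store_printf(const char *key, const char *fmt, ...)
{
	va_list ap;
	char *buf;
	int ret;

	va_start(ap, fmt);
	ret = vasprintf(&buf, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return -1;
	kv_set(key, buf);
	free(buf);
	return 0;
}

static int store_scanf(const char *key, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vsscanf(kv_get(key), fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	unsigned ring_ref;

	store_printf("ring-ref", "%u", 42u);
	if (store_scanf("ring-ref", "%u", &ring_ref) == 1)
		printf("ring-ref = %u\n", ring_ref);
	return 0;
}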
H A D | xenbus_client.c | 155 va_list ap; xenbus_watch_pathfmt() local 158 va_start(ap, pathfmt); xenbus_watch_pathfmt() 159 path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); xenbus_watch_pathfmt() 160 va_end(ap); xenbus_watch_pathfmt() 268 const char *fmt, va_list ap) xenbus_va_dev_error() 280 vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); xenbus_va_dev_error() 315 va_list ap; xenbus_dev_error() local 317 va_start(ap, fmt); xenbus_dev_error() 318 xenbus_va_dev_error(dev, err, fmt, ap); xenbus_dev_error() 319 va_end(ap); xenbus_dev_error() 336 va_list ap; xenbus_dev_fatal() local 338 va_start(ap, fmt); xenbus_dev_fatal() 339 xenbus_va_dev_error(dev, err, fmt, ap); xenbus_dev_fatal() 340 va_end(ap); xenbus_dev_fatal() 353 va_list ap; xenbus_switch_fatal() local 355 va_start(ap, fmt); xenbus_switch_fatal() 356 xenbus_va_dev_error(dev, err, fmt, ap); xenbus_switch_fatal() 357 va_end(ap); xenbus_switch_fatal() 267 xenbus_va_dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) xenbus_va_dev_error() argument
|
/linux-4.1.27/net/ax25/ |
H A D | ax25_iface.c | 48 void ax25_register_pid(struct ax25_protocol *ap) ax25_register_pid() argument 51 ap->next = protocol_list; ax25_register_pid() 52 protocol_list = ap; ax25_register_pid()
|
/linux-4.1.27/include/acpi/platform/ |
H A D | acenv.h | 398 #define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) 399 #define va_end(ap) (ap = (va_list) NULL) 400 #define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND))))
|
/linux-4.1.27/drivers/net/can/usb/peak_usb/ |
H A D | pcan_usb_pro.c | 134 va_list ap; pcan_msg_add_rec() local 136 va_start(ap, id); pcan_msg_add_rec() 147 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 148 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 149 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 150 *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pcan_msg_add_rec() 152 memcpy(pc, va_arg(ap, int *), i); pcan_msg_add_rec() 158 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 160 *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pcan_msg_add_rec() 167 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 168 *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pcan_msg_add_rec() 173 *pc++ = va_arg(ap, int); pcan_msg_add_rec() 174 *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pcan_msg_add_rec() 176 *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pcan_msg_add_rec() 182 *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pcan_msg_add_rec() 202 va_end(ap); pcan_msg_add_rec()
|
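pcan_msg_add_rec() above packs its variadic arguments into a wire-format buffer, pulling small integers as int (default promotion) and storing multi-byte values with explicit little-endian conversion. The sketch below shows that packing idiom only; the record layout (1-byte id, 1-byte channel, le16, le32) is invented for the example and is not the PCAN-USB Pro message format.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Append one record: add_rec(buf, id, channel, le16_value, le32_value). */
static size_t add_rec(uint8_t *pc, int id, ...)
{
	uint8_t *start = pc;
	va_list ap;

	va_start(ap, id);
	*pc++ = (uint8_t)id;
	*pc++ = (uint8_t)va_arg(ap, int);	 /* u8 arrives promoted to int */
	put_le16(pc, (uint16_t)va_arg(ap, int)); /* u16 arrives promoted to int */
	pc += 2;
	put_le32(pc, va_arg(ap, uint32_t));
	pc += 4;
	va_end(ap);
	return (size_t)(pc - start);
}

int main(void)
{
	uint8_t buf[16];
	size_t n = add_rec(buf, 0x02, 1, 500, 80000u);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", (unsigned)buf[i]);
	printf("\n");
	return 0;
}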
/linux-4.1.27/arch/x86/vdso/ |
H A D | vdso2c.c | 110 va_list ap; fail() local 111 va_start(ap, format); fail() 113 vfprintf(stderr, format, ap); fail() 117 va_end(ap); fail()
|
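The fail() helper from vdso2c.c above is the classic "format to stderr, then terminate" variadic wrapper around vfprintf(). A minimal standalone version, with the name die() chosen for the example:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void die(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	fprintf(stderr, "error: ");
	vfprintf(stderr, format, ap);	/* forward the caller's format and args */
	va_end(ap);
	exit(1);
}

int main(int argc, char **argv)
{
	if (argc < 2)
		die("missing argument (got %d)\n", argc - 1);
	printf("ok: %s\n", argv[1]);
	return 0;
}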
/linux-4.1.27/include/trace/events/ |
H A D | libata.h | 170 __entry->ata_port = qc->ap->print_id; 227 __entry->ata_port = qc->ap->print_id; 282 __entry->ata_port = dev->link->ap->print_id; 309 __entry->ata_port = qc->ap->print_id;
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | devtree.c | 122 va_list ap; __dt_fixup_mac_addresses() local 126 va_start(ap, startindex); __dt_fixup_mac_addresses() 128 while ((addr = va_arg(ap, const u8 *))) __dt_fixup_mac_addresses() 131 va_end(ap); __dt_fixup_mac_addresses()
|
/linux-4.1.27/arch/arm/vdso/ |
H A D | vdsomunge.c | 98 va_list ap; fail() local 102 va_start(ap, fmt); fail() 103 vfprintf(stderr, fmt, ap); fail() 104 va_end(ap); fail()
|
/linux-4.1.27/tools/testing/selftests/vm/ |
H A D | thuge-gen.c | 111 va_list ap; read_sysfs() local 114 va_start(ap, fmt); read_sysfs() 115 vsnprintf(buf, sizeof buf, fmt, ap); read_sysfs() 116 va_end(ap); read_sysfs()
|
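read_sysfs() in thuge-gen.c above first builds the sysfs path with vsnprintf() from its variadic arguments and only then opens the file. A standalone sketch of that pattern, assuming a /proc path purely as an example input:

#include <stdarg.h>
#include <stdio.h>

static long read_long_file(const char *fmt, ...)
{
	char path[256];
	char line[64];
	long val = -1;
	va_list ap;
	FILE *f;

	va_start(ap, fmt);
	vsnprintf(path, sizeof(path), fmt, ap);	/* format the path first */
	va_end(ap);

	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fgets(line, sizeof(line), f))
		sscanf(line, "%ld", &val);
	fclose(f);
	return val;
}

int main(void)
{
	long n = read_long_file("/proc/sys/vm/%s", "nr_hugepages");

	printf("nr_hugepages = %ld\n", n);
	return 0;
}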
/linux-4.1.27/net/mac80211/ |
H A D | cfg.c | 607 old = sdata_dereference(sdata->u.ap.probe_resp, sdata); ieee80211_set_probe_resp() 621 rcu_assign_pointer(sdata->u.ap.probe_resp, new); ieee80211_set_probe_resp() 637 old = sdata_dereference(sdata->u.ap.beacon, sdata); ieee80211_assign_beacon() 701 rcu_assign_pointer(sdata->u.ap.beacon, new); ieee80211_assign_beacon() 724 old = sdata_dereference(sdata->u.ap.beacon, sdata); ieee80211_start_ap() 762 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { ieee80211_start_ap() 801 old = sdata_dereference(sdata->u.ap.beacon, sdata); ieee80211_start_ap() 805 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); ieee80211_start_ap() 814 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) ieee80211_start_ap() 836 old = sdata_dereference(sdata->u.ap.beacon, sdata); ieee80211_change_beacon() 858 old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata); ieee80211_stop_ap() 861 old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata); ieee80211_stop_ap() 874 kfree(sdata->u.ap.next_beacon); ieee80211_stop_ap() 875 sdata->u.ap.next_beacon = NULL; ieee80211_stop_ap() 878 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) ieee80211_stop_ap() 883 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); ieee80211_stop_ap() 884 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); ieee80211_stop_ap() 888 sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF; ieee80211_stop_ap() 909 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); ieee80211_stop_ap() 910 skb_queue_purge(&sdata->u.ap.ps.bc_buf); ieee80211_stop_ap() 1846 if (!sdata_dereference(sdata->u.ap.beacon, sdata)) ieee80211_change_bss() 2003 if (sdata->u.ap.beacon && ieee80211_scan() 2289 old_req = sdata->u.ap.req_smps; __ieee80211_request_smps_ap() 2290 sdata->u.ap.req_smps = smps_mode; __ieee80211_request_smps_ap() 2298 if (!atomic_read(&sdata->u.ap.num_mcast_sta)) { __ieee80211_request_smps_ap() 2306 smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta)); __ieee80211_request_smps_ap() 2314 if (sta->sdata->bss != &sdata->u.ap) __ieee80211_request_smps_ap() 2355 const u8 *ap; __ieee80211_request_smps_mgd() local 2380 ap = sdata->u.mgd.associated->bssid; __ieee80211_request_smps_mgd() 2391 ap, ap); __ieee80211_request_smps_mgd() 2882 err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon, ieee80211_set_after_csa_beacon() 2884 kfree(sdata->u.ap.next_beacon); ieee80211_set_after_csa_beacon() 2885 sdata->u.ap.next_beacon = NULL; ieee80211_set_after_csa_beacon() 3013 sdata->u.ap.next_beacon = ieee80211_set_csa_beacon() 3015 if (!sdata->u.ap.next_beacon) ieee80211_set_csa_beacon() 3051 kfree(sdata->u.ap.next_beacon); ieee80211_set_csa_beacon() 3368 beacon = rcu_dereference(sdata->u.ap.beacon); ieee80211_mgmt_tx()
|
H A D | ht.c | 486 u.ap.request_smps_work); ieee80211_request_smps_ap_work() 489 if (sdata_dereference(sdata->u.ap.beacon, sdata)) ieee80211_request_smps_ap_work() 491 sdata->u.ap.driver_smps_mode); ieee80211_request_smps_ap_work() 514 if (sdata->u.ap.driver_smps_mode == smps_mode) ieee80211_request_smps() 516 sdata->u.ap.driver_smps_mode = smps_mode; ieee80211_request_smps() 518 &sdata->u.ap.request_smps_work); ieee80211_request_smps()
|
/linux-4.1.27/drivers/net/wireless/ |
H A D | zd1201.c | 38 static int ap; /* Are we an AP or a normal station? */ variable 46 module_param(ap, int, 0); 47 MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded"); 62 fwfile = "zd1201-ap.fw"; zd1201_fw_upload() 121 MODULE_FIRMWARE("zd1201-ap.fw"); 724 if (!zd->ap) { /* Normal station */ zd1201_join() 953 if (zd->ap) { zd1201_set_mode() 1121 if (zd->ap) zd1201_get_scan() 1613 if (!zd->ap) zd1201_set_hostauth() 1626 if (!zd->ap) zd1201_get_hostauth() 1644 if (!zd->ap) zd1201_auth_sta() 1660 if (!zd->ap) zd1201_set_maxassoc() 1676 if (!zd->ap) zd1201_get_maxassoc() 1749 zd->ap = ap; zd1201_probe() 1755 err = zd1201_fw_upload(usb, zd->ap); zd1201_probe() 1802 if (zd->ap) zd1201_probe()
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/ |
H A D | tracefile.c | 271 va_list ap; libcfs_debug_vmsg2() local 337 va_copy(ap, args); libcfs_debug_vmsg2() 338 needed = vsnprintf(string_buf, max_nob, format1, ap); libcfs_debug_vmsg2() 339 va_end(ap); libcfs_debug_vmsg2() 347 va_start(ap, format2); libcfs_debug_vmsg2() 349 format2, ap); libcfs_debug_vmsg2() 350 va_end(ap); libcfs_debug_vmsg2() 437 va_copy(ap, args); libcfs_debug_vmsg2() 440 format1, ap); libcfs_debug_vmsg2() 441 va_end(ap); libcfs_debug_vmsg2() 446 va_start(ap, format2); libcfs_debug_vmsg2() 448 format2, ap); libcfs_debug_vmsg2() 449 va_end(ap); libcfs_debug_vmsg2()
|
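libcfs_debug_vmsg2() above formats from the same argument list more than once, which is only legal through va_copy(). The standalone sketch below shows that measure-then-format idiom; xvasprintf()/xasprintf() are hypothetical helper names, not libcfs functions.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *xvasprintf(const char *fmt, va_list args)
{
	va_list ap;
	int len;
	char *buf;

	va_copy(ap, args);			/* first pass: measure only */
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len < 0)
		return NULL;

	buf = malloc((size_t)len + 1);
	if (!buf)
		return NULL;
	vsnprintf(buf, (size_t)len + 1, fmt, args);	/* second pass: format */
	return buf;
}

static char *xasprintf(const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = xvasprintf(fmt, ap);
	va_end(ap);
	return p;
}

int main(void)
{
	char *s = xasprintf("pid %d: %s", 1234, "hello");

	if (s) {
		puts(s);
		free(s);
	}
	return 0;
}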
/linux-4.1.27/drivers/media/pci/cx18/ |
H A D | cx18-mailbox.c | 841 va_list ap; cx18_vapi_result() local 844 va_start(ap, args); cx18_vapi_result() 846 data[i] = va_arg(ap, u32); cx18_vapi_result() 847 va_end(ap); cx18_vapi_result() 854 va_list ap; cx18_vapi() local 865 va_start(ap, args); cx18_vapi() 867 data[i] = va_arg(ap, u32); cx18_vapi() 868 va_end(ap); cx18_vapi()
|
/linux-4.1.27/scripts/kconfig/lxdialog/ |
H A D | util.c | 608 va_list ap; item_make() local 618 va_start(ap, fmt); item_make() 619 vsnprintf(item_cur->node.str, sizeof(item_cur->node.str), fmt, ap); item_make() 620 va_end(ap); item_make() 625 va_list ap; item_add_str() local 630 va_start(ap, fmt); item_add_str() 632 avail, fmt, ap); item_add_str() 634 va_end(ap); item_add_str()
|
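item_make() and item_add_str() above both format into a fixed-size node buffer with vsnprintf(); the append variant computes the remaining space first and formats only into that tail. A self-contained sketch of the pair, with struct item and its 64-byte buffer invented for the example (the lxdialog node uses its own size):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

struct item {
	char str[64];
};

static void item_make(struct item *it, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(it->str, sizeof(it->str), fmt, ap);
	va_end(ap);
}

static void item_add_str(struct item *it, const char *fmt, ...)
{
	size_t len = strlen(it->str);
	size_t avail = sizeof(it->str) - len;	/* space left, incl. '\0' */
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(it->str + len, avail, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct item it;

	item_make(&it, "[%c] %s", '*', "Networking support");
	item_add_str(&it, "  --->");
	puts(it.str);
	return 0;
}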